From f544afac3f34124088b981c63843a3cc48f4ee3e Mon Sep 17 00:00:00 2001 From: Amber Lin Date: Tue, 17 May 2022 23:41:01 +0800 Subject: drm/amdgpu: Add kgd2kfd for GC 9.4.3 New GC (v9.4.3) and ATHUB (v1.8.0) versions are used. Add kgd_gfx_v9_4_3_* functions if registers in use of kgd_gfx_v9_* functions are changed or have different offset. Signed-off-by: Amber Lin Acked-by: Felix Kuehling Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 00f528eb9812..1510041a6ee1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -51,6 +51,7 @@ extern const struct kfd2kgd_calls gfx_v8_kfd2kgd; extern const struct kfd2kgd_calls gfx_v9_kfd2kgd; extern const struct kfd2kgd_calls arcturus_kfd2kgd; extern const struct kfd2kgd_calls aldebaran_kfd2kgd; +extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; extern const struct kfd2kgd_calls gfx_v11_kfd2kgd; @@ -328,7 +329,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) break; case IP_VERSION(9, 4, 3): gfx_target_version = 90400; - f2g = &aldebaran_kfd2kgd; + f2g = &gc_9_4_3_kfd2kgd; break; /* Navi10 */ case IP_VERSION(10, 1, 10): -- cgit v1.2.3 From 8dc1db3172ae2f17ae71e33b608a33411ce8a1aa Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 14 Sep 2022 16:39:48 +0800 Subject: drm/amdkfd: Introduce kfd_node struct (v5) Introduce a new structure, kfd_node, which will now represent a compute node. kfd_node is carved out of kfd_dev structure. kfd_dev struct now will become the parent of kfd_node, and will store common resources such as doorbells, GTT sub-alloctor etc. kfd_node struct will store all resources specific to a compute node, such as device queue manager, interrupt handling etc. This is the first step in adding compute partition support in KFD. 
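For illustration only, a rough sketch of how the members end up split after this change. This is abbreviated and the field placement is inferred from the hunks below; it is not the actual kfd_priv.h definition, which carries many more members:

/*
 * Simplified sketch (illustrative, not the real headers).
 * kfd_node: per-compute-node resources carved out of kfd_dev.
 */
struct kfd_node {
	struct amdgpu_device *adev;		/* duplicated from the parent for convenience */
	struct kfd_dev *kfd;			/* back-pointer to the parent device */
	const struct kfd2kgd_calls *kfd2kgd;
	struct kfd_vmid_info vm_info;		/* first/last/num KFD VMIDs */
	unsigned int max_proc_per_quantum;
	struct device_queue_manager *dqm;	/* per-node device queue manager */
	void *gws;				/* per-node global wave sync allocation */
	/* per-node interrupt handling */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;
	bool interrupts_active;
	/* per-node SMI events and SRAM ECC state */
	struct list_head smi_clients;
	spinlock_t smi_lock;
	atomic_t sram_ecc_flag;
};

/* kfd_dev: resources common to the whole device, now the parent of kfd_node. */
struct kfd_dev {
	struct amdgpu_device *adev;
	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_device_info device_info;
	struct kfd_local_mem_info local_mem_info;
	/* doorbells, GTT sub-allocator, firmware versions, hive_id,
	 * cwsr/noretry/IOMMU flags, compute_profile, init_complete, ...
	 */
	struct kfd_node *node;			/* the compute node carved out above */
};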
v2: introduce kfd_node struct to gc v11 (Hawking) v3: make reference to kfd_dev struct through kfd_node (Morris) v4: use kfd_node instead for kfd isr/mqd functions (Morris) v5: rebase (Alex) Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Hawking Zhang Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1 + drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 43 ++-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 28 +-- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 258 +++++++++++++-------- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 100 ++++---- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 +- .../drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 12 +- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 12 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 10 +- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 64 ++--- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 22 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 18 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 18 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 24 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 10 +- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 168 ++++++++------ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 54 ++--- .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 20 +- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 40 ++-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 56 ++--- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 8 +- 38 files changed, 573 insertions(+), 495 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index de6ba0d4b860..af37f2ef4438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -35,6 +35,7 @@ #include "amdgpu_dma_buf.h" #include #include "amdgpu_xgmi.h" +#include "kfd_priv.h" #include "kfd_smi_events.h" #include diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 5c8023cba196..4ebfff6b6c55 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -26,7 +26,7 @@ #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" -static bool cik_event_interrupt_isr(struct kfd_dev *dev, +static bool cik_event_interrupt_isr(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -85,7 +85,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); 
} -static void cik_event_interrupt_wq(struct kfd_dev *dev, +static void cik_event_interrupt_wq(struct kfd_node *dev, const uint32_t *ih_ring_entry) { const struct cik_ih_ring_entry *ihre = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 81d07ecf666d..eb0b0b38f10e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -293,7 +293,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_create_queue_args *args = data; - struct kfd_dev *dev; + struct kfd_node *dev; int err = 0; unsigned int queue_id; struct kfd_process_device *pdd; @@ -328,7 +328,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(dev->kfd, &pdd->doorbell_index) < 0) { err = -ENOMEM; goto err_alloc_doorbells; } @@ -336,7 +336,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work * on unmapped queues for usermode queue oversubscription (no aggregated doorbell) */ - if (dev->shared_resources.enable_mes && + if (dev->kfd->shared_resources.enable_mes && ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) { struct amdgpu_bo_va_mapping *wptr_mapping; @@ -887,7 +887,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, { struct kfd_ioctl_set_scratch_backing_va_args *args = data; struct kfd_process_device *pdd; - struct kfd_dev *dev; + struct kfd_node *dev; long err; mutex_lock(&p->mutex); @@ -1006,18 +1006,18 @@ err_drm_file: return ret; } -bool kfd_dev_is_large_bar(struct kfd_dev *dev) +bool kfd_dev_is_large_bar(struct kfd_node *dev) { if (debug_largebar) { pr_debug("Simulate large-bar allocation on non large-bar machine\n"); return true; } - if (dev->use_iommu_v2) + if (dev->kfd->use_iommu_v2) return false; - if (dev->local_mem_info.local_mem_size_private == 0 && - dev->local_mem_info.local_mem_size_public > 0) + if (dev->kfd->local_mem_info.local_mem_size_private == 0 && + dev->kfd->local_mem_info.local_mem_size_public > 0) return true; return false; } @@ -1041,7 +1041,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, struct kfd_ioctl_alloc_memory_of_gpu_args *args = data; struct kfd_process_device *pdd; void *mem; - struct kfd_dev *dev; + struct kfd_node *dev; int idr_handle; long err; uint64_t offset = args->mmap_offset; @@ -1105,7 +1105,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, } if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { - if (args->size != kfd_doorbell_process_slice(dev)) { + if (args->size != kfd_doorbell_process_slice(dev->kfd)) { err = -EINVAL; goto err_unlock; } @@ -1231,7 +1231,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, struct kfd_ioctl_map_memory_to_gpu_args *args = data; struct kfd_process_device *pdd, *peer_pdd; void *mem; - struct kfd_dev *dev; + struct kfd_node *dev; long err = 0; int i; uint32_t *devices_arr = NULL; @@ -1405,7 +1405,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, args->n_success = i+1; } - flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev); + flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd); if (flush_tlb) { err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev, (struct kgd_mem *) mem, true); @@ -1445,7 +1445,7 @@ static int 
kfd_ioctl_alloc_queue_gws(struct file *filep, int retval; struct kfd_ioctl_alloc_queue_gws_args *args = data; struct queue *q; - struct kfd_dev *dev; + struct kfd_node *dev; mutex_lock(&p->mutex); q = pqm_get_user_queue(&p->pqm, args->queue_id); @@ -1482,7 +1482,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_get_dmabuf_info_args *args = data; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct amdgpu_device *dmabuf_adev; void *metadata_buffer = NULL; uint32_t flags; @@ -1596,7 +1596,7 @@ static int kfd_ioctl_export_dmabuf(struct file *filep, struct kfd_ioctl_export_dmabuf_args *args = data; struct kfd_process_device *pdd; struct dma_buf *dmabuf; - struct kfd_dev *dev; + struct kfd_node *dev; void *mem; int ret = 0; @@ -2178,7 +2178,7 @@ static int criu_restore_devices(struct kfd_process *p, } for (i = 0; i < args->num_devices; i++) { - struct kfd_dev *dev; + struct kfd_node *dev; struct kfd_process_device *pdd; struct file *drm_file; @@ -2240,7 +2240,7 @@ static int criu_restore_devices(struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) { ret = -ENOMEM; goto exit; } @@ -2268,7 +2268,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, u64 offset; if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { - if (bo_bucket->size != kfd_doorbell_process_slice(pdd->dev)) + if (bo_bucket->size != + kfd_doorbell_process_slice(pdd->dev->kfd)) return -EINVAL; offset = kfd_get_process_doorbells(pdd); @@ -2350,7 +2351,7 @@ static int criu_restore_bo(struct kfd_process *p, /* now map these BOs to GPU/s */ for (j = 0; j < p->n_pdds; j++) { - struct kfd_dev *peer; + struct kfd_node *peer; struct kfd_process_device *peer_pdd; if (!bo_priv->mapped_gpuids[j]) @@ -2947,7 +2948,7 @@ err_i1: return retcode; } -static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, +static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { phys_addr_t address; @@ -2981,7 +2982,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) { struct kfd_process *process; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; unsigned long mmap_offset; unsigned int gpu_id; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 475e47027354..f5aebba31e88 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1405,7 +1405,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, return i; } -int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info) +int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) { int num_of_cache_types = 0; @@ -1524,7 +1524,7 @@ int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pca case IP_VERSION(11, 0, 3): case IP_VERSION(11, 0, 4): num_of_cache_types = - kfd_fill_gpu_cache_info_from_gfx_config(kdev, *pcache_info); + kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info); break; default: *pcache_info = dummy_cache_info; @@ -1858,7 +1858,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size) } static int kfd_fill_gpu_memory_affinity(int *avail_size, - struct kfd_dev 
*kdev, uint8_t type, uint64_t size, + struct kfd_node *kdev, uint8_t type, uint64_t size, struct crat_subtype_memory *sub_type_hdr, uint32_t proximity_domain, const struct kfd_local_mem_info *local_mem_info) @@ -1887,7 +1887,7 @@ static int kfd_fill_gpu_memory_affinity(int *avail_size, } #ifdef CONFIG_ACPI_NUMA -static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev) +static void kfd_find_numa_node_in_srat(struct kfd_node *kdev) { struct acpi_table_header *table_header = NULL; struct acpi_subtable_header *sub_header = NULL; @@ -1982,7 +1982,7 @@ static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev) * Return 0 if successful else return -ve value */ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, - struct kfd_dev *kdev, + struct kfd_node *kdev, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain) { @@ -2044,8 +2044,8 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, } static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, - struct kfd_dev *kdev, - struct kfd_dev *peer_kdev, + struct kfd_node *kdev, + struct kfd_node *peer_kdev, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain_from, uint32_t proximity_domain_to) @@ -2081,7 +2081,7 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, * [OUT] actual size of data filled in crat_image */ static int kfd_create_vcrat_image_gpu(void *pcrat_image, - size_t *size, struct kfd_dev *kdev, + size_t *size, struct kfd_node *kdev, uint32_t proximity_domain) { struct crat_header *crat_table = (struct crat_header *)pcrat_image; @@ -2153,7 +2153,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, /* Check if this node supports IOMMU. During parsing this flag will * translate to HSA_CAP_ATS_PRESENT */ - if (!kfd_iommu_check_device(kdev)) + if (!kfd_iommu_check_device(kdev->kfd)) cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT; crat_table->length += sub_type_hdr->length; @@ -2164,7 +2164,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. */ - local_mem_info = kdev->local_mem_info; + local_mem_info = kdev->kfd->local_mem_info; sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); @@ -2216,12 +2216,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * (from other GPU to this GPU) will be added * in kfd_parse_subtype_iolink. */ - if (kdev->hive_id) { + if (kdev->kfd->hive_id) { for (nid = 0; nid < proximity_domain; ++nid) { peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid); if (!peer_dev->gpu) continue; - if (peer_dev->gpu->hive_id != kdev->hive_id) + if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) continue; sub_type_hdr = (typeof(sub_type_hdr))( (char *)sub_type_hdr + @@ -2255,12 +2255,12 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU * -- this option is not currently implemented. 
* The assumption is that all AMD APUs will have CRAT - * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU + * @kdev: Valid kfd_node required if flags contain COMPUTE_UNIT_GPU * * Return 0 if successful else return -ve value */ int kfd_create_crat_image_virtual(void **crat_image, size_t *size, - int flags, struct kfd_dev *kdev, + int flags, struct kfd_node *kdev, uint32_t proximity_domain) { void *pcrat_image = NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index 8d1e8ba58dee..3d0e533b93b9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -293,7 +293,7 @@ struct crat_subtype_generic { #pragma pack() -struct kfd_dev; +struct kfd_node; /* Static table to describe GPU Cache information */ struct kfd_gpu_cache_info { @@ -305,14 +305,14 @@ struct kfd_gpu_cache_info { */ uint32_t num_cu_shared; }; -int kfd_get_gpu_cache_info(struct kfd_dev *kdev, struct kfd_gpu_cache_info **pcache_info); +int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info); int kfd_create_crat_image_acpi(void **crat_image, size_t *size); void kfd_destroy_crat_image(void *crat_image); int kfd_parse_crat_table(void *crat_image, struct list_head *device_list, uint32_t proximity_domain); int kfd_create_crat_image_virtual(void **crat_image, size_t *size, - int flags, struct kfd_dev *kdev, + int flags, struct kfd_node *kdev, uint32_t proximity_domain); #endif /* KFD_CRAT_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c index ad5a40a685ac..4a5a0a4e00f2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c @@ -43,7 +43,7 @@ static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data) static ssize_t kfd_debugfs_hang_hws_write(struct file *file, const char __user *user_buf, size_t size, loff_t *ppos) { - struct kfd_dev *dev; + struct kfd_node *dev; char tmp[16]; uint32_t gpu_id; int ret = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1510041a6ee1..23d9a7f77055 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -61,7 +61,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume_iommu(struct kfd_dev *kfd); -static int kfd_resume(struct kfd_dev *kfd); +static int kfd_resume(struct kfd_node *kfd); static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) { @@ -441,8 +441,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) memset(&kfd->doorbell_available_index, 0, sizeof(kfd->doorbell_available_index)); - atomic_set(&kfd->sram_ecc_flag, 0); - ida_init(&kfd->doorbell_ida); return kfd; @@ -489,41 +487,106 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) } } -static int kfd_gws_init(struct kfd_dev *kfd) +static int kfd_gws_init(struct kfd_node *node) { int ret = 0; + struct kfd_dev *kfd = node->kfd; - if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) + if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) return 0; - if (hws_gws_support || (KFD_IS_SOC15(kfd) && - ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) + if (hws_gws_support || (KFD_IS_SOC15(node) && + ((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1) && kfd->mec2_fw_version >= 0x81b3) || - (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) + (KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0) && 
kfd->mec2_fw_version >= 0x1b3) || - (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) + (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1) && kfd->mec2_fw_version >= 0x30) || - (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) + (KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2) && kfd->mec2_fw_version >= 0x28) || - (KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0) - && KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0) + (KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0) + && KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0) && kfd->mec2_fw_version >= 0x6b)))) - ret = amdgpu_amdkfd_alloc_gws(kfd->adev, - kfd->adev->gds.gws_size, &kfd->gws); + ret = amdgpu_amdkfd_alloc_gws(node->adev, + node->adev->gds.gws_size, &node->gws); return ret; } -static void kfd_smi_init(struct kfd_dev *dev) +static void kfd_smi_init(struct kfd_node *dev) { INIT_LIST_HEAD(&dev->smi_clients); spin_lock_init(&dev->smi_lock); } +static int kfd_init_node(struct kfd_node *node) +{ + int err = -1; + + if (kfd_interrupt_init(node)) { + dev_err(kfd_device, "Error initializing interrupts\n"); + goto kfd_interrupt_error; + } + + node->dqm = device_queue_manager_init(node); + if (!node->dqm) { + dev_err(kfd_device, "Error initializing queue manager\n"); + goto device_queue_manager_error; + } + + if (kfd_gws_init(node)) { + dev_err(kfd_device, "Could not allocate %d gws\n", + node->adev->gds.gws_size); + goto gws_error; + } + + if (kfd_resume(node)) + goto kfd_resume_error; + + if (kfd_topology_add_device(node)) { + dev_err(kfd_device, "Error adding device to topology\n"); + goto kfd_topology_add_device_error; + } + + kfd_smi_init(node); + + return 0; + +kfd_topology_add_device_error: +kfd_resume_error: +gws_error: + device_queue_manager_uninit(node->dqm); +device_queue_manager_error: + kfd_interrupt_exit(node); +kfd_interrupt_error: + if (node->gws) + amdgpu_amdkfd_free_gws(node->adev, node->gws); + + /* Cleanup the node memory here */ + kfree(node); + return err; +} + +static void kfd_cleanup_node(struct kfd_dev *kfd) +{ + struct kfd_node *knode = kfd->node; + + device_queue_manager_uninit(knode->dqm); + kfd_interrupt_exit(knode); + kfd_topology_remove_device(knode); + if (knode->gws) + amdgpu_amdkfd_free_gws(knode->adev, knode->gws); + kfree(knode); + kfd->node = NULL; +} + bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources) { unsigned int size, map_process_packet_size; + struct kfd_node *node; + uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; + unsigned int max_proc_per_quantum; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -533,10 +596,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; - kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; - kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd - - kfd->vm_info.first_vmid_kfd + 1; + first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; + last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; + vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. 
* 32 and 64-bit requests are possible and must be @@ -557,9 +619,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, /* Verify module parameters regarding mapped process number*/ if (hws_max_conc_proc >= 0) - kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); + max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd); else - kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; + max_proc_per_quantum = vmid_num_kfd; /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * @@ -609,26 +671,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->noretry = kfd->adev->gmc.noretry; - if (kfd_interrupt_init(kfd)) { - dev_err(kfd_device, "Error initializing interrupts\n"); - goto kfd_interrupt_error; - } - - kfd->dqm = device_queue_manager_init(kfd); - if (!kfd->dqm) { - dev_err(kfd_device, "Error initializing queue manager\n"); - goto device_queue_manager_error; - } - - /* If supported on this device, allocate global GWS that is shared - * by all KFD processes - */ - if (kfd_gws_init(kfd)) { - dev_err(kfd_device, "Could not allocate %d gws\n", - kfd->adev->gds.gws_size); - goto gws_error; - } - /* If CRAT is broken, won't set iommu enabled */ kfd_double_confirm_iommu_support(kfd); @@ -642,46 +684,54 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_migrate_init(kfd->adev); - if (kfd_resume_iommu(kfd)) - goto device_iommu_error; - - if (kfd_resume(kfd)) - goto kfd_resume_error; - - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); + /* Allocate the KFD node */ + node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); + if (!node) { + dev_err(kfd_device, "Error allocating KFD node\n"); + goto node_alloc_error; + } - if (kfd_topology_add_device(kfd)) { - dev_err(kfd_device, "Error adding device to topology\n"); - goto kfd_topology_add_device_error; + node->adev = kfd->adev; + node->kfd = kfd; + node->kfd2kgd = kfd->kfd2kgd; + node->vm_info.vmid_num_kfd = vmid_num_kfd; + node->vm_info.first_vmid_kfd = first_vmid_kfd; + node->vm_info.last_vmid_kfd = last_vmid_kfd; + node->max_proc_per_quantum = max_proc_per_quantum; + atomic_set(&node->sram_ecc_flag, 0); + + /* Initialize the KFD node */ + if (kfd_init_node(node)) { + dev_err(kfd_device, "Error initializing KFD node\n"); + goto node_init_error; } + kfd->node = node; - kfd_smi_init(kfd); + if (kfd_resume_iommu(kfd)) + goto kfd_resume_iommu_error; + + amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); kfd->init_complete = true; dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); pr_debug("Starting kfd with the following scheduling policy %d\n", - kfd->dqm->sched_policy); + node->dqm->sched_policy); goto out; -kfd_topology_add_device_error: -kfd_resume_error: +kfd_resume_iommu_error: + kfd_cleanup_node(kfd); +node_init_error: +node_alloc_error: device_iommu_error: -gws_error: - device_queue_manager_uninit(kfd->dqm); -device_queue_manager_error: - kfd_interrupt_exit(kfd); -kfd_interrupt_error: kfd_doorbell_fini(kfd); kfd_doorbell_error: kfd_gtt_sa_fini(kfd); kfd_gtt_sa_init_error: amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); alloc_gtt_mem_failure: - if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); dev_err(kfd_device, "device %x:%x NOT added due to errors\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); @@ -692,15 +742,11 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - device_queue_manager_uninit(kfd->dqm); - kfd_interrupt_exit(kfd); - 
kfd_topology_remove_device(kfd); + kfd_cleanup_node(kfd); kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); - if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); } kfree(kfd); @@ -708,16 +754,18 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) int kgd2kfd_pre_reset(struct kfd_dev *kfd) { + struct kfd_node *node = kfd->node; + if (!kfd->init_complete) return 0; - kfd_smi_event_update_gpu_reset(kfd, false); + kfd_smi_event_update_gpu_reset(node, false); - kfd->dqm->ops.pre_reset(kfd->dqm); + node->dqm->ops.pre_reset(node->dqm); kgd2kfd_suspend(kfd, false); - kfd_signal_reset_event(kfd); + kfd_signal_reset_event(node); return 0; } @@ -730,18 +778,19 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { int ret; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return 0; - ret = kfd_resume(kfd); + ret = kfd_resume(node); if (ret) return ret; atomic_dec(&kfd_locked); - atomic_set(&kfd->sram_ecc_flag, 0); + atomic_set(&node->sram_ecc_flag, 0); - kfd_smi_event_update_gpu_reset(kfd, true); + kfd_smi_event_update_gpu_reset(node, true); return 0; } @@ -753,6 +802,8 @@ bool kfd_is_locked(void) void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { + struct kfd_node *node = kfd->node; + if (!kfd->init_complete) return; @@ -763,18 +814,19 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kfd_suspend_all_processes(); } - kfd->dqm->ops.stop(kfd->dqm); + node->dqm->ops.stop(node->dqm); kfd_iommu_suspend(kfd); } int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { int ret, count; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return 0; - ret = kfd_resume(kfd); + ret = kfd_resume(node); if (ret) return ret; @@ -809,15 +861,15 @@ static int kfd_resume_iommu(struct kfd_dev *kfd) return err; } -static int kfd_resume(struct kfd_dev *kfd) +static int kfd_resume(struct kfd_node *node) { int err = 0; - err = kfd->dqm->ops.start(kfd->dqm); + err = node->dqm->ops.start(node->dqm); if (err) dev_err(kfd_device, "Error starting queue manager for device %x:%x\n", - kfd->adev->pdev->vendor, kfd->adev->pdev->device); + node->adev->pdev->vendor, node->adev->pdev->device); return err; } @@ -843,6 +895,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; bool is_patched = false; unsigned long flags; + struct kfd_node *node = kfd->node; if (!kfd->init_complete) return; @@ -852,16 +905,16 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) return; } - spin_lock_irqsave(&kfd->interrupt_lock, flags); + spin_lock_irqsave(&node->interrupt_lock, flags); - if (kfd->interrupts_active - && interrupt_is_wanted(kfd, ih_ring_entry, + if (node->interrupts_active + && interrupt_is_wanted(node, ih_ring_entry, patched_ihre, &is_patched) - && enqueue_ih_ring_entry(kfd, + && enqueue_ih_ring_entry(node, is_patched ? 
patched_ihre : ih_ring_entry)) - kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work); + kfd_queue_work(node->ih_wq, &node->interrupt_work); - spin_unlock_irqrestore(&kfd->interrupt_lock, flags); + spin_unlock_irqrestore(&node->interrupt_lock, flags); } int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger) @@ -999,10 +1052,11 @@ static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr, return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size); } -int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, +int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, struct kfd_mem_obj **mem_obj) { unsigned int found, start_search, cur_size; + struct kfd_dev *kfd = node->kfd; if (size == 0) return -EINVAL; @@ -1102,8 +1156,10 @@ kfd_gtt_no_free_chunk: return -ENOMEM; } -int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) +int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj) { + struct kfd_dev *kfd = node->kfd; + /* Act like kfree when trying to free a NULL object */ if (!mem_obj) return 0; @@ -1126,28 +1182,28 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj) void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) { if (kfd) - atomic_inc(&kfd->sram_ecc_flag); + atomic_inc(&kfd->node->sram_ecc_flag); } -void kfd_inc_compute_active(struct kfd_dev *kfd) +void kfd_inc_compute_active(struct kfd_node *node) { - if (atomic_inc_return(&kfd->compute_profile) == 1) - amdgpu_amdkfd_set_compute_idle(kfd->adev, false); + if (atomic_inc_return(&node->kfd->compute_profile) == 1) + amdgpu_amdkfd_set_compute_idle(node->adev, false); } -void kfd_dec_compute_active(struct kfd_dev *kfd) +void kfd_dec_compute_active(struct kfd_node *node) { - int count = atomic_dec_return(&kfd->compute_profile); + int count = atomic_dec_return(&node->kfd->compute_profile); if (count == 0) - amdgpu_amdkfd_set_compute_idle(kfd->adev, true); + amdgpu_amdkfd_set_compute_idle(node->adev, true); WARN_ONCE(count < 0, "Compute profile ref. count error"); } void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { if (kfd && kfd->init_complete) - kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); + kfd_smi_event_update_thermal_throttling(kfd->node, throttle_bitmask); } /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and @@ -1155,19 +1211,19 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) * When the device has more than two engines, we reserve two for PCIe to enable * full-duplex and the rest are used as XGMI. 
*/ -unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) +unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) { /* If XGMI is not supported, all SDMA engines are PCIe */ - if (!kdev->adev->gmc.xgmi.supported) - return kdev->adev->sdma.num_instances; + if (!node->adev->gmc.xgmi.supported) + return node->adev->sdma.num_instances; - return min(kdev->adev->sdma.num_instances, 2); + return min(node->adev->sdma.num_instances, 2); } -unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) { /* After reserved for PCIe, the rest of engines are XGMI */ - return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); + return node->adev->sdma.num_instances - kfd_get_num_sdma_engines(node); } #if defined(CONFIG_DEBUG_FS) @@ -1175,7 +1231,7 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) /* This function will send a package to HIQ to hang the HWS * which will trigger a GPU reset and bring the HWS back to normal state */ -int kfd_debugfs_hang_hws(struct kfd_dev *dev) +int kfd_debugfs_hang_hws(struct kfd_node *dev) { if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { pr_err("HWS is not enabled"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 7a95698d83f7..34977d89f01c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -74,31 +74,31 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) { int i; - int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec - + pipe) * dqm->dev->shared_resources.num_queue_per_pipe; + int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec + + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe; /* queue is available for KFD usage if bit is 1 */ - for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i) + for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i) if (test_bit(pipe_offset + i, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) return true; return false; } unsigned int get_cp_queues_num(struct device_queue_manager *dqm) { - return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap, + return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap, KGD_MAX_QUEUES); } unsigned int get_queues_per_pipe(struct device_queue_manager *dqm) { - return dqm->dev->shared_resources.num_queue_per_pipe; + return dqm->dev->kfd->shared_resources.num_queue_per_pipe; } unsigned int get_pipes_per_mec(struct device_queue_manager *dqm) { - return dqm->dev->shared_resources.num_pipe_per_mec; + return dqm->dev->kfd->shared_resources.num_pipe_per_mec; } static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) @@ -110,18 +110,18 @@ static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_sdma_engines(dqm->dev) * - dqm->dev->device_info.num_sdma_queues_per_engine; + dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_xgmi_sdma_engines(dqm->dev) * - dqm->dev->device_info.num_sdma_queues_per_engine; + dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } static 
inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm) { - return dqm->dev->device_info.reserved_sdma_queues_bitmap; + return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; } void program_sh_mem_settings(struct device_queue_manager *dqm, @@ -330,7 +330,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q, uint32_t const *restore_id) { - struct kfd_dev *dev = qpd->dqm->dev; + struct kfd_node *dev = qpd->dqm->dev; if (!KFD_IS_SOC15(dev)) { /* On pre-SOC15 chips we need to use the queue ID to @@ -349,7 +349,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, * for a SDMA engine is 512. */ - uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx; + uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx; uint32_t valid_id = idx_offset[q->properties.sdma_engine_id] + (q->properties.sdma_queue_id & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET @@ -382,7 +382,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, } q->properties.doorbell_off = - kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd), + kfd_get_doorbell_dw_offset_in_bar(dev->kfd, qpd_to_pdd(qpd), q->doorbell_id); return 0; } @@ -391,7 +391,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { unsigned int old; - struct kfd_dev *dev = qpd->dqm->dev; + struct kfd_node *dev = qpd->dqm->dev; if (!KFD_IS_SOC15(dev) || q->properties.type == KFD_QUEUE_TYPE_SDMA || @@ -441,7 +441,7 @@ static int allocate_vmid(struct device_queue_manager *dqm, program_sh_mem_settings(dqm, qpd); - if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled) + if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled) program_trap_handler_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() @@ -460,7 +460,7 @@ static int allocate_vmid(struct device_queue_manager *dqm, return 0; } -static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, +static int flush_texture_cache_nocpsch(struct kfd_node *kdev, struct qcm_process_device *qpd) { const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf; @@ -661,7 +661,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm, #define SQ_IND_CMD_CMD_KILL 0x00000003 #define SQ_IND_CMD_MODE_BROADCAST 0x00000001 -static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) +static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p) { int status = 0; unsigned int vmid; @@ -837,7 +837,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, /* Make sure the queue is unmapped before updating the MQD */ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false); else if (prev_active) @@ -858,7 +858,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, } retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, - (dqm->dev->cwsr_enabled ? + (dqm->dev->kfd->cwsr_enabled ? 
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -895,7 +895,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, } if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = map_queues_cpsch(dqm); else if (q->properties.is_active) retval = add_queue_mes(dqm, q, &pdd->qpd); @@ -951,7 +951,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, continue; retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, - (dqm->dev->cwsr_enabled ? + (dqm->dev->kfd->cwsr_enabled ? KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -993,7 +993,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, q->properties.is_active = false; decrement_queue_count(dqm, qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = remove_queue_mes(dqm, q, qpd); if (retval) { pr_err("Failed to evict queue %d\n", @@ -1003,7 +1003,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, } } pdd->last_evict_timestamp = get_jiffies_64(); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, qpd->is_debug ? KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : @@ -1132,7 +1132,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, q->properties.is_active = true; increment_queue_count(dqm, &pdd->qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = add_queue_mes(dqm, q, qpd); if (retval) { pr_err("Failed to restore queue %d\n", @@ -1141,7 +1141,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, } } } - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); qpd->evicted = 0; @@ -1282,7 +1282,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) if (test_bit(pipe_offset + queue, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) dqm->allocated_queues[pipe] |= 1 << queue; } @@ -1426,14 +1426,14 @@ static int set_sched_resources(struct device_queue_manager *dqm) int i, mec; struct scheduling_resources res; - res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap; + res.vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; res.queue_mask = 0; for (i = 0; i < KGD_MAX_QUEUES; ++i) { - mec = (i / dqm->dev->shared_resources.num_queue_per_pipe) - / dqm->dev->shared_resources.num_pipe_per_mec; + mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe) + / dqm->dev->kfd->shared_resources.num_pipe_per_mec; - if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap)) + if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; /* only acquire queues from the first MEC */ @@ -1489,7 +1489,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm_lock(dqm); - if (!dqm->dev->shared_resources.enable_mes) { + if (!dqm->dev->kfd->shared_resources.enable_mes) { retval = pm_init(&dqm->packet_mgr, dqm); if (retval) goto fail_packet_manager_init; @@ -1516,14 +1516,14 @@ static int start_cpsch(struct device_queue_manager *dqm) 
dqm->is_hws_hang = false; dqm->is_resetting = false; dqm->sched_running = true; - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); return 0; fail_allocate_vidmem: fail_set_sched_resources: - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr, false); fail_packet_manager_init: dqm_unlock(dqm); @@ -1541,7 +1541,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) } if (!dqm->is_hws_hang) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false); else remove_all_queues_mes(dqm); @@ -1550,11 +1550,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) hanging = dqm->is_hws_hang || dqm->is_resetting; dqm->sched_running = false; - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_release_ib(&dqm->packet_mgr); kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr, hanging); dqm_unlock(dqm); @@ -1673,7 +1673,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (q->properties.is_active) { increment_queue_count(dqm, qpd, q); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); else @@ -1893,7 +1893,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, list_del(&q->list); qpd->queue_count--; if (q->properties.is_active) { - if (!dqm->dev->shared_resources.enable_mes) { + if (!dqm->dev->kfd->shared_resources.enable_mes) { decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -2056,7 +2056,7 @@ static int get_wave_state(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || - q->properties.is_active || !q->device->cwsr_enabled || + q->properties.is_active || !q->device->kfd->cwsr_enabled || !mqd_mgr->get_wave_state) { dqm_unlock(dqm); return -EINVAL; @@ -2105,7 +2105,7 @@ static int checkpoint_mqd(struct device_queue_manager *dqm, dqm_lock(dqm); - if (q->properties.is_active || !q->device->cwsr_enabled) { + if (q->properties.is_active || !q->device->kfd->cwsr_enabled) { r = -EINVAL; goto dqm_unlock; } @@ -2158,7 +2158,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, if (q->properties.is_active) { decrement_queue_count(dqm, qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = remove_queue_mes(dqm, q, qpd); if (retval) pr_err("Failed to remove queue %d\n", @@ -2180,7 +2180,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, } } - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, filter, 0); if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { @@ -2242,11 +2242,11 @@ out_free: static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) { int retval; - struct kfd_dev *dev = dqm->dev; + struct kfd_node *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = 
dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * get_num_all_sdma_engines(dqm) * - dev->device_info.num_sdma_queues_per_engine + + dev->kfd->device_info.num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, @@ -2256,7 +2256,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) return retval; } -struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) +struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) { struct device_queue_manager *dqm; @@ -2373,7 +2373,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) if (init_mqd_managers(dqm)) goto out_free; - if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { + if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { pr_err("Failed to allocate hiq sdma mqd trunk buffer\n"); goto out_free; } @@ -2386,7 +2386,7 @@ out_free: return NULL; } -static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, +static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd) { WARN(!mqd, "No hiq sdma mqd trunk to free"); @@ -2397,7 +2397,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, void device_queue_manager_uninit(struct device_queue_manager *dqm) { dqm->ops.uninitialize(dqm); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd); kfree(dqm); } @@ -2479,7 +2479,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { if (!test_bit(pipe_offset + queue, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; r = dqm->dev->kfd2kgd->hqd_dump( @@ -2497,7 +2497,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { for (queue = 0; - queue < dqm->dev->device_info.num_sdma_queues_per_engine; + queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( dqm->dev->adev, pipe, queue, &dump, &n_regs); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index a537b9ef3e16..e554a48f3054 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -207,7 +207,7 @@ struct device_queue_manager_asic_ops { struct queue *q, struct qcm_process_device *qpd); struct mqd_manager * (*mqd_manager_init)(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); }; /** @@ -228,7 +228,7 @@ struct device_queue_manager { struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX]; struct packet_manager packet_mgr; - struct kfd_dev *dev; + struct kfd_node *dev; struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */ struct list_head queues; unsigned int saved_flags; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index 914d94679d73..8af643388768 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -60,10 +60,10 @@ static int update_qpd_v9(struct device_queue_manager *dqm, qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (dqm->dev->noretry && !dqm->dev->use_iommu_v2) + if (dqm->dev->kfd->noretry 
&& !dqm->dev->kfd->use_iommu_v2) qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; - if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3)) + if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3)) qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index 38c9e1ca6691..6421b620388d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -138,7 +138,7 @@ void kfd_doorbell_fini(struct kfd_dev *kfd) iounmap(kfd->doorbell_kernel_ptr); } -int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { phys_addr_t address; @@ -148,7 +148,7 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, * For simplicitly we only allow mapping of the entire doorbell * allocation of a single device & process. */ - if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev)) + if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev->kfd)) return -EINVAL; pdd = kfd_get_process_device_data(dev, process); @@ -170,13 +170,13 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, " vm_flags == 0x%04lX\n" " size == 0x%04lX\n", (unsigned long long) vma->vm_start, address, vma->vm_flags, - kfd_doorbell_process_slice(dev)); + kfd_doorbell_process_slice(dev->kfd)); return io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, - kfd_doorbell_process_slice(dev), + kfd_doorbell_process_slice(dev->kfd), vma->vm_page_prot); } @@ -278,14 +278,14 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd) phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd) { if (!pdd->doorbell_index) { - int r = kfd_alloc_process_doorbells(pdd->dev, + int r = kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index); if (r < 0) return 0; } - return pdd->dev->doorbell_base + - pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev); + return pdd->dev->kfd->doorbell_base + + pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev->kfd); } int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index c894cf8f7c50..9926186f88a6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -348,7 +348,7 @@ static int kfd_event_page_set(struct kfd_process *p, void *kernel_address, int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset) { - struct kfd_dev *kfd; + struct kfd_node *kfd; struct kfd_process_device *pdd; void *mem, *kern_addr; uint64_t size; @@ -1125,7 +1125,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, } #ifdef KFD_SUPPORT_IOMMU_V2 -void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid, +void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid, unsigned long address, bool is_write_requested, bool is_execute_requested) { @@ -1221,8 +1221,8 @@ void kfd_signal_hw_exception_event(u32 pasid) kfd_unref_process(p); } -void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, - struct kfd_vm_fault_info *info) +void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, + struct kfd_vm_fault_info *info) { struct kfd_event *ev; uint32_t id; @@ -1269,7 +1269,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, kfd_unref_process(p); } -void 
kfd_signal_reset_event(struct kfd_dev *dev) +void kfd_signal_reset_event(struct kfd_node *dev) { struct kfd_hsa_hw_exception_data hw_exception_data; struct kfd_hsa_memory_exception_data memory_exception_data; @@ -1325,7 +1325,7 @@ void kfd_signal_reset_event(struct kfd_dev *dev) srcu_read_unlock(&kfd_processes_srcu, idx); } -void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid) +void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid) { struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); struct kfd_hsa_memory_exception_data memory_exception_data; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 8aebe408c544..da2ca00d79e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -322,21 +322,21 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) pdd->lds_base = MAKE_LDS_APP_BASE_VI(); pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - if (!pdd->dev->use_iommu_v2) { + if (!pdd->dev->kfd->use_iommu_v2) { /* dGPUs: SVM aperture starting at 0 * with small reserved space for kernel. * Set them to CANONICAL addresses. */ pdd->gpuvm_base = SVM_USER_BASE; pdd->gpuvm_limit = - pdd->dev->shared_resources.gpuvm_size - 1; + pdd->dev->kfd->shared_resources.gpuvm_size - 1; } else { /* set them to non CANONICAL addresses, and no SVM is * allocated. */ pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1); pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base, - pdd->dev->shared_resources.gpuvm_size); + pdd->dev->kfd->shared_resources.gpuvm_size); } pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI(); @@ -356,7 +356,7 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) */ pdd->gpuvm_base = SVM_USER_BASE; pdd->gpuvm_limit = - pdd->dev->shared_resources.gpuvm_size - 1; + pdd->dev->kfd->shared_resources.gpuvm_size - 1; pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9(); pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); @@ -365,7 +365,7 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) int kfd_init_apertures(struct kfd_process *process) { uint8_t id = 0; - struct kfd_dev *dev; + struct kfd_node *dev; struct kfd_process_device *pdd; /*Iterating over all devices*/ @@ -417,7 +417,7 @@ int kfd_init_apertures(struct kfd_process *process) } } - if (!dev->use_iommu_v2) { + if (!dev->kfd->use_iommu_v2) { /* dGPUs: the reserved space for kernel * before SVM */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index 0d53f6067422..0f0fdea4cd8a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -187,7 +187,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1) REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID1, WGP_ID)); } -static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev, +static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, uint16_t pasid, uint16_t source_id) { int ret = -EINVAL; @@ -225,7 +225,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_dev *dev, amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); } -static bool event_interrupt_isr_v11(struct kfd_dev *dev, +static bool event_interrupt_isr_v11(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -274,7 +274,7 @@ static bool 
event_interrupt_isr_v11(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); } -static void event_interrupt_wq_v11(struct kfd_dev *dev, +static void event_interrupt_wq_v11(struct kfd_node *dev, const uint32_t *ih_ring_entry) { uint16_t source_id, client_id, ring_id, pasid, vmid; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 0b75a37b689b..861bccb1e9dc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -90,7 +90,7 @@ enum SQ_INTERRUPT_ERROR_TYPE { #define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000 #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20 -static void event_interrupt_poison_consumption_v9(struct kfd_dev *dev, +static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { int old_poison, ret = -EINVAL; @@ -160,7 +160,7 @@ static bool context_id_expected(struct kfd_dev *dev) } } -static bool event_interrupt_isr_v9(struct kfd_dev *dev, +static bool event_interrupt_isr_v9(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag) @@ -206,7 +206,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, *patched_flag = true; memcpy(patched_ihre, ih_ring_entry, - dev->device_info.ih_ring_entry_size); + dev->kfd->device_info.ih_ring_entry_size); pasid = dev->dqm->vmid_pasid[vmid]; @@ -235,7 +235,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, uint32_t context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); - if (context_id == 0 && context_id_expected(dev)) + if (context_id == 0 && context_id_expected(dev->kfd)) return false; } @@ -253,7 +253,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, !amdgpu_no_queue_eviction_on_vm_fault); } -static void event_interrupt_wq_v9(struct kfd_dev *dev, +static void event_interrupt_wq_v9(struct kfd_node *dev, const uint32_t *ih_ring_entry) { uint16_t source_id, client_id, pasid, vmid; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index 34772fe74296..dd3c43c1ad70 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -50,29 +50,29 @@ static void interrupt_wq(struct work_struct *); -int kfd_interrupt_init(struct kfd_dev *kfd) +int kfd_interrupt_init(struct kfd_node *node) { int r; - r = kfifo_alloc(&kfd->ih_fifo, - KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size, + r = kfifo_alloc(&node->ih_fifo, + KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size, GFP_KERNEL); if (r) { - dev_err(kfd->adev->dev, "Failed to allocate IH fifo\n"); + dev_err(node->adev->dev, "Failed to allocate IH fifo\n"); return r; } - kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); - if (unlikely(!kfd->ih_wq)) { - kfifo_free(&kfd->ih_fifo); - dev_err(kfd->adev->dev, "Failed to allocate KFD IH workqueue\n"); + node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); + if (unlikely(!node->ih_wq)) { + kfifo_free(&node->ih_fifo); + dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n"); return -ENOMEM; } - spin_lock_init(&kfd->interrupt_lock); + spin_lock_init(&node->interrupt_lock); - INIT_WORK(&kfd->interrupt_work, interrupt_wq); + INIT_WORK(&node->interrupt_work, interrupt_wq); - kfd->interrupts_active = true; + node->interrupts_active = true; /* * After this function returns, the interrupt will be enabled. 
This @@ -84,7 +84,7 @@ int kfd_interrupt_init(struct kfd_dev *kfd) return 0; } -void kfd_interrupt_exit(struct kfd_dev *kfd) +void kfd_interrupt_exit(struct kfd_node *node) { /* * Stop the interrupt handler from writing to the ring and scheduling @@ -93,31 +93,31 @@ void kfd_interrupt_exit(struct kfd_dev *kfd) */ unsigned long flags; - spin_lock_irqsave(&kfd->interrupt_lock, flags); - kfd->interrupts_active = false; - spin_unlock_irqrestore(&kfd->interrupt_lock, flags); + spin_lock_irqsave(&node->interrupt_lock, flags); + node->interrupts_active = false; + spin_unlock_irqrestore(&node->interrupt_lock, flags); /* * flush_work ensures that there are no outstanding * work-queue items that will access interrupt_ring. New work items * can't be created because we stopped interrupt handling above. */ - flush_workqueue(kfd->ih_wq); + flush_workqueue(node->ih_wq); - kfifo_free(&kfd->ih_fifo); + kfifo_free(&node->ih_fifo); } /* * Assumption: single reader/writer. This function is not re-entrant */ -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) +bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry) { int count; - count = kfifo_in(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info.ih_ring_entry_size); - if (count != kfd->device_info.ih_ring_entry_size) { - dev_dbg_ratelimited(kfd->adev->dev, + count = kfifo_in(&node->ih_fifo, ih_ring_entry, + node->kfd->device_info.ih_ring_entry_size); + if (count != node->kfd->device_info.ih_ring_entry_size) { + dev_dbg_ratelimited(node->adev->dev, "Interrupt ring overflow, dropping interrupt %d\n", count); return false; @@ -129,32 +129,32 @@ bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) /* * Assumption: single reader/writer. This function is not re-entrant */ -static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry) +static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry) { int count; - count = kfifo_out(&kfd->ih_fifo, ih_ring_entry, - kfd->device_info.ih_ring_entry_size); + count = kfifo_out(&node->ih_fifo, ih_ring_entry, + node->kfd->device_info.ih_ring_entry_size); - WARN_ON(count && count != kfd->device_info.ih_ring_entry_size); + WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size); - return count == kfd->device_info.ih_ring_entry_size; + return count == node->kfd->device_info.ih_ring_entry_size; } static void interrupt_wq(struct work_struct *work) { - struct kfd_dev *dev = container_of(work, struct kfd_dev, + struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work); uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE]; unsigned long start_jiffies = jiffies; - if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { + if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) { dev_err_once(dev->adev->dev, "Ring entry too small\n"); return; } while (dequeue_ih_ring_entry(dev, ih_ring_entry)) { - dev->device_info.event_interrupt_class->interrupt_wq(dev, + dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev, ih_ring_entry); if (time_is_before_jiffies(start_jiffies + HZ)) { /* If we spent more than a second processing signals, @@ -166,14 +166,14 @@ static void interrupt_wq(struct work_struct *work) } } -bool interrupt_is_wanted(struct kfd_dev *dev, +bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag) { /* integer and bitwise OR so there is no boolean short-circuiting */ unsigned int wanted = 0; - wanted |= 
dev->device_info.event_interrupt_class->interrupt_isr(dev, + wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev, ih_ring_entry, patched_ihre, flag); return wanted != 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index ec1bf611624e..6eee9a0944f3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -109,11 +109,11 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) */ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct kfd_process *p = pdd->process; int err; - if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND) + if (!dev->kfd->use_iommu_v2 || pdd->bound == PDD_BOUND) return 0; if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) { @@ -146,7 +146,7 @@ void kfd_iommu_unbind_process(struct kfd_process *p) /* Callback for process shutdown invoked by the IOMMU driver */ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) { - struct kfd_dev *dev = kfd_device_by_pci_dev(pdev); + struct kfd_node *dev = kfd_device_by_pci_dev(pdev); struct kfd_process *p; struct kfd_process_device *pdd; @@ -182,7 +182,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid) static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid, unsigned long address, u16 flags) { - struct kfd_dev *dev; + struct kfd_node *dev; dev_warn_ratelimited(kfd_device, "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X", @@ -205,7 +205,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid, * Bind processes do the device that have been temporarily unbound * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device. */ -static int kfd_bind_processes_to_device(struct kfd_dev *kfd) +static int kfd_bind_processes_to_device(struct kfd_node *knode) { struct kfd_process_device *pdd; struct kfd_process *p; @@ -216,14 +216,14 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(kfd, p); + pdd = kfd_get_process_device_data(knode, p); if (WARN_ON(!pdd) || pdd->bound != PDD_BOUND_SUSPENDED) { mutex_unlock(&p->mutex); continue; } - err = amd_iommu_bind_pasid(kfd->adev->pdev, p->pasid, + err = amd_iommu_bind_pasid(knode->adev->pdev, p->pasid, p->lead_thread); if (err < 0) { pr_err("Unexpected pasid 0x%x binding failure\n", @@ -246,7 +246,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) * processes will be restored to PDD_BOUND state in * kfd_bind_processes_to_device. 
*/ -static void kfd_unbind_processes_from_device(struct kfd_dev *kfd) +static void kfd_unbind_processes_from_device(struct kfd_node *knode) { struct kfd_process_device *pdd; struct kfd_process *p; @@ -256,7 +256,7 @@ static void kfd_unbind_processes_from_device(struct kfd_dev *kfd) hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { mutex_lock(&p->mutex); - pdd = kfd_get_process_device_data(kfd, p); + pdd = kfd_get_process_device_data(knode, p); if (WARN_ON(!pdd)) { mutex_unlock(&p->mutex); @@ -281,7 +281,7 @@ void kfd_iommu_suspend(struct kfd_dev *kfd) if (!kfd->use_iommu_v2) return; - kfd_unbind_processes_from_device(kfd); + kfd_unbind_processes_from_device(kfd->node); amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); @@ -312,7 +312,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd) amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, iommu_invalid_ppr_cb); - err = kfd_bind_processes_to_device(kfd); + err = kfd_bind_processes_to_device(kfd->node); if (err) { amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index bcf7bc3302c9..1bea629c49ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -38,7 +38,7 @@ /* Initialize a kernel queue, including allocations of GART memory * needed for the queue. */ -static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, +static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev, enum kfd_queue_type type, unsigned int queue_size) { struct queue_properties prop; @@ -75,7 +75,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, if (!kq->mqd_mgr) return false; - prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); + prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off); if (!prop.doorbell_ptr) { pr_err("Failed to initialize doorbell"); @@ -112,7 +112,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->rptr_kernel = kq->rptr_mem->cpu_ptr; kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; - retval = kfd_gtt_sa_allocate(dev, dev->device_info.doorbell_size, + retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size, &kq->wptr_mem); if (retval != 0) @@ -189,7 +189,7 @@ err_rptr_allocate_vidmem: err_eop_allocate_vidmem: kfd_gtt_sa_free(dev, kq->pq); err_pq_allocate_vidmem: - kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); + kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr); err_get_kernel_doorbell: return false; @@ -220,7 +220,7 @@ static void kq_uninitialize(struct kernel_queue *kq, bool hanging) kfd_gtt_sa_free(kq->dev, kq->eop_mem); kfd_gtt_sa_free(kq->dev, kq->pq); - kfd_release_kernel_doorbell(kq->dev, + kfd_release_kernel_doorbell(kq->dev->kfd, kq->queue->properties.doorbell_ptr); uninit_queue(kq->queue); } @@ -298,7 +298,7 @@ void kq_submit_packet(struct kernel_queue *kq) } pr_debug("\n"); #endif - if (kq->dev->device_info.doorbell_size == 8) { + if (kq->dev->kfd->device_info.doorbell_size == 8) { *kq->wptr64_kernel = kq->pending_wptr64; write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, kq->pending_wptr64); @@ -311,7 +311,7 @@ void kq_submit_packet(struct kernel_queue *kq) void kq_rollback_packet(struct kernel_queue *kq) { - if (kq->dev->device_info.doorbell_size == 8) { + if (kq->dev->kfd->device_info.doorbell_size == 8) { 
kq->pending_wptr64 = *kq->wptr64_kernel; kq->pending_wptr = *kq->wptr_kernel % (kq->queue->properties.queue_size / 4); @@ -320,7 +320,7 @@ void kq_rollback_packet(struct kernel_queue *kq) } } -struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, +struct kernel_queue *kernel_queue_init(struct kfd_node *dev, enum kfd_queue_type type) { struct kernel_queue *kq; @@ -345,7 +345,7 @@ void kernel_queue_uninit(struct kernel_queue *kq, bool hanging) } /* FIXME: Can this test be removed? */ -static __attribute__((unused)) void test_kq(struct kfd_dev *dev) +static __attribute__((unused)) void test_kq(struct kfd_node *dev) { struct kernel_queue *kq; uint32_t *buffer, i; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 383202fd1ea2..9a6244430845 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -53,7 +53,7 @@ void kq_rollback_packet(struct kernel_queue *kq); struct kernel_queue { /* data */ - struct kfd_dev *dev; + struct kfd_node *dev; struct mqd_manager *mqd_mgr; struct queue *queue; uint64_t pending_wptr64; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 54933903bcb8..1e187677c90a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -423,7 +423,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->id, prange->prefetch_loc, + 0, adev->kfd.dev->node->id, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -456,7 +456,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->id, trigger); + 0, adev->kfd.dev->node->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -701,7 +701,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->id, 0, prange->prefetch_loc, + adev->kfd.dev->node->id, 0, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -737,7 +737,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->id, 0, trigger); + adev->kfd.dev->node->id, 0, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 623ccd227b7d..61f6dd68c84b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -46,7 +46,7 @@ int pipe_priority_map[] = { KFD_PIPE_PRIORITY_CS_HIGH }; -struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q) +struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj = NULL; @@ -61,7 +61,7 @@ struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_propertie return mqd_mem_obj; } -struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, +struct kfd_mem_obj 
*allocate_sdma_mqd(struct kfd_node *dev, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj = NULL; @@ -72,7 +72,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, return NULL; offset = (q->sdma_engine_id * - dev->device_info.num_sdma_queues_per_engine + + dev->kfd->device_info.num_sdma_queues_per_engine + q->sdma_queue_id) * dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 57f900ccaa10..46fc3f273d0d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -68,7 +68,7 @@ */ extern int pipe_priority_map[]; struct mqd_manager { - struct kfd_mem_obj* (*allocate_mqd)(struct kfd_dev *kfd, + struct kfd_mem_obj* (*allocate_mqd)(struct kfd_node *kfd, struct queue_properties *q); void (*init_mqd)(struct mqd_manager *mm, void **mqd, @@ -121,14 +121,14 @@ struct mqd_manager { uint32_t (*read_doorbell_id)(void *mqd); struct mutex mqd_mutex; - struct kfd_dev *dev; + struct kfd_node *dev; uint32_t mqd_size; }; -struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, +struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q); -struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_dev *dev, +struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, struct queue_properties *q); void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 4889865c725c..03e04d5e5a11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -74,7 +74,7 @@ static void set_priority(struct cik_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -390,7 +390,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -470,7 +470,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, } struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index d3e2b6a599a4..7a93be0ebb19 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -74,7 +74,7 @@ static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -122,7 +122,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; } - if (mm->dev->cwsr_enabled) { + if (mm->dev->kfd->cwsr_enabled) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -210,7 +210,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_pq_doorbell_control |= 1 << 
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled) + if (mm->dev->kfd->cwsr_enabled) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -405,7 +405,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 5aa75f72caa1..dff171b54b5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -81,7 +81,7 @@ static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -91,12 +91,12 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, * MES write to areas beyond MQD size. So allocate * 1 PAGE_SIZE memory for MQD is MES is enabled. */ - if (kfd->shared_resources.enable_mes) + if (node->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_compute_mqd); - if (kfd_gtt_sa_allocate(kfd, size, &mqd_mem_obj)) + if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj)) return NULL; return mqd_mem_obj; @@ -113,7 +113,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr; addr = mqd_mem_obj->gpu_addr; - if (mm->dev->shared_resources.enable_mes) + if (mm->dev->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_compute_mqd); @@ -155,7 +155,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; } - if (mm->dev->cwsr_enabled) { + if (mm->dev->kfd->cwsr_enabled) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -243,7 +243,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_pq_doorbell_control |= 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled) + if (mm->dev->kfd->cwsr_enabled) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -319,7 +319,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr; - if (mm->dev->shared_resources.enable_mes) + if (mm->dev->kfd->shared_resources.enable_mes) size = PAGE_SIZE; else size = sizeof(struct v11_sdma_mqd); @@ -387,7 +387,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -463,7 +463,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, * To allocate SDMA MQDs by generic functions * when MES is enabled. 
*/ - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { mqd->allocate_mqd = allocate_mqd; mqd->free_mqd = kfd_free_mqd_cp; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 51b53110341b..943a738e73f9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -83,7 +83,7 @@ static void set_priority(struct v9_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, struct queue_properties *q) { int retval; @@ -105,11 +105,11 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, * pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct * amdgpu memory functions to do so. */ - if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { + if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev, + retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev, ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), &(mqd_mem_obj->gtt_mem), @@ -121,7 +121,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, return NULL; } } else { - retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd), + retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd), &mqd_mem_obj); if (retval) return NULL; @@ -136,7 +136,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, { uint64_t addr; struct v9_mqd *m; - struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev; m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr; addr = mqd_mem_obj->gpu_addr; @@ -169,7 +168,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, if (q->format == KFD_QUEUE_FORMAT_AQL) { m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { /* On GC 9.4.3, DW 41 is re-purposed as * compute_tg_chunk_size. 
* TODO: review this setting when active CUs in the @@ -179,7 +178,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, } } else { /* PM4 queue */ - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { m->compute_static_thread_mgmt_se6 = 0; /* TODO: program pm4_target_xcc */ } @@ -190,7 +189,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT); } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -225,7 +224,6 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, struct queue_properties *q, struct mqd_update_info *minfo) { - struct amdgpu_device *adev = (struct amdgpu_device *)mm->dev->adev; struct v9_mqd *m; m = get_mqd(mqd); @@ -275,13 +273,13 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT | 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT | 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT; - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) + if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) m->cp_hqd_pq_control |= - CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; m->cp_hqd_pq_doorbell_control |= 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = 0; update_cu_mask(mm, mqd, minfo); @@ -487,7 +485,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 530ba6f5b57e..f6b4a5686dcb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -77,7 +77,7 @@ static void set_priority(struct vi_mqd *m, struct queue_properties *q) m->cp_hqd_queue_priority = q->priority; } -static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, +static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd, struct queue_properties *q) { struct kfd_mem_obj *mqd_mem_obj; @@ -136,7 +136,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT); } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) { m->cp_hqd_persistent_state |= (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); m->cp_hqd_ctx_save_base_addr_lo = @@ -227,7 +227,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT; } - if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) + if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT | mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT; @@ -446,7 +446,7 @@ static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) #endif struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; @@ -528,7 +528,7 @@ struct 
mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, } struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, - struct kfd_dev *dev) + struct kfd_node *dev) { struct mqd_manager *mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index f612325241aa..2f54172e9175 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -45,7 +45,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm, unsigned int process_count, queue_count, compute_queue_count, gws_queue_count; unsigned int map_queue_size; unsigned int max_proc_per_quantum = 1; - struct kfd_dev *dev = pm->dqm->dev; + struct kfd_node *dev = pm->dqm->dev; process_count = pm->dqm->processes_count; queue_count = pm->dqm->active_queue_count; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 18250845a989..54d7d4665ad2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -119,7 +119,7 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, struct pm4_mes_runlist *packet; int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; + struct kfd_node *kfd = pm->dqm->dev; /* Determine the number of processes to map together to HW: * it can not exceed the number of VMIDs available to the @@ -220,7 +220,8 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, case KFD_QUEUE_TYPE_SDMA: case KFD_QUEUE_TYPE_SDMA_XGMI: use_static = false; /* no static queues under SDMA */ - if (q->properties.sdma_engine_id < 2 && !pm_use_ext_eng(q->device)) + if (q->properties.sdma_engine_id < 2 && + !pm_use_ext_eng(q->device->kfd)) packet->bitfields2.engine_sel = q->properties.sdma_engine_id + engine_sel__mes_map_queues__sdma0_vi; else { @@ -263,7 +264,8 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, sizeof(struct pm4_mes_unmap_queues)); - packet->bitfields2.extended_engine_sel = pm_use_ext_eng(pm->dqm->dev) ? + packet->bitfields2.extended_engine_sel = + pm_use_ext_eng(pm->dqm->dev->kfd) ? 
extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel : extended_engine_sel__mes_unmap_queues__legacy_engine_sel; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index 4f951eaa6ee8..faf4772ed317 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -77,7 +77,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, { struct pm4_mes_runlist *packet; int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; + struct kfd_node *kfd = pm->dqm->dev; if (WARN_ON(!ib)) return -EFAULT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 94a438956868..fdb97e5d0c01 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -210,11 +210,13 @@ enum cache_policy { ((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3))) +struct kfd_node; + struct kfd_event_interrupt_class { - bool (*interrupt_isr)(struct kfd_dev *dev, + bool (*interrupt_isr)(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *patched_flag); - void (*interrupt_wq)(struct kfd_dev *dev, + void (*interrupt_wq)(struct kfd_node *dev, const uint32_t *ih_ring_entry); }; @@ -236,8 +238,8 @@ struct kfd_device_info { uint64_t reserved_sdma_queues_bitmap; }; -unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev); -unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev); +unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev); +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev); struct kfd_mem_obj { uint32_t range_start; @@ -253,13 +255,59 @@ struct kfd_vmid_info { uint32_t vmid_num_kfd; }; +struct kfd_dev; + +struct kfd_node { + struct amdgpu_device *adev; /* Duplicated here along with keeping + * a copy in kfd_dev to save a hop + */ + const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with + * keeping a copy in kfd_dev to + * save a hop + */ + struct kfd_vmid_info vm_info; + unsigned int id; /* topology stub index */ + /* Interrupts */ + struct kfifo ih_fifo; + struct workqueue_struct *ih_wq; + struct work_struct interrupt_work; + spinlock_t interrupt_lock; + + /* + * Interrupts of interest to KFD are copied + * from the HW ring into a SW ring. + */ + bool interrupts_active; + + /* QCM Device instance */ + struct device_queue_manager *dqm; + + /* Global GWS resource shared between processes */ + void *gws; + bool gws_debug_workaround; + + /* Clients watching SMI events */ + struct list_head smi_clients; + spinlock_t smi_lock; + uint32_t reset_seq_num; + + /* SRAM ECC flag */ + atomic_t sram_ecc_flag; + + /*spm process id */ + unsigned int spm_pasid; + + /* Maximum process number mapped to HW scheduler */ + unsigned int max_proc_per_quantum; + + struct kfd_dev *kfd; +}; + struct kfd_dev { struct amdgpu_device *adev; struct kfd_device_info device_info; - unsigned int id; /* topology stub index */ - phys_addr_t doorbell_base; /* Start of actual doorbells used by * KFD. 
It is aligned for mapping * into user mode @@ -274,7 +322,6 @@ struct kfd_dev { */ struct kgd2kfd_shared_resources shared_resources; - struct kfd_vmid_info vm_info; struct kfd_local_mem_info local_mem_info; const struct kfd2kgd_calls *kfd2kgd; @@ -290,30 +337,13 @@ struct kfd_dev { unsigned int gtt_sa_chunk_size; unsigned int gtt_sa_num_of_chunks; - /* Interrupts */ - struct kfifo ih_fifo; - struct workqueue_struct *ih_wq; - struct work_struct interrupt_work; - spinlock_t interrupt_lock; - - /* QCM Device instance */ - struct device_queue_manager *dqm; - bool init_complete; - /* - * Interrupts of interest to KFD are copied - * from the HW ring into a SW ring. - */ - bool interrupts_active; /* Firmware versions */ uint16_t mec_fw_version; uint16_t mec2_fw_version; uint16_t sdma_fw_version; - /* Maximum process number mapped to HW scheduler */ - unsigned int max_proc_per_quantum; - /* CWSR */ bool cwsr_enabled; const void *cwsr_isa; @@ -327,21 +357,9 @@ struct kfd_dev { /* Use IOMMU v2 flag */ bool use_iommu_v2; - /* SRAM ECC flag */ - atomic_t sram_ecc_flag; - /* Compute Profile ref. count */ atomic_t compute_profile; - /* Global GWS resource shared between processes */ - void *gws; - - /* Clients watching SMI events */ - struct list_head smi_clients; - spinlock_t smi_lock; - - uint32_t reset_seq_num; - struct ida doorbell_ida; unsigned int max_doorbell_slices; @@ -349,6 +367,8 @@ struct kfd_dev { /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ struct dev_pagemap pgmap; + + struct kfd_node *node; }; enum kfd_mempool { @@ -563,7 +583,7 @@ struct queue { unsigned int doorbell_id; struct kfd_process *process; - struct kfd_dev *device; + struct kfd_node *device; void *gws; /* procfs */ @@ -697,7 +717,7 @@ enum kfd_pdd_bound { /* Data that is per-process-per device. */ struct kfd_process_device { /* The device that owns this data. */ - struct kfd_dev *dev; + struct kfd_node *dev; /* The process that owns this kfd_process_device. 
*/ struct kfd_process *process; @@ -925,7 +945,7 @@ struct amdkfd_ioctl_desc { unsigned int cmd_drv; const char *name; }; -bool kfd_dev_is_large_bar(struct kfd_dev *dev); +bool kfd_dev_is_large_bar(struct kfd_node *dev); int kfd_process_create_wq(void); void kfd_process_destroy_wq(void); @@ -961,16 +981,16 @@ int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id); int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct file *drm_file); -struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, +struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, struct kfd_process *p); -struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev, struct kfd_process *p); -struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, struct kfd_process *p); bool kfd_process_xnack_mode(struct kfd_process *p, bool supported); -int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma); /* KFD process API for creating and translating handles */ @@ -994,7 +1014,7 @@ void kfd_pasid_free(u32 pasid); size_t kfd_doorbell_process_slice(struct kfd_dev *kfd); int kfd_doorbell_init(struct kfd_dev *kfd); void kfd_doorbell_fini(struct kfd_dev *kfd); -int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma); void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off); @@ -1012,10 +1032,10 @@ void kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index); /* GTT Sub-Allocator */ -int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, +int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size, struct kfd_mem_obj **mem_obj); -int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj); +int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj); extern struct device *kfd_device; @@ -1028,25 +1048,25 @@ void kfd_procfs_del_queue(struct queue *q); /* Topology */ int kfd_topology_init(void); void kfd_topology_shutdown(void); -int kfd_topology_add_device(struct kfd_dev *gpu); -int kfd_topology_remove_device(struct kfd_dev *gpu); +int kfd_topology_add_device(struct kfd_node *gpu); +int kfd_topology_remove_device(struct kfd_node *gpu); struct kfd_topology_device *kfd_topology_device_by_proximity_domain( uint32_t proximity_domain); struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock( uint32_t proximity_domain); struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); -struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); -struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); -struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev); -int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); +struct kfd_node *kfd_device_by_id(uint32_t gpu_id); +struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); +struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); +int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); /* Interrupts */ 
-int kfd_interrupt_init(struct kfd_dev *dev); -void kfd_interrupt_exit(struct kfd_dev *dev); -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry); -bool interrupt_is_wanted(struct kfd_dev *dev, +int kfd_interrupt_init(struct kfd_node *dev); +void kfd_interrupt_exit(struct kfd_node *dev); +bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry); +bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag); @@ -1174,22 +1194,22 @@ void print_queue_properties(struct queue_properties *q); void print_queue(struct queue *q); struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); + struct kfd_node *dev); struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, - struct kfd_dev *dev); -struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev); + struct kfd_node *dev); +struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev); void device_queue_manager_uninit(struct device_queue_manager *dqm); -struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, +struct kernel_queue *kernel_queue_init(struct kfd_node *dev, enum kfd_queue_type type); void kernel_queue_uninit(struct kernel_queue *kq, bool hanging); int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid); @@ -1206,7 +1226,7 @@ void kfd_process_dequeue_from_all_devices(struct kfd_process *p); int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p); void pqm_uninit(struct process_queue_manager *pqm); int pqm_create_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, + struct kfd_node *dev, struct file *f, struct queue_properties *properties, unsigned int *qid, @@ -1323,7 +1343,7 @@ int kfd_wait_on_events(struct kfd_process *p, uint32_t *wait_result); void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id, uint32_t valid_id_bits); -void kfd_signal_iommu_event(struct kfd_dev *dev, +void kfd_signal_iommu_event(struct kfd_node *dev, u32 pasid, unsigned long address, bool is_write_requested, bool is_execute_requested); void kfd_signal_hw_exception_event(u32 pasid); @@ -1339,12 +1359,12 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, int kfd_get_num_events(struct kfd_process *p); int kfd_event_destroy(struct kfd_process *p, uint32_t event_id); -void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid, +void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid, struct kfd_vm_fault_info *info); -void kfd_signal_reset_event(struct kfd_dev *dev); +void kfd_signal_reset_event(struct kfd_node *dev); -void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid); +void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid); void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type); @@ -1359,12 +1379,12 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) bool kfd_is_locked(void); /* 
Compute profile */ -void kfd_inc_compute_active(struct kfd_dev *dev); -void kfd_dec_compute_active(struct kfd_dev *dev); +void kfd_inc_compute_active(struct kfd_node *dev); +void kfd_dec_compute_active(struct kfd_node *dev); /* Cgroup Support */ /* Check with device cgroup if @kfd device is accessible */ -static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) +static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd) { #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) struct drm_device *ddev = adev_to_drm(kfd->adev); @@ -1389,7 +1409,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data); int kfd_debugfs_rls_by_device(struct seq_file *m, void *data); int pm_debugfs_runlist(struct seq_file *m, void *data); -int kfd_debugfs_hang_hws(struct kfd_dev *dev); +int kfd_debugfs_hang_hws(struct kfd_node *dev); int pm_debugfs_hang_hws(struct packet_manager *pm); int dqm_debugfs_hang_hws(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 07a9eaf9b7d8..66e021889c64 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -269,7 +269,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) int cu_cnt; int wave_cnt; int max_waves_per_cu; - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct kfd_process *proc = NULL; struct kfd_process_device *pdd = NULL; @@ -691,7 +691,7 @@ void kfd_process_destroy_wq(void) static void kfd_process_free_gpuvm(struct kgd_mem *mem, struct kfd_process_device *pdd, void **kptr) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; if (kptr && *kptr) { amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem); @@ -713,7 +713,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, uint64_t gpu_va, uint32_t size, uint32_t flags, struct kgd_mem **mem, void **kptr) { - struct kfd_dev *kdev = pdd->dev; + struct kfd_node *kdev = pdd->dev; int err; err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size, @@ -982,7 +982,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) static void kfd_process_kunmap_signal_bo(struct kfd_process *p) { struct kfd_process_device *pdd; - struct kfd_dev *kdev; + struct kfd_node *kdev; void *mem; kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle)); @@ -1040,9 +1040,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) bitmap_free(pdd->qpd.doorbell_bitmap); idr_destroy(&pdd->alloc_idr); - kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index); + kfd_free_process_doorbells(pdd->dev->kfd, pdd->doorbell_index); - if (pdd->dev->shared_resources.enable_mes) + if (pdd->dev->kfd->shared_resources.enable_mes) amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, pdd->proc_ctx_bo); /* @@ -1259,10 +1259,10 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) int i; for (i = 0; i < p->n_pdds; i++) { - struct kfd_dev *dev = p->pdds[i]->dev; + struct kfd_node *dev = p->pdds[i]->dev; struct qcm_process_device *qpd = &p->pdds[i]->qpd; - if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base) continue; offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id); @@ -1279,7 +1279,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) return err; } - memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); + memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, 
dev->kfd->cwsr_isa_size); qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", @@ -1291,7 +1291,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE @@ -1300,7 +1300,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) void *kaddr; int ret; - if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base) return 0; /* cwsr_base is only set for dGPU */ @@ -1313,7 +1313,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) qpd->cwsr_kaddr = kaddr; qpd->tba_addr = qpd->cwsr_base; - memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size); + memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size); qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET; pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n", @@ -1324,10 +1324,10 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; - if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) + if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) return; kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr); @@ -1371,7 +1371,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * support retry. */ for (i = 0; i < p->n_pdds; i++) { - struct kfd_dev *dev = p->pdds[i]->dev; + struct kfd_node *dev = p->pdds[i]->dev; /* Only consider GFXv9 and higher GPUs. 
Older GPUs don't * support the SVM APIs and don't need to be considered @@ -1394,7 +1394,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) return false; - if (dev->noretry) + if (dev->kfd->noretry) return false; } @@ -1528,7 +1528,7 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, return 0; } -struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev, struct kfd_process *p) { int i; @@ -1540,7 +1540,7 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, return NULL; } -struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, +struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, struct kfd_process *p) { struct kfd_process_device *pdd = NULL; @@ -1552,7 +1552,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, if (!pdd) return NULL; - if (init_doorbell_bitmap(&pdd->qpd, dev)) { + if (init_doorbell_bitmap(&pdd->qpd, dev->kfd)) { pr_err("Failed to init doorbell for process\n"); goto err_free_pdd; } @@ -1573,7 +1573,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, AMDGPU_MES_PROC_CTX_SIZE, &pdd->proc_ctx_bo, @@ -1619,7 +1619,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; struct kfd_process *p; - struct kfd_dev *dev; + struct kfd_node *dev; int ret; if (!drm_file) @@ -1679,7 +1679,7 @@ err_reserve_ib_mem: * * Assumes that the process lock is held. 
*/ -struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev, +struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev, struct kfd_process *p) { struct kfd_process_device *pdd; @@ -1811,7 +1811,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid, + kfd_smi_event_queue_eviction(pdd->dev->kfd, p->lead_thread->pid, trigger); r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, @@ -1839,7 +1839,7 @@ fail: if (n_evicted == 0) break; - kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd)) @@ -1860,7 +1860,7 @@ int kfd_process_restore_queues(struct kfd_process *p) for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; - kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); + kfd_smi_event_queue_restore(pdd->dev->kfd, p->lead_thread->pid); r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm, &pdd->qpd); @@ -2016,7 +2016,7 @@ int kfd_resume_all_processes(void) return ret; } -int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, +int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process, struct vm_area_struct *vma) { struct kfd_process_device *pdd; @@ -2051,7 +2051,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) { struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; /* * It can be that we race and lose here, but that is extremely unlikely diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 4236539d9f93..5602498e713f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -81,7 +81,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) { - struct kfd_dev *dev = pdd->dev; + struct kfd_node *dev = pdd->dev; if (pdd->already_dequeued) return; @@ -93,7 +93,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws) { - struct kfd_dev *dev = NULL; + struct kfd_node *dev = NULL; struct process_queue_node *pqn; struct kfd_process_device *pdd; struct kgd_mem *mem = NULL; @@ -178,7 +178,7 @@ void pqm_uninit(struct process_queue_manager *pqm) } static int init_user_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, struct queue **q, + struct kfd_node *dev, struct queue **q, struct queue_properties *q_properties, struct file *f, struct amdgpu_bo *wptr_bo, unsigned int qid) @@ -199,7 +199,7 @@ static int init_user_queue(struct process_queue_manager *pqm, (*q)->device = dev; (*q)->process = pqm->process; - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, AMDGPU_MES_GANG_CTX_SIZE, &(*q)->gang_ctx_bo, @@ -224,7 +224,7 @@ cleanup: } int pqm_create_queue(struct process_queue_manager *pqm, - struct kfd_dev *dev, + struct kfd_node *dev, struct file *f, struct queue_properties *properties, unsigned int *qid, @@ -258,7 +258,7 
@@ int pqm_create_queue(struct process_queue_manager *pqm, * Hence we also check the type as well */ if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ)) - max_queues = dev->device_info.max_no_of_hqd/2; + max_queues = dev->kfd->device_info.max_no_of_hqd/2; if (pdd->qpd.queue_count >= max_queues) return -ENOSPC; @@ -354,7 +354,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, */ *p_doorbell_offset_in_process = (q->properties.doorbell_off * sizeof(uint32_t)) & - (kfd_doorbell_process_slice(dev) - 1); + (kfd_doorbell_process_slice(dev->kfd) - 1); pr_debug("PQM After DQM create queue\n"); @@ -387,7 +387,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) struct process_queue_node *pqn; struct kfd_process_device *pdd; struct device_queue_manager *dqm; - struct kfd_dev *dev; + struct kfd_node *dev; int retval; dqm = NULL; @@ -439,7 +439,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) pdd->qpd.num_gws = 0; } - if (dev->shared_resources.enable_mes) { + if (dev->kfd->shared_resources.enable_mes) { amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo); if (pqn->q->wptr_bo) @@ -859,7 +859,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, } if (!pdd->doorbell_index && - kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) { + kfd_alloc_process_doorbells(pdd->dev->kfd, &pdd->doorbell_index) < 0) { ret = -ENOMEM; goto exit; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 0472b56de245..a0bf6558f4ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -36,7 +36,7 @@ struct kfd_smi_client { wait_queue_head_t wait_queue; /* events enabled */ uint64_t events; - struct kfd_dev *dev; + struct kfd_node *dev; spinlock_t lock; struct rcu_head rcu; pid_t pid; @@ -149,7 +149,7 @@ static void kfd_smi_ev_client_free(struct rcu_head *p) static int kfd_smi_ev_release(struct inode *inode, struct file *filep) { struct kfd_smi_client *client = filep->private_data; - struct kfd_dev *dev = client->dev; + struct kfd_node *dev = client->dev; spin_lock(&dev->smi_lock); list_del_rcu(&client->list); @@ -171,7 +171,7 @@ static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client, return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event); } -static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev, +static void add_event_to_kfifo(pid_t pid, struct kfd_node *dev, unsigned int smi_event, char *event_msg, int len) { struct kfd_smi_client *client; @@ -196,7 +196,7 @@ static void add_event_to_kfifo(pid_t pid, struct kfd_dev *dev, } __printf(4, 5) -static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev, +static void kfd_smi_event_add(pid_t pid, struct kfd_node *dev, unsigned int event, char *fmt, ...) 
{ char fifo_in[KFD_SMI_EVENT_MSG_SIZE]; @@ -215,7 +215,7 @@ static void kfd_smi_event_add(pid_t pid, struct kfd_dev *dev, add_event_to_kfifo(pid, dev, event, fifo_in, len); } -void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) +void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset) { unsigned int event; @@ -228,7 +228,7 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) kfd_smi_event_add(0, dev, event, "%x\n", dev->reset_seq_num); } -void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, +void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask) { kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n", @@ -236,7 +236,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, amdgpu_dpm_get_thermal_throttling_counter(dev->adev)); } -void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) +void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid) { struct amdgpu_task_info task_info; @@ -254,17 +254,17 @@ void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_START, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_START, "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, dev->id, write_fault ? 'W' : 'R'); + address, dev->node->id, write_fault ? 'W' : 'R'); } void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, unsigned long address, bool migration) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_PAGE_FAULT_END, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_END, "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, dev->id, migration ? 'M' : 'U'); + pid, address, dev->node->id, migration ? 
'M' : 'U'); } void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, @@ -273,7 +273,7 @@ void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_START, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_START, "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, prefetch_loc, preferred_loc, trigger); @@ -283,7 +283,7 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_MIGRATE_END, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_END, "%lld -%d @%lx(%lx) %x->%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, trigger); @@ -292,16 +292,16 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_EVICTION, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_EVICTION, "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - dev->id, trigger); + dev->node->id, trigger); } void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_QUEUE_RESTORE, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_RESTORE, "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - dev->id); + dev->node->id); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -328,12 +328,12 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger) { - kfd_smi_event_add(pid, dev, KFD_SMI_EVENT_UNMAP_FROM_GPU, + kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_UNMAP_FROM_GPU, "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, dev->id, trigger); + pid, address, last - address + 1, dev->node->id, trigger); } -int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) +int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) { struct kfd_smi_client *client; int ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h index 76fe4e0ec2d2..59cd089f80d1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h @@ -24,11 +24,11 @@ #ifndef KFD_SMI_EVENTS_H_INCLUDED #define KFD_SMI_EVENTS_H_INCLUDED -int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd); -void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid); -void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, +int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd); +void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid); +void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev, uint64_t throttle_bitmask); -void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset); +void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset); void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c02430537e9c..96ccff79902c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1266,7 +1266,7 @@ svm_range_unmap_from_gpus(struct 
svm_range *prange, unsigned long start, return -EINVAL; } - kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid, + kfd_smi_event_unmap_from_gpu(pdd->dev->kfd, p->lead_thread->pid, start, last, trigger); r = svm_range_unmap_from_gpu(pdd->dev->adev, @@ -3083,7 +3083,7 @@ int svm_range_list_init(struct kfd_process *p) spin_lock_init(&svms->deferred_list_lock); for (i = 0; i < p->n_pdds; i++) - if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev)) + if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->kfd)) bitmap_set(svms->bitmap_supported, i, 1); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 8e4124dcb6e4..06a11186d947 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -96,7 +96,7 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id) return ret; } -struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) +struct kfd_node *kfd_device_by_id(uint32_t gpu_id) { struct kfd_topology_device *top_dev; @@ -107,10 +107,10 @@ struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) return top_dev->gpu; } -struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) +struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev) { struct kfd_topology_device *top_dev; - struct kfd_dev *device = NULL; + struct kfd_node *device = NULL; down_read(&topology_lock); @@ -125,10 +125,10 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) return device; } -struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev) +struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev) { struct kfd_topology_device *top_dev; - struct kfd_dev *device = NULL; + struct kfd_node *device = NULL; down_read(&topology_lock); @@ -526,7 +526,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, if (dev->gpu) { log_max_watch_addr = - __ilog2_u32(dev->gpu->device_info.num_of_watch_points); + __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points); if (log_max_watch_addr) { dev->node_props.capability |= @@ -548,11 +548,11 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_64bit_prop(buffer, offs, "local_mem_size", 0ULL); sysfs_show_32bit_prop(buffer, offs, "fw_version", - dev->gpu->mec_fw_version); + dev->gpu->kfd->mec_fw_version); sysfs_show_32bit_prop(buffer, offs, "capability", dev->node_props.capability); sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", - dev->gpu->sdma_fw_version); + dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); @@ -1157,7 +1157,7 @@ void kfd_topology_shutdown(void) up_write(&topology_lock); } -static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) +static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) { uint32_t hashout; uint32_t buf[7]; @@ -1167,8 +1167,8 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) if (!gpu) return 0; - local_mem_size = gpu->local_mem_info.local_mem_size_private + - gpu->local_mem_info.local_mem_size_public; + local_mem_size = gpu->kfd->local_mem_info.local_mem_size_private + + gpu->kfd->local_mem_info.local_mem_size_public; buf[0] = gpu->adev->pdev->devfn; buf[1] = gpu->adev->pdev->subsystem_vendor | (gpu->adev->pdev->subsystem_device << 16); @@ -1188,7 +1188,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) * list then return NULL. This means a new topology device has to * be created for this GPU. 
*/ -static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) +static struct kfd_topology_device *kfd_assign_gpu(struct kfd_node *gpu) { struct kfd_topology_device *dev; struct kfd_topology_device *out_dev = NULL; @@ -1201,7 +1201,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) /* Discrete GPUs need their own topology device list * entries. Don't assign them to CPU/APU nodes. */ - if (!gpu->use_iommu_v2 && + if (!gpu->kfd->use_iommu_v2 && dev->node_props.cpu_cores_count) continue; @@ -1275,7 +1275,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev, CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; /* set gpu (dev) flags. */ } else { - if (!dev->gpu->pci_atomic_requested || + if (!dev->gpu->kfd->pci_atomic_requested || dev->gpu->adev->asic_type == CHIP_HAWAII) link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; @@ -1569,8 +1569,8 @@ static int kfd_dev_create_p2p_links(void) if (dev == new_dev) break; if (!dev->gpu || !dev->gpu->adev || - (dev->gpu->hive_id && - dev->gpu->hive_id == new_dev->gpu->hive_id)) + (dev->gpu->kfd->hive_id && + dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id)) goto next; /* check if node(s) is/are peer accessible in one direction or bi-direction */ @@ -1590,7 +1590,6 @@ out: return ret; } - /* Helper function. See kfd_fill_gpu_cache_info for parameter description */ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext, struct kfd_gpu_cache_info *pcache_info, @@ -1723,7 +1722,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, /* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info * tables */ -static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev) +static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev) { struct kfd_gpu_cache_info *pcache_info = NULL; int i, j, k; @@ -1805,7 +1804,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct pr_debug("Added [%d] GPU cache entries\n", num_of_entries); } -static int kfd_topology_add_device_locked(struct kfd_dev *gpu, uint32_t gpu_id, +static int kfd_topology_add_device_locked(struct kfd_node *gpu, uint32_t gpu_id, struct kfd_topology_device **dev) { int proximity_domain = ++topology_crat_proximity_domain; @@ -1865,7 +1864,7 @@ err: return res; } -int kfd_topology_add_device(struct kfd_dev *gpu) +int kfd_topology_add_device(struct kfd_node *gpu) { uint32_t gpu_id; struct kfd_topology_device *dev; @@ -1916,7 +1915,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.simd_arrays_per_engine = cu_info.num_shader_arrays_per_engine; - dev->node_props.gfx_target_version = gpu->device_info.gfx_target_version; + dev->node_props.gfx_target_version = + gpu->kfd->device_info.gfx_target_version; dev->node_props.vendor_id = gpu->adev->pdev->vendor; dev->node_props.device_id = gpu->adev->pdev->device; dev->node_props.capability |= @@ -1929,15 +1929,15 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.max_engine_clk_ccompute = cpufreq_quick_get_max(0) / 1000; dev->node_props.drm_render_minor = - gpu->shared_resources.drm_render_minor; + gpu->kfd->shared_resources.drm_render_minor; - dev->node_props.hive_id = gpu->hive_id; + dev->node_props.hive_id = gpu->kfd->hive_id; dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); dev->node_props.num_sdma_xgmi_engines = kfd_get_num_xgmi_sdma_engines(gpu); 
dev->node_props.num_sdma_queues_per_engine = - gpu->device_info.num_sdma_queues_per_engine - - gpu->device_info.num_reserved_sdma_queues_per_engine; + gpu->kfd->device_info.num_sdma_queues_per_engine - + gpu->kfd->device_info.num_reserved_sdma_queues_per_engine; dev->node_props.num_gws = (dev->gpu->gws && dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? dev->gpu->adev->gds.gws_size : 0; @@ -1979,7 +1979,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * Overwrite ATS capability according to needs_iommu_device to fix * potential missing corresponding bit in CRAT of BIOS. */ - if (dev->gpu->use_iommu_v2) + if (dev->gpu->kfd->use_iommu_v2) dev->node_props.capability |= HSA_CAP_ATS_PRESENT; else dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; @@ -2079,7 +2079,7 @@ static void kfd_topology_update_io_links(int proximity_domain) } } -int kfd_topology_remove_device(struct kfd_dev *gpu) +int kfd_topology_remove_device(struct kfd_node *gpu) { struct kfd_topology_device *dev, *tmp; uint32_t gpu_id; @@ -2119,7 +2119,7 @@ int kfd_topology_remove_device(struct kfd_dev *gpu) * Return - 0: On success (@kdev will be NULL for non GPU nodes) * -1: If end of list */ -int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev) +int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev) { struct kfd_topology_device *top_dev; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index fca30d00a9bb..3b8afb6aba79 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -75,7 +75,7 @@ struct kfd_mem_properties { uint32_t flags; uint32_t width; uint32_t mem_clk_max; - struct kfd_dev *gpu; + struct kfd_node *gpu; struct kobject *kobj; struct attribute attr; }; @@ -93,7 +93,7 @@ struct kfd_cache_properties { uint32_t cache_latency; uint32_t cache_type; uint8_t sibling_map[CACHE_SIBLINGMAP_SIZE]; - struct kfd_dev *gpu; + struct kfd_node *gpu; struct kobject *kobj; struct attribute attr; uint32_t sibling_map_size; @@ -113,7 +113,7 @@ struct kfd_iolink_properties { uint32_t max_bandwidth; uint32_t rec_transfer_size; uint32_t flags; - struct kfd_dev *gpu; + struct kfd_node *gpu; struct kobject *kobj; struct attribute attr; }; @@ -135,7 +135,7 @@ struct kfd_topology_device { struct list_head io_link_props; struct list_head p2p_link_props; struct list_head perf_props; - struct kfd_dev *gpu; + struct kfd_node *gpu; struct kobject *kobj_node; struct kobject *kobj_mem; struct kobject *kobj_cache; -- cgit v1.2.3 From 74c5b85da75475c73a8f040397610fbfcc2c3e78 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 16:33:38 -0400 Subject: drm/amdkfd: Add spatial partitioning support in KFD This patch introduces multi-partition support in KFD. This patch includes: - Support for maximum 8 spatial partitions in KFD. - Initialize one HIQ per partition. - Management of VMID range depending on partition mode. - Management of doorbell aperture space between all partitions. - Each partition does its own queue management, interrupt handling, SMI event reporting. - IOMMU, if enabled with multiple partitions, will only work on first partition. - SPM is only supported on the first partition. - Currently, there is no support for resetting individual partitions. All partitions will reset together. 
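The VMID management mentioned above can be illustrated with a small stand-alone sketch (plain userspace C, not part of the patch; the helper name node_vmid_bitmap() is hypothetical). It mirrors the GFX9.4.3 CPX-mode handling added to kgd2kfd_device_init() in the diff below: with VMIDs 4-15 usable and two partitions sharing the range, each node receives six VMIDs and a matching compute VMID bitmap.

/* Illustrative sketch only: how a shared KFD VMID range can be split
 * evenly between two partitions and packed into per-node bitmaps,
 * mirroring the CPX handling in the diff below.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t node_vmid_bitmap(unsigned int first, unsigned int last)
{
	/* Set bits first..last, using the same formula as the patch. */
	return ((1u << (last + 1)) - 1) - ((1u << first) - 1);
}

int main(void)
{
	unsigned int first_vmid_kfd = 4, last_vmid_kfd = 15;	/* CPX case */
	unsigned int vmid_num_kfd = (last_vmid_kfd - first_vmid_kfd + 1) / 2;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int first = (i % 2 == 0) ?
			first_vmid_kfd : first_vmid_kfd + vmid_num_kfd;
		unsigned int last = (i % 2 == 0) ?
			last_vmid_kfd - vmid_num_kfd : last_vmid_kfd;

		/* Prints: node 0 -> VMIDs 4-9, node 1 -> VMIDs 10-15 */
		printf("node %u: VMIDs %u-%u, bitmap 0x%x\n",
		       i, first, last, node_vmid_bitmap(first, last));
	}
	return 0;
}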
Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 214 ++++++++++++++++----- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 13 +- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 16 +- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 24 +-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 8 +- 7 files changed, 208 insertions(+), 77 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 23d9a7f77055..37c6dc5c37bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -567,23 +567,27 @@ kfd_interrupt_error: return err; } -static void kfd_cleanup_node(struct kfd_dev *kfd) +static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes) { - struct kfd_node *knode = kfd->node; - - device_queue_manager_uninit(knode->dqm); - kfd_interrupt_exit(knode); - kfd_topology_remove_device(knode); - if (knode->gws) - amdgpu_amdkfd_free_gws(knode->adev, knode->gws); - kfree(knode); - kfd->node = NULL; + struct kfd_node *knode; + unsigned int i; + + for (i = 0; i < num_nodes; i++) { + knode = kfd->nodes[i]; + device_queue_manager_uninit(knode->dqm); + kfd_interrupt_exit(knode); + kfd_topology_remove_device(knode); + if (knode->gws) + amdgpu_amdkfd_free_gws(knode->adev, knode->gws); + kfree(knode); + kfd->nodes[i] = NULL; + } } bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources) { - unsigned int size, map_process_packet_size; + unsigned int size, map_process_packet_size, i; struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; @@ -596,9 +600,18 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; - last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; - vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; + if (kfd->adev->gfx.num_xcd == 0 || kfd->adev->gfx.num_xcd == 1 || + kfd->adev->gfx.num_xcc_per_xcp == 0) + kfd->num_nodes = 1; + else + kfd->num_nodes = + kfd->adev->gfx.num_xcd/kfd->adev->gfx.num_xcc_per_xcp; + if (kfd->num_nodes == 0) { + dev_err(kfd_device, + "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", + kfd->adev->gfx.num_xcd, kfd->adev->gfx.num_xcc_per_xcp); + goto out; + } /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. * 32 and 64-bit requests are possible and must be @@ -617,6 +630,26 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, return false; } + first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; + last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; + vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; + + /* For GFX9.4.3, we need special handling for VMIDs depending on + * partition mode. + * In CPX mode, the VMID range needs to be shared between XCDs. + * Additionally, there are 13 VMIDs (3-15) available for KFD. To + * divide them equally, we change starting VMID to 4 and not use + * VMID 3. + * If the VMID range changes for GFX9.4.3, then this code MUST be + * revisited. 
+ */ + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && + kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + kfd->num_nodes != 1) { + vmid_num_kfd /= 2; + first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; + } + /* Verify module parameters regarding mapped process number*/ if (hws_max_conc_proc >= 0) max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd); @@ -682,6 +715,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); + /* TODO: Needs to be updated for memory partitioning */ svm_migrate_init(kfd->adev); /* Allocate the KFD node */ @@ -700,12 +734,51 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->max_proc_per_quantum = max_proc_per_quantum; atomic_set(&node->sram_ecc_flag, 0); - /* Initialize the KFD node */ - if (kfd_init_node(node)) { - dev_err(kfd_device, "Error initializing KFD node\n"); - goto node_init_error; + dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", + kfd->num_nodes); + for (i = 0; i < kfd->num_nodes; i++) { + node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); + if (!node) + goto node_alloc_error; + + node->adev = kfd->adev; + node->kfd = kfd; + node->kfd2kgd = kfd->kfd2kgd; + node->vm_info.vmid_num_kfd = vmid_num_kfd; + node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp); + node->start_xcc_id = node->num_xcc_per_node * i; + + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && + kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + kfd->num_nodes != 1) { + /* For GFX9.4.3 and CPX mode, first XCD gets VMID range + * 4-9 and second XCD gets VMID range 10-15. + */ + + node->vm_info.first_vmid_kfd = (i%2 == 0) ? + first_vmid_kfd : + first_vmid_kfd+vmid_num_kfd; + node->vm_info.last_vmid_kfd = (i%2 == 0) ? + last_vmid_kfd-vmid_num_kfd : + last_vmid_kfd; + node->compute_vmid_bitmap = + ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) - + ((0x1 << (node->vm_info.first_vmid_kfd)) - 1); + } else { + node->vm_info.first_vmid_kfd = first_vmid_kfd; + node->vm_info.last_vmid_kfd = last_vmid_kfd; + node->compute_vmid_bitmap = + gpu_resources->compute_vmid_bitmap; + } + node->max_proc_per_quantum = max_proc_per_quantum; + atomic_set(&node->sram_ecc_flag, 0); + /* Initialize the KFD node */ + if (kfd_init_node(node)) { + dev_err(kfd_device, "Error initializing KFD node\n"); + goto node_init_error; + } + kfd->nodes[i] = node; } - kfd->node = node; if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; @@ -722,9 +795,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto out; kfd_resume_iommu_error: - kfd_cleanup_node(kfd); node_init_error: node_alloc_error: + kfd_cleanup_nodes(kfd, i); device_iommu_error: kfd_doorbell_fini(kfd); kfd_doorbell_error: @@ -742,7 +815,9 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - kfd_cleanup_node(kfd); + /* Cleanup KFD nodes */ + kfd_cleanup_nodes(kfd, kfd->num_nodes); + /* Cleanup common/shared resources */ kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); @@ -754,18 +829,23 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) int kgd2kfd_pre_reset(struct kfd_dev *kfd) { - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return 0; - kfd_smi_event_update_gpu_reset(node, false); - - node->dqm->ops.pre_reset(node->dqm); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + kfd_smi_event_update_gpu_reset(node, false); + node->dqm->ops.pre_reset(node->dqm); + } kgd2kfd_suspend(kfd, false); - kfd_signal_reset_event(node); + for (i = 0; i < 
kfd->num_nodes; i++) + kfd_signal_reset_event(kfd->nodes[i]); + return 0; } @@ -778,19 +858,25 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { int ret; - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return 0; - ret = kfd_resume(node); - if (ret) - return ret; - atomic_dec(&kfd_locked); + for (i = 0; i < kfd->num_nodes; i++) { + ret = kfd_resume(kfd->nodes[i]); + if (ret) + return ret; + } - atomic_set(&node->sram_ecc_flag, 0); + atomic_dec(&kfd_locked); - kfd_smi_event_update_gpu_reset(node, true); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + atomic_set(&node->sram_ecc_flag, 0); + kfd_smi_event_update_gpu_reset(node, true); + } return 0; } @@ -802,7 +888,8 @@ bool kfd_is_locked(void) void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return; @@ -814,21 +901,25 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kfd_suspend_all_processes(); } - node->dqm->ops.stop(node->dqm); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + node->dqm->ops.stop(node->dqm); + } kfd_iommu_suspend(kfd); } int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { - int ret, count; - struct kfd_node *node = kfd->node; + int ret, count, i; if (!kfd->init_complete) return 0; - ret = kfd_resume(node); - if (ret) - return ret; + for (i = 0; i < kfd->num_nodes; i++) { + ret = kfd_resume(kfd->nodes[i]); + if (ret) + return ret; + } /* for runtime resume, skip unlocking kfd */ if (!run_pm) { @@ -892,10 +983,10 @@ static inline void kfd_queue_work(struct workqueue_struct *wq, /* This is called directly from KGD at ISR. */ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) { - uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; + uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i; bool is_patched = false; unsigned long flags; - struct kfd_node *node = kfd->node; + struct kfd_node *node; if (!kfd->init_complete) return; @@ -905,16 +996,22 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) return; } - spin_lock_irqsave(&node->interrupt_lock, flags); - - if (node->interrupts_active - && interrupt_is_wanted(node, ih_ring_entry, - patched_ihre, &is_patched) - && enqueue_ih_ring_entry(node, - is_patched ? patched_ihre : ih_ring_entry)) - kfd_queue_work(node->ih_wq, &node->interrupt_work); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + spin_lock_irqsave(&node->interrupt_lock, flags); + + if (node->interrupts_active + && interrupt_is_wanted(node, ih_ring_entry, + patched_ihre, &is_patched) + && enqueue_ih_ring_entry(node, + is_patched ? patched_ihre : ih_ring_entry)) { + kfd_queue_work(node->ih_wq, &node->interrupt_work); + spin_unlock_irqrestore(&node->interrupt_lock, flags); + return; + } + spin_unlock_irqrestore(&node->interrupt_lock, flags); + } - spin_unlock_irqrestore(&node->interrupt_lock, flags); } int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger) @@ -1181,8 +1278,13 @@ int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj) void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) { + /* + * TODO: Currently update SRAM ECC flag for first node. + * This needs to be updated later when we can + * identify SRAM ECC error on other nodes also. 
+ */ if (kfd) - atomic_inc(&kfd->node->sram_ecc_flag); + atomic_inc(&kfd->nodes[0]->sram_ecc_flag); } void kfd_inc_compute_active(struct kfd_node *node) @@ -1202,8 +1304,14 @@ void kfd_dec_compute_active(struct kfd_node *node) void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { + /* + * TODO: For now, raise the throttling event only on first node. + * This will need to change after we are able to determine + * which node raised the throttling event. + */ if (kfd && kfd->init_complete) - kfd_smi_event_update_thermal_throttling(kfd->node, throttle_bitmask); + kfd_smi_event_update_thermal_throttling(kfd->nodes[0], + throttle_bitmask); } /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 34977d89f01c..6ee17100c333 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1426,7 +1426,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) int i, mec; struct scheduling_resources res; - res.vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + res.vmid_mask = dqm->dev->compute_vmid_bitmap; res.queue_mask = 0; for (i = 0; i < KGD_MAX_QUEUES; ++i) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 6eee9a0944f3..808ee010520a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -121,6 +121,12 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) return -EINVAL; } + if (!kfd_is_first_node(dev)) { + dev_warn_once(kfd_device, + "IOMMU supported only on first node\n"); + return 0; + } + err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread); if (!err) pdd->bound = PDD_BOUND; @@ -138,7 +144,8 @@ void kfd_iommu_unbind_process(struct kfd_process *p) int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i]->bound == PDD_BOUND) + if ((p->pdds[i]->bound == PDD_BOUND) && + (kfd_is_first_node((p->pdds[i]->dev)))) amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev, p->pasid); } @@ -281,7 +288,7 @@ void kfd_iommu_suspend(struct kfd_dev *kfd) if (!kfd->use_iommu_v2) return; - kfd_unbind_processes_from_device(kfd->node); + kfd_unbind_processes_from_device(kfd->nodes[0]); amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); @@ -312,7 +319,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd) amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, iommu_invalid_ppr_cb); - err = kfd_bind_processes_to_device(kfd->node); + err = kfd_bind_processes_to_device(kfd->nodes[0]); if (err) { amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 1e187677c90a..5f4dc2a45bd0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -423,7 +423,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->node->id, prange->prefetch_loc, + 0, adev->kfd.dev->nodes[0]->id, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -456,7 +456,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range 
*prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->node->id, trigger); + 0, adev->kfd.dev->nodes[0]->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -701,7 +701,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->node->id, 0, prange->prefetch_loc, + adev->kfd.dev->nodes[0]->id, 0, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -737,7 +737,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->node->id, 0, trigger); + adev->kfd.dev->nodes[0]->id, 0, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index fdb97e5d0c01..873b49238dc1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -255,6 +255,8 @@ struct kfd_vmid_info { uint32_t vmid_num_kfd; }; +#define MAX_KFD_NODES 8 + struct kfd_dev; struct kfd_node { @@ -267,6 +269,10 @@ struct kfd_node { */ struct kfd_vmid_info vm_info; unsigned int id; /* topology stub index */ + unsigned int num_xcc_per_node; + unsigned int start_xcc_id; /* Starting XCC instance + * number for the node + */ /* Interrupts */ struct kfifo ih_fifo; struct workqueue_struct *ih_wq; @@ -300,6 +306,8 @@ struct kfd_node { /* Maximum process number mapped to HW scheduler */ unsigned int max_proc_per_quantum; + unsigned int compute_vmid_bitmap; + struct kfd_dev *kfd; }; @@ -368,7 +376,8 @@ struct kfd_dev { /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ struct dev_pagemap pgmap; - struct kfd_node *node; + struct kfd_node *nodes[MAX_KFD_NODES]; + unsigned int num_nodes; }; enum kfd_mempool { @@ -1397,6 +1406,11 @@ static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd) #endif } +static inline bool kfd_is_first_node(struct kfd_node *node) +{ + return (node == node->kfd->nodes[0]); +} + /* Debugfs */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index a0bf6558f4ac..b703da59e067 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -254,17 +254,17 @@ void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_START, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_START, "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, dev->node->id, write_fault ? 'W' : 'R'); + address, dev->nodes[0]->id, write_fault ? 'W' : 'R'); } void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, unsigned long address, bool migration) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_END, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_END, "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, dev->node->id, migration ? 'M' : 'U'); + pid, address, dev->nodes[0]->id, migration ? 
'M' : 'U'); } void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, @@ -273,7 +273,7 @@ void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_START, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_START, "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, prefetch_loc, preferred_loc, trigger); @@ -283,7 +283,7 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_END, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_END, "%lld -%d @%lx(%lx) %x->%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, trigger); @@ -292,16 +292,16 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_EVICTION, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_EVICTION, "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - dev->node->id, trigger); + dev->nodes[0]->id, trigger); } void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_RESTORE, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_RESTORE, "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - dev->node->id); + dev->nodes[0]->id); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -328,9 +328,9 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_UNMAP_FROM_GPU, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_UNMAP_FROM_GPU, "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, dev->node->id, trigger); + pid, address, last - address + 1, dev->nodes[0]->id, trigger); } int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 06a11186d947..94af37df3ed2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -555,7 +555,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); - + sysfs_show_32bit_prop(buffer, offs, "num_xcc", + dev->gpu->num_xcc_per_node); } return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute", @@ -1160,7 +1161,7 @@ void kfd_topology_shutdown(void) static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) { uint32_t hashout; - uint32_t buf[7]; + uint32_t buf[8]; uint64_t local_mem_size; int i; @@ -1177,8 +1178,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) buf[4] = gpu->adev->pdev->bus->number; buf[5] = lower_32_bits(local_mem_size); buf[6] = upper_32_bits(local_mem_size); + buf[7] = gpu->start_xcc_id | (gpu->num_xcc_per_node << 16); - for (i = 0, hashout = 0; i < 7; i++) + for (i = 0, hashout = 0; i < 8; i++) hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH); return hashout; -- cgit v1.2.3 From a805889a15315f7fa78c1c4bb2f1875c7c43f919 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:52:39 
-0400 Subject: drm/amdkfd: Update SDMA queue management for GFX9.4.3 This patch updates SDMA queue management for multi XCC in GFX9.4.3. - Allocate/deallocate SDMA queues from the correct SDMA engines based on the partition mode. - Updates the kgd2kfd interface to fetch the correct SDMA register addresses. - It also fixes dumping correct SDMA queue info in debugfs. v2: squash in fix "drm/amdkfd: Fix XGMI SDMA user-mode queue allocation" Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 194 ++++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 +- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 59 +++---- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 + 5 files changed, 227 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 49d8087e469e..e81bdca53f42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -31,6 +31,192 @@ #include "oss/osssys_4_0_sh_mask.h" #include "v9_structs.h" #include "soc15.h" +#include "sdma/sdma_4_4_2_offset.h" +#include "sdma/sdma_4_4_2_sh_mask.h" + +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v9_sdma_mqd *)mqd; +} + +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, + unsigned int engine_id, + unsigned int queue_id) +{ + uint32_t sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, engine_id, + regSDMA_RLC0_RB_CNTL) - + regSDMA_RLC0_RB_CNTL; + uint32_t retval = sdma_engine_reg_base + + queue_id * (regSDMA_RLC1_RB_CNTL - regSDMA_RLC0_RB_CNTL); + + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); + return retval; +} + +int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + unsigned long end_jiffies; + uint32_t data; + uint64_t data64; + uint64_t __user *wptr64 = (uint64_t __user *)wptr; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, + m->sdmax_rlcx_rb_cntl & (~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK)); + + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS); + if (data & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL_OFFSET, + m->sdmax_rlcx_doorbell_offset); + + data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA_RLC0_DOORBELL, + ENABLE, 1); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 1); + if (read_user_wptr(mm, wptr64, data64)) { + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR, + lower_32_bits(data64)); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI, + upper_32_bits(data64)); + } else { + WREG32(sdma_rlc_reg_offset + 
regSDMA_RLC0_RB_WPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + } + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 0); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE_HI, + m->sdmax_rlcx_rb_base_hi); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_LO, + m->sdmax_rlcx_rb_rptr_addr_lo); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_HI, + m->sdmax_rlcx_rb_rptr_addr_hi); + + data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA_RLC0_RB_CNTL, + RB_ENABLE, 1); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, data); + + return 0; +} + +int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); + uint32_t i = 0, reg; +#undef HQD_N_REGS +#define HQD_N_REGS (19+6+7+12) +#define DUMP_REG(addr) do { \ + if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ + break; \ + (*dump)[i][0] = (addr) << 2; \ + (*dump)[i++][1] = RREG32(addr); \ + } while (0) + + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + for (reg = regSDMA_RLC0_RB_CNTL; reg <= regSDMA_RLC0_DOORBELL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_STATUS; reg <= regSDMA_RLC0_CSA_ADDR_HI; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_IB_SUB_REMAIN; + reg <= regSDMA_RLC0_MINOR_PTR_UPDATE; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_MIDCMD_DATA0; + reg <= regSDMA_RLC0_MIDCMD_CNTL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t sdma_rlc_rb_cntl; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL); + + if (sdma_rlc_rb_cntl & SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK) + return true; + + return false; +} + +int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, + unsigned int utimeout) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t temp; + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL); + temp = temp & ~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK; + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, temp); + + while (true) { + temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS); + if (temp & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL) | + SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK); + + m->sdmax_rlcx_rb_rptr = + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr_hi = + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI); + + return 0; +} static int 
kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid, uint32_t inst) @@ -166,13 +352,13 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { .init_interrupts = kgd_gfx_v9_init_interrupts, .hqd_load = kgd_gfx_v9_4_3_hqd_load, .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, - .hqd_sdma_load = kgd_arcturus_hqd_sdma_load, + .hqd_sdma_load = kgd_gfx_v9_4_3_hqd_sdma_load, .hqd_dump = kgd_gfx_v9_hqd_dump, - .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump, + .hqd_sdma_dump = kgd_gfx_v9_4_3_hqd_sdma_dump, .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, + .hqd_sdma_is_occupied = kgd_gfx_v9_4_3_hqd_sdma_is_occupied, .hqd_destroy = kgd_gfx_v9_hqd_destroy, - .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, + .hqd_sdma_destroy = kgd_gfx_v9_4_3_hqd_sdma_destroy, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 37c6dc5c37bf..ec5f85ff34e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -741,6 +741,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (!node) goto node_alloc_error; + node->node_id = i; node->adev = kfd->adev; node->kfd = kfd; node->kfd2kgd = kfd->kfd2kgd; @@ -1323,15 +1324,16 @@ unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) { /* If XGMI is not supported, all SDMA engines are PCIe */ if (!node->adev->gmc.xgmi.supported) - return node->adev->sdma.num_instances; + return node->adev->sdma.num_instances/(int)node->kfd->num_nodes; - return min(node->adev->sdma.num_instances, 2); + return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2); } unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) { /* After reserved for PCIe, the rest of engines are XGMI */ - return node->adev->sdma.num_instances - kfd_get_num_sdma_engines(node); + return node->adev->sdma.num_instances/(int)node->kfd->num_nodes - + kfd_get_num_sdma_engines(node); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f78c1e7aad57..69419a53a14e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -124,6 +124,15 @@ static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manag return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; } +static void init_sdma_bitmaps(struct device_queue_manager *dqm) +{ + bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES); + bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm)); + + bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); + bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm)); +} + void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -1268,24 +1277,6 @@ static void init_interrupts(struct device_queue_manager *dqm) } } -static void init_sdma_bitmaps(struct device_queue_manager *dqm) -{ - unsigned int num_sdma_queues = - min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8, - get_num_sdma_queues(dqm)); - unsigned int num_xgmi_sdma_queues = - min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8, - get_num_xgmi_sdma_queues(dqm)); - - if (num_sdma_queues) - dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0); - if (num_xgmi_sdma_queues) - dqm->xgmi_sdma_bitmap = 
GENMASK_ULL(num_xgmi_sdma_queues-1, 0); - - dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm); - pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap); -} - static int initialize_nocpsch(struct device_queue_manager *dqm) { int pipe, queue; @@ -1375,46 +1366,49 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, int bit; if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - if (dqm->sdma_bitmap == 0) { + if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { pr_err("No more SDMA queue to allocate\n"); return -ENOMEM; } if (restore_sdma_id) { /* Re-use existing sdma_id */ - if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) { + if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) { pr_err("SDMA queue already in use\n"); return -EBUSY; } - dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id); + clear_bit(*restore_sdma_id, dqm->sdma_bitmap); q->sdma_id = *restore_sdma_id; } else { /* Find first available sdma_id */ - bit = __ffs64(dqm->sdma_bitmap); - dqm->sdma_bitmap &= ~(1ULL << bit); + bit = find_first_bit(dqm->sdma_bitmap, + get_num_sdma_queues(dqm)); + clear_bit(bit, dqm->sdma_bitmap); q->sdma_id = bit; } - q->properties.sdma_engine_id = q->sdma_id % - kfd_get_num_sdma_engines(dqm->dev); + q->properties.sdma_engine_id = + dqm->dev->node_id * get_num_all_sdma_engines(dqm) + + q->sdma_id % kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / kfd_get_num_sdma_engines(dqm->dev); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - if (dqm->xgmi_sdma_bitmap == 0) { + if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { pr_err("No more XGMI SDMA queue to allocate\n"); return -ENOMEM; } if (restore_sdma_id) { /* Re-use existing sdma_id */ - if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) { + if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) { pr_err("SDMA queue already in use\n"); return -EBUSY; } - dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id); + clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap); q->sdma_id = *restore_sdma_id; } else { - bit = __ffs64(dqm->xgmi_sdma_bitmap); - dqm->xgmi_sdma_bitmap &= ~(1ULL << bit); + bit = find_first_bit(dqm->xgmi_sdma_bitmap, + get_num_xgmi_sdma_queues(dqm)); + clear_bit(bit, dqm->xgmi_sdma_bitmap); q->sdma_id = bit; } /* sdma_engine_id is sdma id including @@ -1424,6 +1418,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * PCIe-optimized ones */ q->properties.sdma_engine_id = + dqm->dev->node_id * get_num_all_sdma_engines(dqm) + kfd_get_num_sdma_engines(dqm->dev) + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / @@ -1442,11 +1437,11 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm, if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { if (q->sdma_id >= get_num_sdma_queues(dqm)) return; - dqm->sdma_bitmap |= (1ULL << q->sdma_id); + set_bit(q->sdma_id, dqm->sdma_bitmap); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm)) return; - dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id); + set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index e554a48f3054..b11c474d4067 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -239,8 +239,8 @@ struct device_queue_manager { unsigned int total_queue_count; unsigned int next_pipe_to_allocate; unsigned 
int *allocated_queues; - uint64_t sdma_bitmap; - uint64_t xgmi_sdma_bitmap; + DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES); + DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); /* the pasid mapping for each kfd vmid */ uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1337fcdf8958..5cfebcc8b305 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -113,6 +113,8 @@ #define KFD_UNMAP_LATENCY_MS (4000) +#define KFD_MAX_SDMA_QUEUES 128 + /* * 512 = 0x200 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the @@ -260,6 +262,7 @@ struct kfd_vmid_info { struct kfd_dev; struct kfd_node { + unsigned int node_id; struct amdgpu_device *adev; /* Duplicated here along with keeping * a copy in kfd_dev to save a hop */ -- cgit v1.2.3 From 92085240ef9c0ec60c27a60b3cc0d4f5266fa511 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 3 May 2022 10:16:46 -0400 Subject: drm/amdkfd: add gpu compute cores io links for gfx9.4.3 The PSP TA will only provide xGMI topology info for links between GPU sockets, so links between partitions from different sockets will be hardcoded as 3 xGMI hops with 1 hop weighted as xGMI and 2 hops weighted with a new intra-socket weight to indicate the longest possible distance. If the link between a partition and the CPU is non-PCIe, then assume the CPU (CCDs) is located within the same socket as the partition and represent the link as an intra-socket weighted single hop XGMI link with memory bandwidth. Links between partitions within a single socket will be abstracted as single hop xGMI links weighted with the new intra-socket weight and will have memory bandwidth. Finally, use the unused function bits in the location ID to represent the coordinates of the compute partition within its socket. A follow-on patch will resolve the requirement for GPU socket xGMI link representation sometime later. 
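The hop weighting described above reduces to a small piece of arithmetic; here is a stand-alone sketch (plain userspace C, not part of the patch) using the KFD_CRAT_INTRA_SOCKET_WEIGHT (13) and KFD_CRAT_XGMI_WEIGHT (15) constants that kfd_fill_gpu_xgmi_link_to_gpu() adopts in the diff below.

/* Illustrative sketch only: link weight selection for partition-to-
 * partition xGMI links when PSP TA topology info is unavailable.
 */
#include <stdbool.h>
#include <stdio.h>

#define KFD_CRAT_INTRA_SOCKET_WEIGHT 13
#define KFD_CRAT_XGMI_WEIGHT         15

static int xgmi_link_weight(bool is_single_hop)
{
	/* Same socket: one intra-socket hop.  Different sockets: two
	 * intra-socket hops plus one true xGMI hop.
	 */
	return is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT :
		(2 * KFD_CRAT_INTRA_SOCKET_WEIGHT) + KFD_CRAT_XGMI_WEIGHT;
}

int main(void)
{
	printf("intra-socket partition link weight: %d\n",
	       xgmi_link_weight(true));		/* 13 */
	printf("inter-socket partition link weight: %d\n",
	       xgmi_link_weight(false));	/* 2 * 13 + 15 = 41 */
	return 0;
}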
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 49 +++++++++++++++++++++---------- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 +++++ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 +++ 4 files changed, 47 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index f5aebba31e88..dc93a67257e1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1166,7 +1166,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink, if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS) props->weight = 20; else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI) - props->weight = 15 * iolink->num_hops_xgmi; + props->weight = iolink->weight_xgmi; else props->weight = node_distance(id_from, id_to); @@ -1972,6 +1972,9 @@ static void kfd_find_numa_node_in_srat(struct kfd_node *kdev) } #endif +#define KFD_CRAT_INTRA_SOCKET_WEIGHT 13 +#define KFD_CRAT_XGMI_WEIGHT 15 + /* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU * to its NUMA node * @avail_size: Available size in the memory @@ -2003,6 +2006,12 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, * TODO: Fill-in other fields of iolink subtype */ if (kdev->adev->gmc.xgmi.connected_to_cpu) { + bool ext_cpu = KFD_GC_VERSION(kdev) != IP_VERSION(9, 4, 3); + int mem_bw = 819200, weight = ext_cpu ? KFD_CRAT_XGMI_WEIGHT : + KFD_CRAT_INTRA_SOCKET_WEIGHT; + uint32_t bandwidth = ext_cpu ? amdgpu_amdkfd_get_xgmi_bandwidth_mbytes( + kdev->adev, NULL, true) : mem_bw; + /* * with host gpu xgmi link, host can access gpu memory whether * or not pcie bar type is large, so always create bidirectional @@ -2010,14 +2019,9 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, */ sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL; sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; - sub_type_hdr->num_hops_xgmi = 1; - if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) { - sub_type_hdr->minimum_bandwidth_mbs = - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes( - kdev->adev, NULL, true); - sub_type_hdr->maximum_bandwidth_mbs = - sub_type_hdr->minimum_bandwidth_mbs; - } + sub_type_hdr->weight_xgmi = weight; + sub_type_hdr->minimum_bandwidth_mbs = bandwidth; + sub_type_hdr->maximum_bandwidth_mbs = bandwidth; } else { sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS; sub_type_hdr->minimum_bandwidth_mbs = @@ -2050,6 +2054,8 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, uint32_t proximity_domain_from, uint32_t proximity_domain_to) { + bool use_ta_info = kdev->kfd->num_nodes == 1; + *avail_size -= sizeof(struct crat_subtype_iolink); if (*avail_size < 0) return -ENOMEM; @@ -2064,12 +2070,25 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; sub_type_hdr->proximity_domain_from = proximity_domain_from; sub_type_hdr->proximity_domain_to = proximity_domain_to; - sub_type_hdr->num_hops_xgmi = - amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev); - sub_type_hdr->maximum_bandwidth_mbs = - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false); - sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ? 
- amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0; + + if (use_ta_info) { + sub_type_hdr->weight_xgmi = KFD_CRAT_XGMI_WEIGHT * + amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev); + sub_type_hdr->maximum_bandwidth_mbs = + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, + peer_kdev->adev, false); + sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ? + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0; + } else { + bool is_single_hop = kdev->kfd == peer_kdev->kfd; + int weight = is_single_hop ? KFD_CRAT_INTRA_SOCKET_WEIGHT : + (2 * KFD_CRAT_INTRA_SOCKET_WEIGHT) + KFD_CRAT_XGMI_WEIGHT; + int mem_bw = 819200; + + sub_type_hdr->weight_xgmi = weight; + sub_type_hdr->maximum_bandwidth_mbs = is_single_hop ? mem_bw : 0; + sub_type_hdr->minimum_bandwidth_mbs = is_single_hop ? mem_bw : 0; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h index 3d0e533b93b9..fc719389b5d6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h @@ -275,7 +275,7 @@ struct crat_subtype_iolink { uint32_t maximum_bandwidth_mbs; uint32_t recommended_transfer_size; uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH - 1]; - uint8_t num_hops_xgmi; + uint8_t weight_xgmi; }; /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index ec5f85ff34e5..971a3aa3294a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -702,6 +702,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (amdgpu_use_xgmi_p2p) kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; + /* + * For GFX9.4.3, the KFD abstracts all partitions within a socket as + * xGMI connected in the topology so assign a unique hive id per + * device based on the pci device location if device is in PCIe mode. + */ + if (!kfd->hive_id && (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) && kfd->num_nodes > 1) + kfd->hive_id = pci_dev_id(kfd->adev->pdev); + kfd->noretry = kfd->adev->gmc.noretry; /* If CRAT is broken, won't set iommu enabled */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 6d958bf0fe90..d3e70341dfad 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1926,7 +1926,11 @@ int kfd_topology_add_device(struct kfd_node *gpu) dev->node_props.capability |= ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) & HSA_CAP_ASIC_REVISION_MASK); + dev->node_props.location_id = pci_dev_id(gpu->adev->pdev); + if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3)) + dev->node_props.location_id |= dev->gpu->node_id; + dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus); dev->node_props.max_engine_clk_fcompute = amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev); -- cgit v1.2.3 From 1bd6dd21fcd53ac78a9018b96699ef1aa99a3e59 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 31 May 2022 16:31:28 -0400 Subject: drm/amdkfd: Add SDMA info for SDMA 4.4.2 Update SDMA queue information for SDMA 4.4.2. 
Signed-off-by: Mukul Joshi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 971a3aa3294a..8e5d785b8824 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -82,6 +82,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) case IP_VERSION(4, 2, 0):/* VEGA20 */ case IP_VERSION(4, 2, 2):/* ARCTURUS */ case IP_VERSION(4, 4, 0):/* ALDEBARAN */ + case IP_VERSION(4, 4, 2): case IP_VERSION(5, 0, 0):/* NAVI10 */ case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ case IP_VERSION(5, 0, 2):/* NAVI14 */ -- cgit v1.2.3 From d1d22df174ae512c57374f517e346b608f61555c Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 7 Jun 2022 14:46:18 -0400 Subject: drm/amdkfd: Populate memory info before adding GPU node to topology The local memory info needs to be fetched before the GPU node is added to topology. Without this, the sysfs is incorrectly populated and the size is reported as 0. This was causing rocr tests to fail. This issue was caused because of a bad merge. Signed-off-by: Mukul Joshi Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 8e5d785b8824..829e32433faf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -743,6 +743,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->max_proc_per_quantum = max_proc_per_quantum; atomic_set(&node->sram_ecc_flag, 0); + amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); + dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); for (i = 0; i < kfd->num_nodes; i++) { @@ -793,8 +795,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); - kfd->init_complete = true; dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); -- cgit v1.2.3 From 8078f1c610fdcdd8003e2c538fb04af41fa5c269 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 29 Jun 2022 11:41:53 +0530 Subject: drm/amdgpu: Change num_xcd to xcc_mask Instead of number of XCCs, keep a mask of XCCs for the exact XCCs available on the ASIC. XCC configuration could differ based on different ASIC configs. 
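As a rough illustration of the benefit (stand-alone userspace C, not part of the patch; the sparse mask value is a made-up configuration): a mask records exactly which XCC instances exist, and the count falls out of a population count, which is what the NUM_XCC() helper added in the diff below does with hweight16().

/* Illustrative sketch only: counting and iterating XCCs from a mask. */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's hweight16()-based NUM_XCC() macro. */
#define NUM_XCC(x) __builtin_popcount((uint16_t)(x))

int main(void)
{
	uint16_t full_mask   = 0x00FF;	/* XCC0-7 all present */
	uint16_t sparse_mask = 0x00F5;	/* hypothetical config with gaps */
	int i;

	printf("full mask: %d XCCs\n", NUM_XCC(full_mask));	/* 8 */
	printf("sparse mask: %d XCCs\n", NUM_XCC(sparse_mask));	/* 6 */

	/* Iterate only over the XCC instances that actually exist. */
	for (i = 0; i < 16; i++)
		if (sparse_mask & (1u << i))
			printf("XCC %d present\n", i);

	return 0;
}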
v2: Rename num_xcd to num_xcc (Hawking) Use smaller xcc_mask size, changed to u16 (Le) Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 21 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 133 ++++++++++++++++++------------- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 67 ++++++++++------ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 10 +-- 7 files changed, 141 insertions(+), 99 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 76438f197de1..069b259f384c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -209,12 +209,12 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe, adev->gfx.num_compute_rings); - int num_xcd = (adev->gfx.num_xcd > 1) ? adev->gfx.num_xcd : 1; + int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; if (multipipe_policy) { /* policy: make queues evenly cross all pipes on MEC1 only * for multiple xcc, just use the original policy for simplicity */ - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { for (i = 0; i < max_queues_per_mec; i++) { pipe = i % adev->gfx.mec.num_pipe_per_mec; queue = (i / adev->gfx.mec.num_pipe_per_mec) % @@ -226,13 +226,13 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) } } else { /* policy: amdgpu owns all queues in the given pipe */ - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { for (i = 0; i < max_queues_per_mec; ++i) set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap); } } - for (j = 0; j < num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)); } @@ -1207,23 +1207,24 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); enum amdgpu_gfx_partition mode; - int ret; + int ret = 0, num_xcc; - if (adev->gfx.num_xcd % 2 != 0) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + if (num_xcc % 2 != 0) return -EINVAL; if (!strncasecmp("SPX", buf, strlen("SPX"))) { mode = AMDGPU_SPX_PARTITION_MODE; } else if (!strncasecmp("DPX", buf, strlen("DPX"))) { - if (adev->gfx.num_xcd != 4 || adev->gfx.num_xcd != 8) + if (num_xcc != 4 || num_xcc != 8) return -EINVAL; mode = AMDGPU_DPX_PARTITION_MODE; } else if (!strncasecmp("TPX", buf, strlen("TPX"))) { - if (adev->gfx.num_xcd != 6) + if (num_xcc != 6) return -EINVAL; mode = AMDGPU_TPX_PARTITION_MODE; } else if (!strncasecmp("QPX", buf, strlen("QPX"))) { - if (adev->gfx.num_xcd != 8) + if (num_xcc != 8) return -EINVAL; mode = AMDGPU_QPX_PARTITION_MODE; } else if (!strncasecmp("CPX", buf, strlen("CPX"))) { @@ -1253,7 +1254,7 @@ static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev, char *supported_partition; /* TBD */ - switch (adev->gfx.num_xcd) { + switch (NUM_XCC(adev->gfx.xcc_mask)) { case 8: supported_partition = "SPX, DPX, QPX, CPX"; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8df36527aee9..93f9875154db 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -64,6 +64,8 @@ enum amdgpu_gfx_partition { AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, }; +#define NUM_XCC(x) hweight16(x) + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -396,7 +398,7 @@ struct amdgpu_gfx { bool cp_gfx_shadow; /* for gfx11 */ enum amdgpu_gfx_partition partition_mode; - uint32_t num_xcd; + uint16_t xcc_mask; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 91814dc083c9..da69177dc76f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4536,7 +4536,7 @@ static int gfx_v9_0_early_init(void *handle) adev->gfx.num_gfx_rings = 0; else adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; - adev->gfx.num_xcd = 1; + adev->gfx.xcc_mask = 1; adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_0_set_kiq_pm4_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 52185b1d5d31..c776fc5884de 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -177,16 +177,19 @@ static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = { static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev) { - int i; - for (i = 0; i < adev->gfx.num_xcd; i++) + int i, num_xcc; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs; } static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 2; i < adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 2; i < num_xcc; i++) WREG32_SOC15(GC, i, regGRBM_MCM_ADDR, 0x4); } @@ -499,7 +502,7 @@ static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev) static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) { - int r, i; + int r, i, num_xcc; u32 *hpd; const __le32 *fw_data; unsigned fw_size; @@ -508,7 +511,8 @@ static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev) const struct gfx_firmware_header_v1_0 *mec_hdr; - for (i = 0; i < adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); @@ -683,23 +687,24 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, enum amdgpu_gfx_partition mode) { u32 tmp = 0; - int num_xcc_per_partition, i; + int num_xcc_per_partition, i, num_xcc; if (mode == adev->gfx.partition_mode) return mode; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd; + num_xcc_per_partition = num_xcc; break; case AMDGPU_DPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 2; + num_xcc_per_partition = num_xcc / 2; break; case AMDGPU_TPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 3; + num_xcc_per_partition = num_xcc / 3; break; case AMDGPU_QPX_PARTITION_MODE: - num_xcc_per_partition = adev->gfx.num_xcd / 4; + num_xcc_per_partition = num_xcc / 4; break; case AMDGPU_CPX_PARTITION_MODE: num_xcc_per_partition = 1; @@ -712,7 +717,7 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, * Stop user queues and threads, and make sure GPU is empty of work. 
*/ - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, num_xcc_per_partition); tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, @@ -836,7 +841,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, static int gfx_v9_4_3_sw_init(void *handle) { - int i, j, k, r, ring_id, xcc_id; + int i, j, k, r, ring_id, xcc_id, num_xcc; struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -844,6 +849,8 @@ static int gfx_v9_4_3_sw_init(void *handle) adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + /* EOP Event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); if (r) @@ -877,8 +884,7 @@ static int gfx_v9_4_3_sw_init(void *handle) /* set up the compute queues - allocate horizontally across pipes */ ring_id = 0; - for (xcc_id = 0; xcc_id < adev->gfx.num_xcd; xcc_id++) { - + for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) { for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; @@ -930,14 +936,14 @@ static int gfx_v9_4_3_sw_init(void *handle) static int gfx_v9_4_3_sw_fini(void *handle) { - int i; + int i, num_xcc; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - for (i = 0; i < adev->gfx.num_compute_rings * - adev->gfx.num_xcd; i++) + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { amdgpu_gfx_mqd_sw_fini(adev, i); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring); amdgpu_gfx_kiq_fini(adev, i); @@ -1050,9 +1056,10 @@ static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev, int xcc_id) static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) { u32 tmp; - int i, j; + int i, j, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, GRBM_CNTL, READ_TIMEOUT, 0xff); gfx_v9_4_3_setup_rb(adev, i); } @@ -1064,7 +1071,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { - for (j = 0; j < adev->gfx.num_xcd; j++) { + for (j = 0; j < num_xcc; j++) { soc15_grbm_select(adev, 0, 0, 0, i, j); /* CP and shaders */ if (i == 0) { @@ -1092,7 +1099,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); - for (i = 0; i < adev->gfx.num_xcd; i++) { + for (i = 0; i < num_xcc; i++) { gfx_v9_4_3_init_compute_vmid(adev, i); gfx_v9_4_3_init_gds_vmid(adev, i); } @@ -1150,8 +1157,10 @@ static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id) static void gfx_v9_4_3_program_xcc_id(struct amdgpu_device *adev, int xcc_id) { uint32_t tmp = 0; + int num_xcc; - switch (adev->gfx.num_xcd) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + switch (num_xcc) { /* directly config VIRTUAL_XCC_ID to 0 for 1-XCC */ case 1: WREG32_SOC15(GC, xcc_id, regCP_HYP_XCP_CTL, 0x8); @@ -1288,9 +1297,10 @@ static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev, static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) { - int i; + 
int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); gfx_v9_4_3_wait_for_rlc_serdes(adev, i); @@ -1299,9 +1309,10 @@ static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev) static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); @@ -1314,9 +1325,10 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev) #ifdef AMDGPU_RLC_DEBUG_RETRY u32 rlc_ucode_ver; #endif - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 1); udelay(50); @@ -1377,11 +1389,12 @@ static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev) { - int r, i; + int r, i, num_xcc; adev->gfx.rlc.funcs->stop(adev); - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* disable CG */ WREG32_SOC15(GC, i, regRLC_CGCG_CGLS_CTRL, 0); @@ -1954,10 +1967,11 @@ done: static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { - int r, i, j; + int r, i, j, num_xcc; struct amdgpu_ring *ring; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -2021,12 +2035,13 @@ static int gfx_v9_4_3_hw_init(void *handle) static int gfx_v9_4_3_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (amdgpu_gfx_disable_kcq(adev, i)) DRM_ERROR("XCD %d KCQ disable failed\n", i); @@ -2069,9 +2084,10 @@ static int gfx_v9_4_3_resume(void *handle) static bool gfx_v9_4_3_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (REG_GET_FIELD(RREG32_SOC15(GC, i, regGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) return false; @@ -2183,30 +2199,30 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, static int gfx_v9_4_3_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int num_xcc; - /* hardcode in emulation phase */ - adev->gfx.num_xcd = 1; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); adev->gfx.partition_mode = amdgpu_user_partt_mode; /* calculate the num_xcc_in_xcp for the partition mode*/ switch (amdgpu_user_partt_mode) { case AMDGPU_SPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd; + adev->gfx.num_xcc_per_xcp = num_xcc; break; case AMDGPU_DPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 2; + adev->gfx.num_xcc_per_xcp = num_xcc / 2; break; case 
AMDGPU_TPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 3; + adev->gfx.num_xcc_per_xcp = num_xcc / 3; break; case AMDGPU_QPX_PARTITION_MODE: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd / 4; + adev->gfx.num_xcc_per_xcp = num_xcc / 4; break; case AMDGPU_CPX_PARTITION_MODE: adev->gfx.num_xcc_per_xcp = 1; break; default: - adev->gfx.num_xcc_per_xcp = adev->gfx.num_xcd; + adev->gfx.num_xcc_per_xcp = num_xcc; break; } @@ -2404,14 +2420,15 @@ static int gfx_v9_4_3_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; + int i, num_xcc; if (amdgpu_sriov_vf(adev)) return 0; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 3): - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) gfx_v9_4_3_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE, i); break; @@ -2739,12 +2756,13 @@ static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; + int i, num_xcc; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); @@ -2761,12 +2779,13 @@ static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; + int i, num_xcc; + num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (state) { case AMDGPU_IRQ_STATE_DISABLE: case AMDGPU_IRQ_STATE_ENABLE: - for (i = 0; i < adev->gfx.num_xcd; i++) + for (i = 0; i < num_xcc; i++) WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); @@ -2783,8 +2802,10 @@ static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - int i; - for (i = 0; i < adev->gfx.num_xcd; i++) { + int i, num_xcc; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { switch (type) { case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state, i); @@ -2842,6 +2863,7 @@ static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, /* Per-queue interrupt is supported for MEC starting from VI. * The interrupt can only be enabled/disabled per pipe instead of per queue. 
*/ + if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) amdgpu_fence_process(ring); } @@ -3056,9 +3078,10 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) { - int i, j; + int i, j, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; for (j = 0; j < adev->gfx.num_compute_rings; j++) diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 1bb17d95f720..e35365ab3f1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -43,9 +43,10 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint64_t page_table_base) { struct amdgpu_vmhub *hub; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; WREG32_SOC15_OFFSET(GC, i, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -56,13 +57,14 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); + } } static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) { uint64_t pt_base; - int i; + int i, num_xcc; if (adev->gmc.pdb0_bo) pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); @@ -74,7 +76,8 @@ static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev) /* If use GART for FB translation, vmid0 page table covers both * vram and system memory (gart) */ - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (adev->gmc.pdb0_bo) { WREG32_SOC15(GC, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, @@ -111,9 +114,10 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Program the AGP BAR */ WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BASE, 0); WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); @@ -177,9 +181,10 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Setup TLB control */ tmp = RREG32_SOC15(GC, i, regMC_VM_MX_L1_TLB_CNTL); @@ -202,9 +207,10 @@ static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { /* Setup L2 cache */ tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -249,9 +255,10 @@ static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev) static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) { uint32_t tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { tmp = 
RREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, @@ -266,9 +273,10 @@ static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev) static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { WREG32_SOC15(GC, i, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0XFFFFFFFF); @@ -295,7 +303,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) struct amdgpu_vmhub *hub; unsigned num_level, block_size; uint32_t tmp; - int i, j; + int i, j, num_xcc; num_level = adev->vm_manager.num_level; block_size = adev->vm_manager.block_size; @@ -304,7 +312,8 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) else block_size -= 9; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; for (i = 0; i <= 14; i++) { tmp = RREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i); @@ -362,10 +371,12 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - unsigned i, j; + unsigned i, j, num_xcc; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; + for (i = 0 ; i < 18; ++i) { WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, i * hub->eng_addr_distance, 0xffffffff); @@ -377,9 +388,10 @@ static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) { - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { if (amdgpu_sriov_vf(adev)) { /* * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are @@ -413,9 +425,10 @@ static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; u32 tmp; - u32 i, j; + u32 i, j, num_xcc; - for (j = 0; j < adev->gfx.num_xcd; j++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (j = 0; j < num_xcc; j++) { hub = &adev->vmhub[AMDGPU_GFXHUB(j)]; /* Disable all tables */ for (i = 0; i < 16; i++) @@ -449,9 +462,10 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); @@ -490,9 +504,10 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_2_init(struct amdgpu_device *adev) { struct amdgpu_vmhub *hub; - int i; + int i, num_xcc; - for (i = 0; i < adev->gfx.num_xcd; i++) { + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + for (i = 0; i < num_xcc; i++) { hub = &adev->vmhub[AMDGPU_GFXHUB(i)]; hub->ctx0_ptb_addr_lo32 = diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 4b2c4ecd7253..2c322a25bf1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1733,7 +1733,8 @@ 
static int gmc_v9_0_sw_init(void *handle) adev->gmc.translate_further = adev->vm_manager.num_level > 1; break; case IP_VERSION(9, 4, 3): - bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), adev->gfx.num_xcd); + bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0), + NUM_XCC(adev->gfx.xcc_mask)); bitmap_set(adev->vmhubs_mask, AMDGPU_MMHUB0(0), adev->num_aid); amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 829e32433faf..df96c4c508a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -592,6 +592,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; + int num_xcd; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -601,16 +602,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - if (kfd->adev->gfx.num_xcd == 0 || kfd->adev->gfx.num_xcd == 1 || - kfd->adev->gfx.num_xcc_per_xcp == 0) + num_xcd = NUM_XCC(kfd->adev->gfx.xcc_mask); + if (num_xcd == 0 || num_xcd == 1 || kfd->adev->gfx.num_xcc_per_xcp == 0) kfd->num_nodes = 1; else - kfd->num_nodes = - kfd->adev->gfx.num_xcd/kfd->adev->gfx.num_xcc_per_xcp; + kfd->num_nodes = num_xcd / kfd->adev->gfx.num_xcc_per_xcp; if (kfd->num_nodes == 0) { dev_err(kfd_device, "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", - kfd->adev->gfx.num_xcd, kfd->adev->gfx.num_xcc_per_xcp); + num_xcd, kfd->adev->gfx.num_xcc_per_xcp); goto out; } -- cgit v1.2.3 From fe1f05df5919c67c3add49efb55e251a8d78ee4e Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 31 May 2022 14:39:36 -0400 Subject: drm/amdkfd: Rework kfd_locked handling Currently, even if kfd_locked is set, a process is first created and then removed to work around a race condition in updating kfd_locked flag. Rework kfd_locked handling to ensure no processes is created if kfd_locked is set. This is achieved by updating kfd_locked under kfd_processes_mutex. With this there is no need for kfd_locked to be an atomic counter. Instead, it can be a regular integer. 
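A kernel-style sketch of the reworked flow (illustrative only; example_alloc_process() is a hypothetical placeholder for the real process-creation path), showing the flag check and the creation happening under one mutex:

  /* kfd_locked is now a plain int, protected by kfd_processes_mutex */
  static int kfd_locked;
  static DEFINE_MUTEX(kfd_processes_mutex);

  struct kfd_process *example_create_process(void)
  {
          struct kfd_process *process;

          mutex_lock(&kfd_processes_mutex);
          if (kfd_locked > 0) {                   /* i.e. kfd_is_locked() */
                  mutex_unlock(&kfd_processes_mutex);
                  return ERR_PTR(-EINVAL);        /* nothing was created */
          }
          process = example_alloc_process();      /* hypothetical helper */
          mutex_unlock(&kfd_processes_mutex);
          return process;
  }

Because the check and the creation happen under the same lock, there is no longer a window in which a process exists while KFD is locked, so the old create-then-unreference workaround in kfd_open() can be dropped, as the diff below does.
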
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ------- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 21 ++++++++++++++++----- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 8 +++++++- 4 files changed, 25 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 45e8da125f70..8b9accecf49b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -146,13 +146,6 @@ static int kfd_open(struct inode *inode, struct file *filep) if (IS_ERR(process)) return PTR_ERR(process); - if (kfd_is_locked()) { - dev_dbg(kfd_device, "kfd is locked!\n" - "process %d unreferenced", process->pasid); - kfd_unref_process(process); - return -EAGAIN; - } - /* filep now owns the reference returned by kfd_create_process */ filep->private_data = process; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index df96c4c508a0..eb2b44fddf74 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -42,7 +42,7 @@ * once locked, kfd driver will stop any further GPU execution. * create process (open) will return -EAGAIN. */ -static atomic_t kfd_locked = ATOMIC_INIT(0); +static int kfd_locked; #ifdef CONFIG_DRM_AMDGPU_CIK extern const struct kfd2kgd_calls gfx_v7_kfd2kgd; @@ -880,7 +880,9 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) return ret; } - atomic_dec(&kfd_locked); + mutex_lock(&kfd_processes_mutex); + --kfd_locked; + mutex_unlock(&kfd_processes_mutex); for (i = 0; i < kfd->num_nodes; i++) { node = kfd->nodes[i]; @@ -893,21 +895,27 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) bool kfd_is_locked(void) { - return (atomic_read(&kfd_locked) > 0); + lockdep_assert_held(&kfd_processes_mutex); + return (kfd_locked > 0); } void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { struct kfd_node *node; int i; + int count; if (!kfd->init_complete) return; /* for runtime suspend, skip locking kfd */ if (!run_pm) { + mutex_lock(&kfd_processes_mutex); + count = ++kfd_locked; + mutex_unlock(&kfd_processes_mutex); + /* For first KFD device suspend all the KFD processes */ - if (atomic_inc_return(&kfd_locked) == 1) + if (count == 1) kfd_suspend_all_processes(); } @@ -933,7 +941,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) /* for runtime resume, skip unlocking kfd */ if (!run_pm) { - count = atomic_dec_return(&kfd_locked); + mutex_lock(&kfd_processes_mutex); + count = --kfd_locked; + mutex_unlock(&kfd_processes_mutex); + WARN_ONCE(count < 0, "KFD suspend / resume ref. 
error"); if (count == 0) ret = kfd_resume_all_processes(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 5cfebcc8b305..400b4dcbdf05 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -201,6 +201,8 @@ extern int amdgpu_no_queue_eviction_on_vm_fault; /* Enable eviction debug messages */ extern bool debug_evictions; +extern struct mutex kfd_processes_mutex; + enum cache_policy { cache_policy_coherent, cache_policy_noncoherent diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 9b1e84d33cdc..c3d43e6e5236 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -50,7 +50,7 @@ struct mm_struct; * Unique/indexed by mm_struct* */ DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE); -static DEFINE_MUTEX(kfd_processes_mutex); +DEFINE_MUTEX(kfd_processes_mutex); DEFINE_SRCU(kfd_processes_srcu); @@ -818,6 +818,12 @@ struct kfd_process *kfd_create_process(struct file *filep) */ mutex_lock(&kfd_processes_mutex); + if (kfd_is_locked()) { + mutex_unlock(&kfd_processes_mutex); + pr_debug("KFD is locked! Cannot create process"); + return ERR_PTR(-EINVAL); + } + /* A prior open of /dev/kfd could have already created the process. */ process = find_process(thread, false); if (process) { -- cgit v1.2.3 From 0c7315e7d5ef9b36ca4db32ffeb34a187cbaf231 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 10 Jun 2022 09:41:29 -0400 Subject: drm/amdkfd: Add device repartition support GFX9.4.3 will support dynamic repartitioning of the GPU through sysfs. Add device repartitioning support in KFD to repartition GPU from one mode to other. v2: squash in fix ("drm/amdkfd: Fix warning kgd2kfd_unlock_kfd defined but not used") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 22 +++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 5 +---- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 21 +++++++++++++++++++++ 5 files changed, 66 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 9d19c7ceda3f..bbbfe9ec4adf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -773,3 +773,13 @@ bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) else return false; } + +int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev) +{ + return kgd2kfd_check_and_lock_kfd(); +} + +void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev) +{ + kgd2kfd_unlock_kfd(); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index df07e212c21e..d1d643a050a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -151,6 +151,8 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev); +int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev); +void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev); int amdgpu_amdkfd_submit_ib(struct 
amdgpu_device *adev, enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, @@ -373,6 +375,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd); void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask); +int kgd2kfd_check_and_lock_kfd(void); +void kgd2kfd_unlock_kfd(void); #else static inline int kgd2kfd_init(void) { @@ -438,5 +442,14 @@ static inline void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { } + +static inline int kgd2kfd_check_and_lock_kfd(void) +{ + return 0; +} + +static inline void kgd2kfd_unlock_kfd(void) +{ +} #endif #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 069b259f384c..69bac5b801ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1233,10 +1233,30 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return -EINVAL; } + if (!adev->kfd.init_complete) + return -EPERM; + mutex_lock(&adev->gfx.partition_mutex); - ret = adev->gfx.funcs->switch_partition_mode(adev, mode); + if (mode == adev->gfx.funcs->query_partition_mode(adev)) + goto out; + + ret = amdgpu_amdkfd_check_and_lock_kfd(adev); + if (ret) + goto out; + + amdgpu_amdkfd_device_fini_sw(adev); + + adev->gfx.funcs->switch_partition_mode(adev, mode); + + amdgpu_amdkfd_device_probe(adev); + amdgpu_amdkfd_device_init(adev); + /* If KFD init failed, return failure */ + if (!adev->kfd.init_complete) + ret = -EIO; + amdgpu_amdkfd_unlock_kfd(adev); +out: mutex_unlock(&adev->gfx.partition_mutex); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c776fc5884de..47d8ac64e877 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -675,7 +675,7 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, static enum amdgpu_gfx_partition gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) { - enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + enum amdgpu_gfx_partition mode = adev->gfx.partition_mode; if (adev->nbio.funcs->get_compute_partition_mode) mode = adev->nbio.funcs->get_compute_partition_mode(adev); @@ -689,9 +689,6 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, u32 tmp = 0; int num_xcc_per_partition, i, num_xcc; - if (mode == adev->gfx.partition_mode) - return mode; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index eb2b44fddf74..293787290e36 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -1356,6 +1356,27 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) kfd_get_num_sdma_engines(node); } +int kgd2kfd_check_and_lock_kfd(void) +{ + mutex_lock(&kfd_processes_mutex); + if (!hash_empty(kfd_processes_table) || kfd_is_locked()) { + mutex_unlock(&kfd_processes_mutex); + return -EBUSY; + } + + ++kfd_locked; + mutex_unlock(&kfd_processes_mutex); + + return 0; +} + +void kgd2kfd_unlock_kfd(void) +{ + mutex_lock(&kfd_processes_mutex); + --kfd_locked; + mutex_unlock(&kfd_processes_mutex); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS -- cgit v1.2.3 From 
8e7fd19380f9187dae3ad18a61793b1752dfa097 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 16 Nov 2022 17:15:47 +0530 Subject: drm/amdgpu: Switch to SOC partition funcs For GFXv9.4.3, use SOC level partition switch implementation rather than keeping them at GFX IP level. Change the exisiting implementation in GFX IP for keeping partition mode and restrict it to only GFX related switch. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 31 ++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 5 -- .../gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 59 ++++------------------ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 +-- 5 files changed, 20 insertions(+), 87 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 5ff49737d7c6..f895a4b8ca0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -28,6 +28,7 @@ #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" #include "amdgpu_ras.h" +#include "amdgpu_xcp.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -1170,10 +1171,10 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - enum amdgpu_gfx_partition mode; + int mode; char *partition_mode; - mode = adev->gfx.funcs->query_partition_mode(adev); + mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: @@ -1254,31 +1255,7 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return -EINVAL; } - if (!adev->kfd.init_complete) - return -EPERM; - - mutex_lock(&adev->gfx.partition_mutex); - - if (mode == adev->gfx.funcs->query_partition_mode(adev)) - goto out; - - ret = amdgpu_amdkfd_check_and_lock_kfd(adev); - if (ret) - goto out; - - amdgpu_amdkfd_device_fini_sw(adev); - - adev->gfx.funcs->switch_partition_mode(adev, mode); - - amdgpu_amdkfd_device_probe(adev); - amdgpu_amdkfd_device_init(adev); - /* If KFD init failed, return failure */ - if (!adev->kfd.init_complete) - ret = -EIO; - - amdgpu_amdkfd_unlock_kfd(adev); -out: - mutex_unlock(&adev->gfx.partition_mutex); + ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8be4ab50b171..2287768ed141 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -278,11 +278,7 @@ struct amdgpu_gfx_funcs { (*query_partition_mode)(struct amdgpu_device *adev); enum amdgpu_memory_partition (*query_mem_partition_mode)(struct amdgpu_device *adev); - int (*switch_partition_mode)(struct amdgpu_device *adev, - enum amdgpu_gfx_partition mode); - - int (*switch_gfx_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); }; @@ -416,7 +412,6 @@ struct amdgpu_gfx { bool cp_gfx_shadow; /* for gfx11 */ - enum amdgpu_gfx_partition partition_mode; uint16_t xcc_mask; enum amdgpu_memory_partition mem_partition_mode; uint32_t num_xcc_per_xcp; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c index a9de229a2828..bbcdececfd2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c +++ 
b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c @@ -307,8 +307,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, goto unlock; num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode); - if (adev->gfx.funcs->switch_gfx_partition_mode) - adev->gfx.funcs->switch_gfx_partition_mode(xcp_mgr->adev, + if (adev->gfx.funcs->switch_partition_mode) + adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev, num_xcc_per_xcp); if (adev->nbio.funcs->set_compute_partition_mode) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 93a0baa4515c..d684037a7a5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -38,6 +38,7 @@ #include "gc/gc_9_4_3_sh_mask.h" #include "gfx_v9_4_3.h" +#include "amdgpu_xcp.h" MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin"); @@ -614,61 +615,23 @@ gfx_v9_4_3_query_memory_partition(struct amdgpu_device *adev) return mode; } -static enum amdgpu_gfx_partition -gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) -{ - enum amdgpu_gfx_partition mode = adev->gfx.partition_mode; - - if (adev->nbio.funcs->get_compute_partition_mode) - mode = adev->nbio.funcs->get_compute_partition_mode(adev); - - return mode; -} - static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, - enum amdgpu_gfx_partition mode) + int num_xccs_per_xcp) { + int i, num_xcc; u32 tmp = 0; - int num_xcc_per_partition, i, num_xcc; num_xcc = NUM_XCC(adev->gfx.xcc_mask); - switch (mode) { - case AMDGPU_SPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc; - break; - case AMDGPU_DPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 2; - break; - case AMDGPU_TPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 3; - break; - case AMDGPU_QPX_PARTITION_MODE: - num_xcc_per_partition = num_xcc / 4; - break; - case AMDGPU_CPX_PARTITION_MODE: - num_xcc_per_partition = 1; - break; - default: - return -EINVAL; - } - - /* TODO: - * Stop user queues and threads, and make sure GPU is empty of work. 
- */ for (i = 0; i < num_xcc; i++) { tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP, - num_xcc_per_partition); + num_xccs_per_xcp); tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID, - i % num_xcc_per_partition); + i % num_xccs_per_xcp); WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL, tmp); } - if (adev->nbio.funcs->set_compute_partition_mode) - adev->nbio.funcs->set_compute_partition_mode(adev, mode); - - adev->gfx.num_xcc_per_xcp = num_xcc_per_partition; - adev->gfx.partition_mode = mode; + adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp; return 0; } @@ -680,7 +643,6 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs, .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q, - .query_partition_mode = &gfx_v9_4_3_query_compute_partition, .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition, .query_mem_partition_mode = &gfx_v9_4_3_query_memory_partition, }; @@ -1899,10 +1861,6 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) return r; } - if (adev->gfx.partition_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) - gfx_v9_4_3_switch_compute_partition(adev, - amdgpu_user_partt_mode); - /* set the virtual and physical id based on partition_mode */ gfx_v9_4_3_xcc_program_xcc_id(adev, xcc_id); @@ -1931,6 +1889,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { int r, i, num_xcc; + if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode); + num_xcc = NUM_XCC(adev->gfx.xcc_mask); for (i = 0; i < num_xcc; i++) { r = gfx_v9_4_3_xcc_cp_resume(adev, i); @@ -2146,8 +2107,6 @@ static int gfx_v9_4_3_early_init(void *handle) num_xcc = NUM_XCC(adev->gfx.xcc_mask); - adev->gfx.partition_mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; - adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), AMDGPU_MAX_COMPUTE_RINGS); gfx_v9_4_3_set_kiq_pm4_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 293787290e36..7a963d0a34e2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -34,6 +34,7 @@ #include "kfd_smi_events.h" #include "kfd_migrate.h" #include "amdgpu.h" +#include "amdgpu_xcp.h" #define MQD_SIZE_ALIGNED 768 @@ -592,7 +593,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; - int num_xcd; + int num_xcd, partition_mode; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -644,8 +645,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * If the VMID range changes for GFX9.4.3, then this code MUST be * revisited. 
*/ + partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr); if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { vmid_num_kfd /= 2; first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; @@ -761,7 +763,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->start_xcc_id = node->num_xcc_per_node * i; if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { /* For GFX9.4.3 and CPX mode, first XCD gets VMID range * 4-9 and second XCD gets VMID range 10-15. -- cgit v1.2.3 From 8c45a8340dd097ea0d6be6f718c4882283d9645d Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Tue, 24 Jan 2023 10:10:14 -0500 Subject: drm/amdkfd: Cleanup KFD nodes creation kfd node allocation outside kfd->num_nodes loop is not needed and causes memory leak because kfd->num_nodes is at least equal to 1. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 7a963d0a34e2..d7cffd91f1d7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -729,26 +729,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, /* TODO: Needs to be updated for memory partitioning */ svm_migrate_init(kfd->adev); - /* Allocate the KFD node */ - node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); - if (!node) { - dev_err(kfd_device, "Error allocating KFD node\n"); - goto node_alloc_error; - } - - node->adev = kfd->adev; - node->kfd = kfd; - node->kfd2kgd = kfd->kfd2kgd; - node->vm_info.vmid_num_kfd = vmid_num_kfd; - node->vm_info.first_vmid_kfd = first_vmid_kfd; - node->vm_info.last_vmid_kfd = last_vmid_kfd; - node->max_proc_per_quantum = max_proc_per_quantum; - atomic_set(&node->sram_ecc_flag, 0); - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); + + /* Allocate the KFD nodes */ for (i = 0; i < kfd->num_nodes; i++) { node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); if (!node) -- cgit v1.2.3 From ded7d99eb5b78931cec30dd49cd4097d0ac770e1 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 16 Jan 2023 10:55:38 +0530 Subject: drm/amdgpu: Add flags for partition mode query It's not required to take lock on all cases while querying partition mode. Querying partition mode during KFD init process doesn't need to take a lock. Init process after a switch will already be happening under lock. Control the behaviour by adding flags to xcp_query_partition_mode. 
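A rough sketch of the flag-controlled locking this patch introduces; the EX_* names and example_xcp_mgr type below are illustrative stand-ins, not driver symbols:

  #define EX_XCP_FL_NONE          0
  #define EX_XCP_FL_LOCKED        (1 << 0)

  struct example_xcp_mgr {
          struct mutex xcp_lock;
          int mode;
  };

  static int example_query_partition_mode(struct example_xcp_mgr *mgr, u32 flags)
  {
          int mode;

          if (!(flags & EX_XCP_FL_LOCKED))        /* caller does not hold xcp_lock */
                  mutex_lock(&mgr->xcp_lock);
          mode = mgr->mode;
          if (!(flags & EX_XCP_FL_LOCKED))
                  mutex_unlock(&mgr->xcp_lock);
          return mode;
  }

In the diff below, the sysfs query path passes AMDGPU_XCP_FL_NONE and takes the lock, while kgd2kfd_device_init(), which runs while a partition switch already holds the lock, passes AMDGPU_XCP_FL_LOCKED.
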
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 8 +++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 5 ++++- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4 +++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- 5 files changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 70c6099353b8..1487ecac2705 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1177,7 +1177,8 @@ static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev, int mode; char *partition_mode; - mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr); + mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr, + AMDGPU_XCP_FL_NONE); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index f59bc450cabe..5b999e5334bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -170,7 +170,7 @@ out: return ret; } -int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) { int mode; @@ -180,7 +180,8 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) return xcp_mgr->mode; - mutex_lock(&xcp_mgr->xcp_lock); + if (!(flags & AMDGPU_XCP_FL_LOCKED)) + mutex_lock(&xcp_mgr->xcp_lock); mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr); if (mode != xcp_mgr->mode) dev_WARN( @@ -188,7 +189,8 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) "Cached partition mode %d not matching with device mode %d", xcp_mgr->mode, mode); - mutex_unlock(&xcp_mgr->xcp_lock); + if (!(flags & AMDGPU_XCP_FL_LOCKED)) + mutex_unlock(&xcp_mgr->xcp_lock); return mode; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index f0b973c6092f..9fa6f0ea2061 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -30,6 +30,9 @@ #define AMDGPU_XCP_MODE_NONE -1 +#define AMDGPU_XCP_FL_NONE 0 +#define AMDGPU_XCP_FL_LOCKED (1 << 0) + enum AMDGPU_XCP_IP_BLOCK { AMDGPU_XCP_GFXHUB, AMDGPU_XCP_GFX, @@ -99,7 +102,7 @@ int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs); -int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr); +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode); int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr, enum AMDGPU_XCP_IP_BLOCK ip, int instance); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 42877c4505f1..69867294117e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1940,7 +1940,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) { int r, i, num_xcc; - if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr) == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr, + AMDGPU_XCP_FL_NONE) == + 
AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, amdgpu_user_partt_mode); num_xcc = NUM_XCC(adev->gfx.xcc_mask); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index d7cffd91f1d7..4293cbf9ceb0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -645,7 +645,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * If the VMID range changes for GFX9.4.3, then this code MUST be * revisited. */ - partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr); + partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr, AMDGPU_XCP_FL_LOCKED); if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { -- cgit v1.2.3 From a75f2271a4936265c8a189ab06f9eb89e343b441 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 9 Feb 2023 14:44:13 +0530 Subject: drm/amdkfd: Add xcp reference to kfd node Fetch xcp information from xcp_mgr and also add xcc_mask to kfd node. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 19 +++++++++++++------ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 +++ 2 files changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 4293cbf9ceb0..647c3313c27e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -594,6 +594,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; int num_xcd, partition_mode; + int xcp_idx; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); @@ -603,11 +604,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - num_xcd = NUM_XCC(kfd->adev->gfx.xcc_mask); - if (num_xcd == 0 || num_xcd == 1 || kfd->adev->gfx.num_xcc_per_xcp == 0) - kfd->num_nodes = 1; - else - kfd->num_nodes = num_xcd / kfd->adev->gfx.num_xcc_per_xcp; + kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr); + if (kfd->num_nodes == 0) { dev_err(kfd_device, "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", @@ -735,7 +733,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->num_nodes); /* Allocate the KFD nodes */ - for (i = 0; i < kfd->num_nodes; i++) { + for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) { node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); if (!node) goto node_alloc_error; @@ -745,6 +743,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->kfd = kfd; node->kfd2kgd = kfd->kfd2kgd; node->vm_info.vmid_num_kfd = vmid_num_kfd; + node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx); + /* TODO : Check if error handling is needed */ + if (node->xcp) + amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX, + &node->xcc_mask); + else + node->xcc_mask = + (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1; + node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp); node->start_xcc_id = node->num_xcc_per_node * i; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 6e1c15682c28..559ac5efdc26 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -278,6 +278,9 @@ struct kfd_node { unsigned int start_xcc_id; /* Starting XCC instance * number 
for the node */ + uint32_t xcc_mask; /* Instance mask of XCCs present */ + struct amdgpu_xcp *xcp; + /* Interrupts */ struct kfifo ih_fifo; struct workqueue_struct *ih_wq; -- cgit v1.2.3 From c4050ff1a43eec08498b1ed876efc6213592dba0 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 9 Feb 2023 16:30:53 +0530 Subject: drm/amdkfd: Use xcc mask for identifying xcc Instead of start xcc id and number of xcc per node, use the xcc mask which is the mask of logical ids of xccs belonging to a parition. Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 9 +-- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 86 +++++++++++----------- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 71 +++++++++--------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 - drivers/gpu/drm/amd/amdkfd/kfd_process.c | 8 +- .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 8 +- 8 files changed, 95 insertions(+), 95 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 647c3313c27e..b5497d2ee984 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -745,15 +745,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->vm_info.vmid_num_kfd = vmid_num_kfd; node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx); /* TODO : Check if error handling is needed */ - if (node->xcp) + if (node->xcp) { amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX, &node->xcc_mask); - else + ++xcp_idx; + } else { node->xcc_mask = (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1; - - node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp); - node->start_xcc_id = node->num_xcc_per_node * i; + } if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && partition_mode == AMDGPU_CPX_PARTITION_MODE && diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2b5c4b2dd242..493b4b66f180 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -136,16 +136,14 @@ static void init_sdma_bitmaps(struct device_queue_manager *dqm) void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - int xcc = 0; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id; - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->program_sh_mem_settings( - dqm->dev->adev, qpd->vmid, - qpd->sh_mem_config, - qpd->sh_mem_ape1_base, - qpd->sh_mem_ape1_limit, - qpd->sh_mem_bases, - dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, + qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, + qpd->sh_mem_bases, xcc_id); } static void kfd_hws_hang(struct device_queue_manager *dqm) @@ -427,14 +425,14 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, static void program_trap_handler_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - int xcc = 0; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id; if (dqm->dev->kfd2kgd->program_trap_handler_settings) - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->program_trap_handler_settings( - dqm->dev->adev, qpd->vmid, - qpd->tba_addr, qpd->tma_addr, - 
dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, qpd->vmid, qpd->tba_addr, + qpd->tma_addr, xcc_id); } static int allocate_vmid(struct device_queue_manager *dqm, @@ -697,7 +695,8 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process struct kfd_process_device *pdd; int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; - int xcc = 0; + uint32_t xcc_mask = dev->xcc_mask; + int xcc_id; reg_sq_cmd.u32All = 0; reg_gfx_index.u32All = 0; @@ -742,11 +741,10 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL; reg_sq_cmd.bits.vm_id = vmid; - for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) - dev->kfd2kgd->wave_control_execute(dev->adev, - reg_gfx_index.u32All, - reg_sq_cmd.u32All, - dev->start_xcc_id + xcc); + for_each_inst(xcc_id, xcc_mask) + dev->kfd2kgd->wave_control_execute( + dev->adev, reg_gfx_index.u32All, + reg_sq_cmd.u32All, xcc_id); return 0; } @@ -1258,12 +1256,12 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) { - int xcc = 0, ret; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id, ret; - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { + for_each_inst(xcc_id, xcc_mask) { ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping( - dqm->dev->adev, pasid, vmid, - dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, pasid, vmid, xcc_id); if (ret) break; } @@ -1273,15 +1271,14 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, static void init_interrupts(struct device_queue_manager *dqm) { - unsigned int i, xcc; + uint32_t xcc_mask = dqm->dev->xcc_mask; + unsigned int i, xcc_id; for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { if (is_pipe_enabled(dqm, 0, i)) { - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->init_interrupts( - dqm->dev->adev, i, - dqm->dev->start_xcc_id + - xcc); + dqm->dev->adev, i, xcc_id); } } } @@ -2283,7 +2280,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) get_num_all_sdma_engines(dqm) * dev->kfd->device_info.num_sdma_queues_per_engine + (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * - dqm->dev->num_xcc_per_node); + NUM_XCC(dqm->dev->xcc_mask)); retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), @@ -2489,10 +2486,10 @@ static void seq_reg_dump(struct seq_file *m, int dqm_debugfs_hqds(struct seq_file *m, void *data) { struct device_queue_manager *dqm = data; + uint32_t xcc_mask = dqm->dev->xcc_mask; uint32_t (*dump)[2], n_regs; int pipe, queue; - int r = 0, xcc; - uint32_t inst; + int r = 0, xcc_id; uint32_t sdma_engine_start; if (!dqm->sched_running) { @@ -2500,16 +2497,18 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) return 0; } - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { - inst = dqm->dev->start_xcc_id + xcc; + for_each_inst(xcc_id, xcc_mask) { r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, - KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, - &dump, &n_regs, inst); + KFD_CIK_HIQ_PIPE, + KFD_CIK_HIQ_QUEUE, &dump, + &n_regs, xcc_id); if (!r) { - seq_printf(m, + seq_printf( + m, " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n", - inst, KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, - KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), + xcc_id, + KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1, + KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm), KFD_CIK_HIQ_QUEUE); seq_reg_dump(m, dump, n_regs); @@ -2524,13 +2523,16 @@ int 
dqm_debugfs_hqds(struct seq_file *m, void *data) dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; - r = dqm->dev->kfd2kgd->hqd_dump( - dqm->dev->adev, pipe, queue, &dump, &n_regs, inst); + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, + pipe, queue, + &dump, &n_regs, + xcc_id); if (r) break; - seq_printf(m, " Inst %d, CP Pipe %d, Queue %d\n", - inst, pipe, queue); + seq_printf(m, + " Inst %d, CP Pipe %d, Queue %d\n", + xcc_id, pipe, queue); seq_reg_dump(m, dump, n_regs); kfree(dump); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index d81125421aaf..863cf060af48 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -77,7 +77,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * - dev->num_xcc_per_node; + NUM_XCC(dev->xcc_mask); mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem + offset); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index c781314b213c..226132ec3714 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -128,7 +128,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev, (ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) * - node->num_xcc_per_node, + NUM_XCC(node->xcc_mask), &(mqd_mem_obj->gtt_mem), &(mqd_mem_obj->gpu_addr), (void *)&(mqd_mem_obj->cpu_ptr), true); @@ -482,7 +482,7 @@ static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd, memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc); init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); @@ -506,21 +506,21 @@ static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - int xcc, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + hiq_mqd_size * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + hiq_mqd_size * inst; err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd, pipe_id, queue_id, - p->doorbell_off, - start_inst+xcc); + p->doorbell_off, xcc_id); if (err) { - pr_debug("Failed to load HIQ MQD for XCC: %d\n", xcc); + pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst); break; } + ++inst; } return err; @@ -530,20 +530,21 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + hiq_mqd_size * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + hiq_mqd_size * inst; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, 
timeout, pipe_id, - queue_id, start_inst+xcc); + queue_id, xcc_id); if (err) { - pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + pr_debug("Destroy MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -573,7 +574,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++; memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc); init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); @@ -600,7 +601,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, m->compute_tg_chunk_size = 1; m->compute_current_logic_xcc_id = (local_xcc_start + xcc) % - mm->dev->num_xcc_per_node; + NUM_XCC(mm->dev->xcc_mask); switch (xcc) { case 0: @@ -633,7 +634,7 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, int xcc = 0; uint64_t size = mm->mqd_stride(mm, q); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { m = get_mqd(mqd + size * xcc); update_mqd(mm, m, q, minfo); @@ -661,24 +662,25 @@ static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; struct v9_mqd *m; uint64_t mqd_offset; - uint32_t start_inst = mm->dev->start_xcc_id; m = get_mqd(mqd); mqd_offset = m->cp_mqd_stride_size; - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + mqd_offset * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + mqd_offset * inst; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id, start_inst+xcc); + queue_id, xcc_id); if (err) { - pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + pr_debug("Destroy MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -690,21 +692,22 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, { /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t mqd_stride_size = mm->mqd_stride(mm, p); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + mqd_stride_size * xcc; - err = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, xcc_mqd, - pipe_id, queue_id, - (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms, start_inst+xcc); + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + mqd_stride_size * inst; + err = mm->dev->kfd2kgd->hqd_load( + mm->dev->adev, xcc_mqd, pipe_id, queue_id, + (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms, + xcc_id); if (err) { - pr_debug("Load MQD failed for xcc: %d\n", xcc); + pr_debug("Load MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -722,7 +725,7 @@ static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd, uint64_t mqd_stride_size = mm->mqd_stride(mm, q); u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0; - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { xcc_mqd = mqd + mqd_stride_size * xcc; xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack + q->ctx_save_restore_area_size * xcc); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 559ac5efdc26..02a90fd7f646 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -274,10 +274,6 @@ struct kfd_node { */ struct kfd_vmid_info vm_info; unsigned int id; /* topology stub index */ - unsigned int num_xcc_per_node; - unsigned int start_xcc_id; /* Starting XCC instance - * number for the node - */ uint32_t xcc_mask; /* Instance mask of XCCs present */ struct amdgpu_xcp *xcp; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index a6ff57f11472..7f7d1378a2f8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -2058,6 +2058,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); struct kfd_node *dev = pdd->dev; + uint32_t xcc_mask = dev->xcc_mask; int xcc = 0; /* @@ -2076,10 +2077,9 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, pdd->qpd.vmid); } else { - for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) - amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev, - pdd->process->pasid, type, - dev->start_xcc_id + xcc); + for_each_inst(xcc, xcc_mask) + amdgpu_amdkfd_flush_gpu_tlb_pasid( + dev->adev, pdd->process->pasid, type, xcc); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 2b2ae0c9902b..a3c23d07c7df 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -946,7 +946,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) seq_printf(m, " Compute queue on device %x\n", q->device->id); mqd_type = KFD_MQD_TYPE_CP; - num_xccs = q->device->num_xcc_per_node; + num_xccs = NUM_XCC(q->device->xcc_mask); break; default: seq_printf(m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index c7072fff778e..d2a42b6b1fa8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -469,7 +469,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, offs, "simd_count", dev->gpu ? (dev->node_props.simd_count * - dev->gpu->num_xcc_per_node) : 0); + NUM_XCC(dev->gpu->xcc_mask)) : 0); sysfs_show_32bit_prop(buffer, offs, "mem_banks_count", dev->node_props.mem_banks_count); sysfs_show_32bit_prop(buffer, offs, "caches_count", @@ -494,7 +494,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.wave_front_size); sysfs_show_32bit_prop(buffer, offs, "array_count", dev->gpu ? (dev->node_props.array_count * - dev->gpu->num_xcc_per_node) : 0); + NUM_XCC(dev->gpu->xcc_mask)) : 0); sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine", dev->node_props.simd_arrays_per_engine); sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array", @@ -558,7 +558,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); sysfs_show_32bit_prop(buffer, offs, "num_xcc", - dev->gpu->num_xcc_per_node); + NUM_XCC(dev->gpu->xcc_mask)); } return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute", @@ -1180,7 +1180,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) buf[4] = gpu->adev->pdev->bus->number; buf[5] = lower_32_bits(local_mem_size); buf[6] = upper_32_bits(local_mem_size); - buf[7] = gpu->start_xcc_id | (gpu->num_xcc_per_node << 16); + buf[7] = (ffs(gpu->xcc_mask) - 1) | (NUM_XCC(gpu->xcc_mask) << 16); for (i = 0, hashout = 0; i < 8; i++) hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH); -- cgit v1.2.3 From 4c6ce75fdd628c43aea11448ed41b52119dae42b Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 26 Jan 2023 18:11:29 -0500 Subject: drm/amdkfd: Show KFD node memory partition info Show KFD node memory partition id and size, add helper function KFD_XCP_MEMORY_SIZE to get kfd node memory size, will be used later to support memory accounting per partition. 
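For illustration, the per-node size reported via the new KFD_XCP_MEMORY_SIZE helper is simply the size of the memory partition backing the node divided by the number of compute partitions sharing it, with the full VRAM size as the fallback when memory is not partitioned. Below is a minimal sketch of that calculation using simplified stand-in types, not the real driver structures (the driver reads these values from adev->gmc and adev->xcp_mgr as shown in the hunk that follows):

#include <stdint.h>

/* Toy types for illustration only. */
struct toy_node {
	uint64_t mem_partition_size;        /* size of the memory partition backing this node */
	uint32_t num_xcp_per_mem_partition; /* compute partitions sharing that memory partition */
	uint64_t real_vram_size;            /* fallback when VRAM is not partitioned */
	int      has_mem_partitions;
};

static uint64_t toy_xcp_memory_size(const struct toy_node *n)
{
	if (n->has_mem_partitions)
		return n->mem_partition_size / n->num_xcp_per_mem_partition;
	return n->real_vram_size;
}

The dev_info message added in this change prints this value shifted right by 20, i.e. in MiB.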
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 5 +++++ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 ++++++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index e4e1dbba060a..324cb566ca2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -330,6 +330,11 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag); +#define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ + (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ + (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ + (n)->adev->gmc.real_vram_size) + #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index b5497d2ee984..db5b53fcdf11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -724,7 +724,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); - /* TODO: Needs to be updated for memory partitioning */ svm_migrate_init(kfd->adev); amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); @@ -754,6 +753,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1; } + if (node->xcp) { + dev_info(kfd_device, "KFD node %d partition %d size %lldM\n", + node->node_id, node->xcp->mem_id, + KFD_XCP_MEMORY_SIZE(node) >> 20); + } + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { -- cgit v1.2.3 From 315e29eca57f85107cc6f687c2d510aa532fb3f0 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 20 Mar 2023 11:21:38 -0400 Subject: drm/amdkfd: Move local_mem_info to kfd_node We need to track memory usage on a per partition basis. To do that, store the local memory information in KFD node instead of kfd device. 
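With partitioning, the local memory info reported for a node is derived from the node's own partition: on a partitioned GC 9.4.3 device the whole per-partition size is reported as public when all VRAM is CPU-visible and as private otherwise, while unpartitioned devices keep the usual visible/total split. A rough sketch of that decision with illustrative names only (the real logic lives in amdgpu_amdkfd_get_local_mem_info() in the diff below):

#include <stdint.h>

struct toy_mem_info {
	uint64_t public_size;  /* CPU-visible local memory */
	uint64_t private_size; /* local memory not visible to the CPU */
};

static void toy_get_local_mem_info(struct toy_mem_info *mi, uint64_t visible_vram,
				   uint64_t real_vram, uint64_t xcp_size, int partitioned)
{
	mi->public_size = 0;
	mi->private_size = 0;
	if (partitioned) {
		if (real_vram == visible_vram)
			mi->public_size = xcp_size;  /* fully CPU-visible partition */
		else
			mi->private_size = xcp_size;
	} else {
		mi->public_size = visible_vram;
		mi->private_size = real_vram - visible_vram;
	}
}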
v2: squash in fix ("amdkfd: Use mem_id to access mem_partition info") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 17 +++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12 +++++++----- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++++--- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 +++++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 7 ++++--- 7 files changed, 36 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 00edb13d2124..85df73f2c85e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -428,14 +428,23 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, } void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, - struct kfd_local_mem_info *mem_info) + struct kfd_local_mem_info *mem_info, + uint8_t xcp_id) { memset(mem_info, 0, sizeof(*mem_info)); - mem_info->local_mem_size_public = adev->gmc.visible_vram_size; - mem_info->local_mem_size_private = adev->gmc.real_vram_size - + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size) + mem_info->local_mem_size_public = + KFD_XCP_MEMORY_SIZE(adev, xcp_id); + else + mem_info->local_mem_size_private = + KFD_XCP_MEMORY_SIZE(adev, xcp_id); + } else { + mem_info->local_mem_size_public = adev->gmc.visible_vram_size; + mem_info->local_mem_size_private = adev->gmc.real_vram_size - adev->gmc.visible_vram_size; - + } mem_info->vram_width = adev->gmc.vram_width; pr_debug("Address base: %pap public 0x%llx private 0x%llx\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 4e6221bccffe..4bf6f5659568 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -231,7 +231,8 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem); uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum kgd_engine_type type); void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, - struct kfd_local_mem_info *mem_info); + struct kfd_local_mem_info *mem_info, + uint8_t xcp_id); uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev); uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev); @@ -334,10 +335,11 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) -#define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ - (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ - (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ - (n)->adev->gmc.real_vram_size) +#define KFD_XCP_MEMORY_SIZE(adev, xcp_id)\ + ((adev)->gmc.num_mem_partitions && (xcp_id) >= 0 ?\ + (adev)->gmc.mem_partitions[KFD_XCP_MEM_ID((adev), (xcp_id))].size /\ + (adev)->xcp_mgr->num_xcp_per_mem_partition :\ + (adev)->gmc.real_vram_size) #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index fcad90d53c9b..1ae867482bc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1023,11 
+1023,12 @@ bool kfd_dev_is_large_bar(struct kfd_node *dev) if (dev->kfd->use_iommu_v2) return false; - if (dev->kfd->local_mem_info.local_mem_size_private == 0 && - dev->kfd->local_mem_info.local_mem_size_public > 0) + if (dev->local_mem_info.local_mem_size_private == 0 && + dev->local_mem_info.local_mem_size_public > 0) return true; - if (dev->kfd->local_mem_info.local_mem_size_public == 0 && dev->kfd->adev->gmc.is_app_apu) { + if (dev->local_mem_info.local_mem_size_public == 0 && + dev->kfd->adev->gmc.is_app_apu) { pr_debug("APP APU, Consider like a large bar system\n"); return true; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 1aaf933f9f48..950af6820153 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -2191,7 +2191,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. */ - local_mem_info = kdev->kfd->local_mem_info; + local_mem_info = kdev->local_mem_info; sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index db5b53fcdf11..d41da964d2f5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -726,7 +726,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_migrate_init(kfd->adev); - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); @@ -756,7 +755,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (node->xcp) { dev_info(kfd_device, "KFD node %d partition %d size %lldM\n", node->node_id, node->xcp->mem_id, - KFD_XCP_MEMORY_SIZE(node) >> 20); + KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20); } if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && @@ -783,6 +782,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, } node->max_proc_per_quantum = max_proc_per_quantum; atomic_set(&node->sram_ecc_flag, 0); + + amdgpu_amdkfd_get_local_mem_info(kfd->adev, + &node->local_mem_info, node->xcp->id); + /* Initialize the KFD node */ if (kfd_init_node(node)) { dev_err(kfd_device, "Error initializing KFD node\n"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 44f4d5509db6..3bd222e8f6c3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -313,6 +313,8 @@ struct kfd_node { unsigned int compute_vmid_bitmap; + struct kfd_local_mem_info local_mem_info; + struct kfd_dev *kfd; }; @@ -335,7 +337,6 @@ struct kfd_dev { */ struct kgd2kfd_shared_resources shared_resources; - struct kfd_local_mem_info local_mem_info; const struct kfd2kgd_calls *kfd2kgd; struct mutex doorbell_mutex; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index dbb6159344b3..e0bacf017a40 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1152,8 +1152,8 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) if (!gpu) return 0; - local_mem_size = gpu->kfd->local_mem_info.local_mem_size_private + - gpu->kfd->local_mem_info.local_mem_size_public; + local_mem_size = gpu->local_mem_info.local_mem_size_private + + gpu->local_mem_info.local_mem_size_public; buf[0] = gpu->adev->pdev->devfn; buf[1] = gpu->adev->pdev->subsystem_vendor | (gpu->adev->pdev->subsystem_device << 16); @@ 
-1234,7 +1234,8 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * for APUs - If CRAT from ACPI reports more than one bank, then * all the banks will report the same mem_clk_max information */ - amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info, + dev->gpu->xcp->id); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; -- cgit v1.2.3 From 25f50704343de1bea70100ad41621b5737a6a96b Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 23 Mar 2023 08:45:56 -0400 Subject: drm/amdkfd: APU mode set max svm range pages svm_migrate_init set the max svm range pages based on the KFD nodes partition size. APU mode don't init pgmap because there is no migration. kgd2kfd_device_init calls svm_migrate_init after KFD nodes allocation and initialization. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 5 ++--- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 7 +++++-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 15 ++++++++++----- 3 files changed, 17 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index d41da964d2f5..882ff86bba08 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -724,9 +724,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); - svm_migrate_init(kfd->adev); - - dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); @@ -794,6 +791,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->nodes[i] = node; } + svm_migrate_init(kfd->adev); + if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 199d32c7c289..2512bf681112 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -1000,6 +1000,11 @@ int svm_migrate_init(struct amdgpu_device *adev) if (!KFD_IS_SOC15(kfddev->dev)) return -EINVAL; + svm_range_set_max_pages(adev); + + if (adev->gmc.is_app_apu) + return 0; + pgmap = &kfddev->pgmap; memset(pgmap, 0, sizeof(*pgmap)); @@ -1042,8 +1047,6 @@ int svm_migrate_init(struct amdgpu_device *adev) amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size)); - svm_range_set_max_pages(adev); - pr_info("HMM registered %ldMB device memory\n", size >> 20); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c1ab70faf36e..206851c9e642 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1938,14 +1938,19 @@ void svm_range_set_max_pages(struct amdgpu_device *adev) { uint64_t max_pages; uint64_t pages, _pages; + uint64_t min_pages = 0; + int i; + + for (i = 0; i < adev->kfd.dev->num_nodes; i++) { + pages = KFD_XCP_MEMORY_SIZE(adev, adev->kfd.dev->nodes[i]->xcp->id) >> 17; + pages = clamp(pages, 1ULL << 9, 1ULL << 18); + pages = rounddown_pow_of_two(pages); + min_pages = min_not_zero(min_pages, pages); + } - /* 1/32 VRAM size in pages */ - pages = adev->gmc.real_vram_size >> 17; - pages = clamp(pages, 1ULL << 9, 1ULL << 18); - pages = rounddown_pow_of_two(pages); do { max_pages = READ_ONCE(max_svm_range_pages); - _pages = min_not_zero(max_pages, pages); + _pages = min_not_zero(max_pages, min_pages); } while 
(cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages); } -- cgit v1.2.3 From 84b4dd3f84de424a68e1fda0d483530ddaa92b45 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 31 Mar 2023 11:18:12 -0400 Subject: drm/amdkfd: Refactor migrate init to support partition switch Rename svm_migrate_init to a better name, kgd2kfd_init_zone_device, because it sets up the zone device pgmap for page migration, and keep it in kfd_migrate.c so it can access the static functions in svm_migrate_pgmap_ops. Call it only once in amdgpu_device_ip_init after the adev IP blocks are initialized, but before amdgpu_amdkfd_device_init initializes the kfd nodes, which enable SVM support based on the pgmap. svm_range_set_max_pages is called by kgd2kfd_device_init every time after switching the compute partition mode. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +++----- drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 9 --------- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4 ++++ 6 files changed, 23 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 948d362adabb..48d12dbff968 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -372,6 +372,17 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { } #endif + +#if IS_ENABLED(CONFIG_HSA_AMD_SVM) +int kgd2kfd_init_zone_device(struct amdgpu_device *adev); +#else +static inline +int kgd2kfd_init_zone_device(struct amdgpu_device *adev) +{ + return 0; +} +#endif + /* KGD2KFD callbacks */ int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger); int kgd2kfd_resume_mm(struct mm_struct *mm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 02ee79b7b56d..f0666230b2ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2633,8 +2633,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) goto init_failed; /* Don't init kfd if whole hive need to be reset during init */ - if (!adev->gmc.xgmi.pending_reset) + if (!adev->gmc.xgmi.pending_reset) { + kgd2kfd_init_zone_device(adev); amdgpu_amdkfd_device_init(adev); + } amdgpu_fru_get_product_info(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 882ff86bba08..bf32e547182c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -32,6 +32,7 @@ #include "kfd_iommu.h" #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" +#include "kfd_svm.h" #include "kfd_migrate.h" #include "amdgpu.h" #include "amdgpu_xcp.h" @@ -791,7 +792,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->nodes[i] = node; } - svm_migrate_init(kfd->adev); + svm_range_set_max_pages(kfd->adev); if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 2512bf681112..35cf6558cf1b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -988,7 +988,7 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = { /* Each VRAM page uses sizeof(struct page) on system memory */ #define
SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page)) -int svm_migrate_init(struct amdgpu_device *adev) +int kgd2kfd_init_zone_device(struct amdgpu_device *adev) { struct amdgpu_kfd_dev *kfddev = &adev->kfd; struct dev_pagemap *pgmap; @@ -996,12 +996,10 @@ unsigned long size; void *r; - /* Page migration works on Vega10 or newer */ - if (!KFD_IS_SOC15(kfddev->dev)) + /* Page migration works on gfx9 or newer */ + if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1)) return -EINVAL; - svm_range_set_max_pages(adev); - if (adev->gmc.is_app_apu) return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index a5d7e6d22264..487f26368164 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -47,15 +47,6 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); -int svm_migrate_init(struct amdgpu_device *adev); - -#else - -static inline int svm_migrate_init(struct amdgpu_device *adev) -{ - return 0; -} - #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ #endif /* KFD_MIGRATE_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 021def496f5a..762679835e31 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -265,6 +265,10 @@ static inline int kfd_criu_resume_svm(struct kfd_process *p) return 0; } +static inline void svm_range_set_max_pages(struct amdgpu_device *adev) +{ +} + #define KFD_IS_SVM_API_SUPPORTED(dev) false #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ -- cgit v1.2.3 From 0409022c15623d91c112e51f38cb62633becd432 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 11 May 2023 17:01:03 +0800 Subject: drm/amdkfd: Fix null ptr access Avoid accessing a null xcp_mgr pointer. Signed-off-by: Hawking Zhang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index bf32e547182c..2cfef3f9456f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -644,12 +644,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * If the VMID range changes for GFX9.4.3, then this code MUST be * revisited. */ - partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr, AMDGPU_XCP_FL_LOCKED); - if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && - partition_mode == AMDGPU_CPX_PARTITION_MODE && - kfd->num_nodes != 1) { - vmid_num_kfd /= 2; - first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; + if (kfd->adev->xcp_mgr) { + partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr, + AMDGPU_XCP_FL_LOCKED); + if (partition_mode == AMDGPU_CPX_PARTITION_MODE && + kfd->num_nodes != 1) { + vmid_num_kfd /= 2; + first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; + } } /* Verify module parameters regarding mapped process number*/ -- cgit v1.2.3 From 9a3ce1a7a9e5372d8c275bf3fbef4456c8407145 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 12 May 2023 13:22:57 +0800 Subject: drm/amdgpu: Do not access members of xcp w/o check (v2) Not all ASICs need xcp. Ensure to check xcp availability before accessing its members.
v2: add missing change in kfd_topology.c Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 85df73f2c85e..739eb7c0d133 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -429,17 +429,17 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info, - uint8_t xcp_id) + struct amdgpu_xcp *xcp) { memset(mem_info, 0, sizeof(*mem_info)); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (xcp) { if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size) mem_info->local_mem_size_public = - KFD_XCP_MEMORY_SIZE(adev, xcp_id); + KFD_XCP_MEMORY_SIZE(adev, xcp->id); else mem_info->local_mem_size_private = - KFD_XCP_MEMORY_SIZE(adev, xcp_id); + KFD_XCP_MEMORY_SIZE(adev, xcp->id); } else { mem_info->local_mem_size_public = adev->gmc.visible_vram_size; mem_info->local_mem_size_private = adev->gmc.real_vram_size - diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 48d12dbff968..be43d71ba7ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -233,7 +233,7 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum kgd_engine_type type); void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info, - uint8_t xcp_id); + struct amdgpu_xcp *xcp); uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev); uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 2cfef3f9456f..986543a000bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -784,7 +784,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, atomic_set(&node->sram_ecc_flag, 0); amdgpu_amdkfd_get_local_mem_info(kfd->adev, - &node->local_mem_info, node->xcp->id); + &node->local_mem_info, node->xcp); /* Initialize the KFD node */ if (kfd_init_node(node)) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e0bacf017a40..8302d8967158 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1235,7 +1235,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * all the banks will report the same mem_clk_max information */ amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info, - dev->gpu->xcp->id); + dev->gpu->xcp); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; -- cgit v1.2.3 From 2ad00e753ae13b0c523a579fb04372787f77cce9 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 19 May 2023 10:44:41 +0530 Subject: drm/amdgpu: Fix uninitalized variable in kgd2kfd_device_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_device.c:613:4: error: variable 'num_xcd' is uninitialized when used here [-Werror,-Wuninitialized] num_xcd, kfd->adev->gfx.num_xcc_per_xcp); ^~~~~~~ include/linux/dev_printk.h:144:65: note: expanded from macro 'dev_err' dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__) ^~~~~~~~~~~ include/linux/dev_printk.h:110:23: note: expanded from macro 'dev_printk_index_wrap' _p_func(dev, fmt, ##__VA_ARGS__); \ ^~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_device.c:597:13: note: initialize the variable 'num_xcd' to silence this warning int num_xcd, partition_mode; ^ = 0 1 error generated. Cc: Luben Tuikov Cc: Alex Deucher Cc: Christian König Cc: Felix Kuehling Cc: Mukul Joshi Signed-off-by: Srinivasan Shanmugam Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 986543a000bf..e92b93b2c14c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -594,7 +594,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; - int num_xcd, partition_mode; + int partition_mode; int xcp_idx; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, @@ -609,8 +609,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd->num_nodes == 0) { dev_err(kfd_device, - "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", - num_xcd, kfd->adev->gfx.num_xcc_per_xcp); + "KFD num nodes cannot be 0, num_xcc_in_node: %d\n", + kfd->adev->gfx.num_xcc_per_xcp); goto out; } -- cgit v1.2.3 From 55a6dc60b47c817c644af2b505d46815d8b9219e Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 23 May 2023 11:59:53 -0400 Subject: drm/amdkfd: Set event interrupt class for GFX 9.4.3 Fix the warning during driver load because the event interrupt class is not set for GFX9.4.3. Signed-off-by: Mukul Joshi Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index e92b93b2c14c..862a50f7b490 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -139,6 +139,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 0): /* VEGA20 */ case IP_VERSION(9, 4, 1): /* ARCTURUS */ case IP_VERSION(9, 4, 2): /* ALDEBARAN */ + case IP_VERSION(9, 4, 3): /* GC 9.4.3 */ case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ -- cgit v1.2.3 From 28ebbb4981cb1fad12e0b1227dbecc88810b1ee8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 24 May 2023 14:30:12 -0400 Subject: drm/amdkfd: fix gfx_target_version for certain 11.0.3 devices Certain boards with GC IP 11.0.3 need slightly different handling in the shader compiler due to board specific bounding box optimizations. 
Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 862a50f7b490..ebc3c3f965f9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -411,8 +411,15 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) f2g = &gfx_v11_kfd2kgd; break; case IP_VERSION(11, 0, 3): - /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */ - gfx_target_version = 110001; + if ((adev->pdev->device == 0x7460 && + adev->pdev->revision == 0x00) || + (adev->pdev->device == 0x7461 && + adev->pdev->revision == 0x00)) + /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */ + gfx_target_version = 110005; + else + /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */ + gfx_target_version = 110001; f2g = &gfx_v11_kfd2kgd; break; default: -- cgit v1.2.3 From 07a1475279244cd8eea81bec44fd5f0a9d6871f8 Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Mon, 3 Apr 2023 15:31:53 -0400 Subject: drm/amdkfd: Add new gfx_target_versions for GC 9.4.3 For GC 9.4.3, set gfx_target_version to 90402 for rev 1 and later (APU or dGPU), 90401 for rev 0 dGPU, and 90400 for rev 0 APU. Signed-off-by: Graham Sider Acked-by: Alex Deucher Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index ebc3c3f965f9..e84ad1c5ef44 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -332,7 +332,9 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) f2g = &aldebaran_kfd2kgd; break; case IP_VERSION(9, 4, 3): - gfx_target_version = 90400; + gfx_target_version = adev->rev_id >= 1 ? 90402 + : adev->flags & AMD_IS_APU ? 90400 + : 90401; f2g = &gc_9_4_3_kfd2kgd; break; /* Navi10 */ -- cgit v1.2.3 From d3116d9f27b89d363dd528e42fcf4895a15e0c3c Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 31 May 2023 10:08:11 +0800 Subject: drm/amdkfd: clean up one inconsistent indenting drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_device.c:1036 kgd2kfd_interrupt() warn: inconsistent indenting Signed-off-by: Yang Li Signed-off-by: Felix Kuehling Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index e84ad1c5ef44..f0ed6e6416c3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -1042,7 +1042,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) is_patched ? 
patched_ihre : ih_ring_entry)) { kfd_queue_work(node->ih_wq, &node->interrupt_work); spin_unlock_irqrestore(&node->interrupt_lock, flags); - return; + return; } spin_unlock_irqrestore(&node->interrupt_lock, flags); } -- cgit v1.2.3 From 12fb1ad70d65edc3405884792d044fa79df7244f Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 22 Apr 2022 12:26:18 -0400 Subject: drm/amdkfd: update process interrupt handling for debug events The debugger must be notified of any debugger-subscribed exception that comes from hardware interrupts. If a debugger session exits, any exceptions it subscribed to may still have interrupts in the interrupt ring buffer or the KGD/KFD pipeline. To prevent a new session from inheriting stale interrupts, when a new queue is created, open an interrupt drain and allow the IH ring to drain from a timestamped checkpoint. Then inject a custom IV so that once the custom IV is picked up by the KFD, it's safe to close the drain and proceed with queue creation. The drain must also be done on debug disable as SW interrupts may still be processed. Drain at this time and clear all the exception status. The debugger may also not be attached nor subscribed to certain exceptions, so forward them directly to the runtime. GFX10 also requires its own IV processing, hence the creation of kfd_int_process_v10.c. This is because the IVs from SQ interrupts are packed into a new contiguous format, unlike GFX9. To make this clear, a separate interrupt handling code file was created. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 16 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 + drivers/gpu/drm/amd/amdkfd/Makefile | 1 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 84 +++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 6 + drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 405 +++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 26 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 98 ++++- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 12 + drivers/gpu/drm/amd/amdkfd/kfd_process.c | 47 +++ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4 + 12 files changed, 686 insertions(+), 19 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 66f80b9ab0c5..98cd52bb005f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -777,6 +777,22 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bo amdgpu_umc_poison_handler(adev, reset); } +int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, + uint32_t *payload) +{ + int ret; + + /* Device or IH ring is not ready so bail.
*/ + ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih); + if (ret) + return ret; + + /* Send payload to fence KFD interrupts */ + amdgpu_amdkfd_interrupt(adev, payload); + + return 0; +} + bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) { if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 94cc456761e5..dd740e64e6e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -250,6 +250,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, struct amdgpu_device *src, bool is_min); int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min); +int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, + uint32_t *payload); /* Read user wptr from a specified user address space with page fault * disabled. The memory must be pinned and mapped to the hardware when diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 747754428073..2ec8f27c5366 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -53,6 +53,7 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_events.o \ $(AMDKFD_PATH)/cik_event_interrupt.o \ $(AMDKFD_PATH)/kfd_int_process_v9.o \ + $(AMDKFD_PATH)/kfd_int_process_v10.o \ $(AMDKFD_PATH)/kfd_int_process_v11.o \ $(AMDKFD_PATH)/kfd_smi_events.o \ $(AMDKFD_PATH)/kfd_crat.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 17e8e9edccbf..68b657398d41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -125,6 +125,64 @@ bool kfd_dbg_ev_raise(uint64_t event_mask, return is_subscribed; } +/* set pending event queue entry from ring entry */ +bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev, + unsigned int pasid, + uint32_t doorbell_id, + uint64_t trap_mask, + void *exception_data, + size_t exception_data_size) +{ + struct kfd_process *p; + bool signaled_to_debugger_or_runtime = false; + + p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return false; + + if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true, + exception_data, exception_data_size)) { + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + + if (!!(trap_mask & KFD_EC_MASK_QUEUE) && + p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) { + mutex_lock(&p->mutex); + + pqm = &p->pqm; + list_for_each_entry(pqn, &pqm->queues, + process_queue_list) { + + if (!(pqn->q && pqn->q->device == dev && + pqn->q->doorbell_id == doorbell_id)) + continue; + + kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id, + trap_mask); + + signaled_to_debugger_or_runtime = true; + + break; + } + + mutex_unlock(&p->mutex); + } else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) { + kfd_dqm_evict_pasid(dev->dqm, p->pasid); + kfd_signal_vm_fault_event(dev, p->pasid, NULL, + exception_data); + + signaled_to_debugger_or_runtime = true; + } + } else { + signaled_to_debugger_or_runtime = true; + } + + kfd_unref_process(p); + + return signaled_to_debugger_or_runtime; +} + int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, unsigned int queue_id, @@ -281,6 +339,31 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind kfd_dbg_set_workaround(target, false); } +static void 
kfd_dbg_clean_exception_status(struct kfd_process *target) +{ + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + int i; + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + kfd_process_drain_interrupts(pdd); + + pdd->exception_status = 0; + } + + pqm = &target->pqm; + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + if (!pqn->q) + continue; + + pqn->q->properties.exception_status = 0; + } + + target->exception_status = 0; +} + int kfd_dbg_trap_disable(struct kfd_process *target) { if (!target->debug_trap_enabled) @@ -304,6 +387,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target) } target->debug_trap_enabled = false; + kfd_dbg_clean_exception_status(target); kfd_unref_process(target); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index fca928564948..5153ccbd7fd1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -27,6 +27,12 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count); int kfd_dbg_trap_activate(struct kfd_process *target); +bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev, + unsigned int pasid, + uint32_t doorbell_id, + uint64_t trap_mask, + void *exception_data, + size_t exception_data_size); bool kfd_dbg_ev_raise(uint64_t event_mask, struct kfd_process *process, struct kfd_node *dev, unsigned int source_id, bool use_worker, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index f0ed6e6416c3..2c36bb578633 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -140,6 +140,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 1): /* ARCTURUS */ case IP_VERSION(9, 4, 2): /* ALDEBARAN */ case IP_VERSION(9, 4, 3): /* GC 9.4.3 */ + kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + break; case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ @@ -153,7 +155,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */ case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */ case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */ - kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + kfd->device_info.event_interrupt_class = &event_interrupt_class_v10; break; case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c new file mode 100644 index 000000000000..c7991e07b6be --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -0,0 +1,405 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "kfd_events.h" +#include "kfd_debug.h" +#include "soc15_int.h" +#include "kfd_device_queue_manager.h" + +/* + * GFX10 SQ Interrupts + * + * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit + * packet to the Interrupt Handler: + * Auto - Generated by the SQG (various cmd overflows, timestamps etc) + * Wave - Generated by S_SENDMSG through a shader program + * Error - HW generated errors (Illegal instructions, Memviols, EDC etc) + * + * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus + * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such: + * + * - context_id1[7:6] + * Encoding type (0 = Auto, 1 = Wave, 2 = Error) + * + * - context_id0[24] + * PRIV bit indicates that Wave S_SEND or error occurred within trap + * + * - context_id0[22:0] + * 23-bit data with the following layout per encoding type: + * Auto - only context_id0[8:0] is used, which reports various interrupts + * generated by SQG. The rest is 0. + * Wave - user data sent from m0 via S_SENDMSG + * Error - Error type (context_id0[22:19]), Error Details (rest of bits) + * + * The other context_id bits show coordinates (SE/SH/CU/SIMD/WGP) for wave + * S_SENDMSG and Errors. These are 0 for Auto. + */ + +enum SQ_INTERRUPT_WORD_ENCODING { + SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0, + SQ_INTERRUPT_WORD_ENCODING_INST, + SQ_INTERRUPT_WORD_ENCODING_ERROR, +}; + +enum SQ_INTERRUPT_ERROR_TYPE { + SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0, + SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST, + SQ_INTERRUPT_ERROR_TYPE_MEMVIOL, + SQ_INTERRUPT_ERROR_TYPE_EDC_FED, +}; + +/* SQ_INTERRUPT_WORD_AUTO_CTXID */ +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE__SHIFT 0 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT__SHIFT 1 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL__SHIFT 2 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL__SHIFT 3 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR__SHIFT 7 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID__SHIFT 4 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING__SHIFT 6 + +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_MASK 0x00000001 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT_MASK 0x00000002 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL_MASK 0x00000004 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL_MASK 0x00000008 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR_MASK 0x00000080 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID_MASK 0x030 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING_MASK 0x0c0 + +/* SQ_INTERRUPT_WORD_WAVE_CTXID */ +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA__SHIFT 0 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID__SHIFT 23 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV__SHIFT 24 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID__SHIFT 25 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID__SHIFT 30 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID__SHIFT 0 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID__SHIFT 4 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT 6 + +#define 
SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA_MASK 0x000007fffff +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID_MASK 0x0000800000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK 0x00001000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID_MASK 0x0003e000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID_MASK 0x000c0000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID_MASK 0x00f +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID_MASK 0x030 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK 0x0c0 + +#define KFD_CTXID0__ERR_TYPE_MASK 0x780000 +#define KFD_CTXID0__ERR_TYPE__SHIFT 19 + +/* GFX10 SQ interrupt ENC type bit (context_id1[7:6]) for wave s_sendmsg */ +#define KFD_CONTEXT_ID1_ENC_TYPE_WAVE_MASK 0x40 +/* GFX10 SQ interrupt PRIV bit (context_id0[24]) for s_sendmsg inside trap */ +#define KFD_CONTEXT_ID0_PRIV_MASK 0x1000000 +/* + * The debugger will send user data(m0) with PRIV=1 to indicate it requires + * notification from the KFD with the following queue id (DOORBELL_ID) and + * trap code (TRAP_CODE). + */ +#define KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK 0x0003ff +#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT 10 +#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK 0x07fc00 +#define KFD_DEBUG_DOORBELL_ID(ctxid0) ((ctxid0) & \ + KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK) +#define KFD_DEBUG_TRAP_CODE(ctxid0) (((ctxid0) & \ + KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK) \ + >> KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT) +#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00 +#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10 +#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \ + KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \ + >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT) + +static void event_interrupt_poison_consumption(struct kfd_node *dev, + uint16_t pasid, uint16_t client_id) +{ + int old_poison, ret = -EINVAL; + struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return; + + /* all queues of a process will be unmapped in one time */ + old_poison = atomic_cmpxchg(&p->poison, 0, 1); + kfd_unref_process(p); + if (old_poison) + return; + + switch (client_id) { + case SOC15_IH_CLIENTID_SE0SH: + case SOC15_IH_CLIENTID_SE1SH: + case SOC15_IH_CLIENTID_SE2SH: + case SOC15_IH_CLIENTID_SE3SH: + case SOC15_IH_CLIENTID_UTCL2: + ret = kfd_dqm_evict_pasid(dev->dqm, pasid); + break; + case SOC15_IH_CLIENTID_SDMA0: + case SOC15_IH_CLIENTID_SDMA1: + case SOC15_IH_CLIENTID_SDMA2: + case SOC15_IH_CLIENTID_SDMA3: + case SOC15_IH_CLIENTID_SDMA4: + break; + default: + break; + } + + kfd_signal_poison_consumed_event(dev, pasid); + + /* resetting queue passes, do page retirement without gpu reset + * resetting queue fails, fallback to gpu reset solution + */ + if (!ret) { + dev_warn(dev->adev->dev, + "RAS poison consumption, unmap queue flow succeeded: client id %d\n", + client_id); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false); + } else { + dev_warn(dev->adev->dev, + "RAS poison consumption, fall back to gpu reset flow: client id %d\n", + client_id); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); + } +} + +static bool event_interrupt_isr_v10(struct kfd_node *dev, + const uint32_t *ih_ring_entry, + uint32_t *patched_ihre, + bool *patched_flag) +{ + uint16_t source_id, client_id, pasid, vmid; + const uint32_t *data = ih_ring_entry; + + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + + /* Only handle interrupts from KFD VMIDs */ + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && + (vmid < 
dev->vm_info.first_vmid_kfd || + vmid > dev->vm_info.last_vmid_kfd)) + return false; + + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + + /* Only handle clients we care about */ + if (client_id != SOC15_IH_CLIENTID_GRBM_CP && + client_id != SOC15_IH_CLIENTID_SDMA0 && + client_id != SOC15_IH_CLIENTID_SDMA1 && + client_id != SOC15_IH_CLIENTID_SDMA2 && + client_id != SOC15_IH_CLIENTID_SDMA3 && + client_id != SOC15_IH_CLIENTID_SDMA4 && + client_id != SOC15_IH_CLIENTID_SDMA5 && + client_id != SOC15_IH_CLIENTID_SDMA6 && + client_id != SOC15_IH_CLIENTID_SDMA7 && + client_id != SOC15_IH_CLIENTID_VMC && + client_id != SOC15_IH_CLIENTID_VMC1 && + client_id != SOC15_IH_CLIENTID_UTCL2 && + client_id != SOC15_IH_CLIENTID_SE0SH && + client_id != SOC15_IH_CLIENTID_SE1SH && + client_id != SOC15_IH_CLIENTID_SE2SH && + client_id != SOC15_IH_CLIENTID_SE3SH) + return false; + + pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", + client_id, source_id, vmid, pasid); + pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", + data[0], data[1], data[2], data[3], + data[4], data[5], data[6], data[7]); + + /* If there is no valid PASID, it's likely a bug */ + if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) + return 0; + + /* Interrupt types we care about: various signals and faults. + * They will be forwarded to a work queue (see below). + */ + return source_id == SOC15_INTSRC_CP_END_OF_PIPE || + source_id == SOC15_INTSRC_SDMA_TRAP || + source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || + source_id == SOC15_INTSRC_CP_BAD_OPCODE || + client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_VMC1 || + client_id == SOC15_IH_CLIENTID_UTCL2 || + KFD_IRQ_IS_FENCE(client_id, source_id); +} + +static void event_interrupt_wq_v10(struct kfd_node *dev, + const uint32_t *ih_ring_entry) +{ + uint16_t source_id, client_id, pasid, vmid; + uint32_t context_id0, context_id1; + uint32_t encoding, sq_intr_err_type; + + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); + context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry); + + if (client_id == SOC15_IH_CLIENTID_GRBM_CP || + client_id == SOC15_IH_CLIENTID_SE0SH || + client_id == SOC15_IH_CLIENTID_SE1SH || + client_id == SOC15_IH_CLIENTID_SE2SH || + client_id == SOC15_IH_CLIENTID_SE3SH) { + if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) + kfd_signal_event_interrupt(pasid, context_id0, 32); + else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) { + encoding = REG_GET_FIELD(context_id1, + SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING); + switch (encoding) { + case SQ_INTERRUPT_WORD_ENCODING_AUTO: + pr_debug( + "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + WLT), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF0_FULL), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF1_FULL), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_UTC_ERROR)); + break; + case SQ_INTERRUPT_WORD_ENCODING_INST: + pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id 
%d, wgp_id %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + DATA), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SA_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + PRIV), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SIMD_ID), + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + WGP_ID)); + if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) { + if (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_DEBUG_TRAP_CODE(context_id0), + NULL, 0)) + return; + } + break; + case SQ_INTERRUPT_WORD_ENCODING_ERROR: + sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0, + ERR_TYPE); + pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + DATA), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SA_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + PRIV), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SIMD_ID), + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + WGP_ID), + sq_intr_err_type); + if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && + sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { + event_interrupt_poison_consumption(dev, pasid, source_id); + return; + } + break; + default: + break; + } + kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23); + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), + NULL, + 0); + } + } else if (client_id == SOC15_IH_CLIENTID_SDMA0 || + client_id == SOC15_IH_CLIENTID_SDMA1 || + client_id == SOC15_IH_CLIENTID_SDMA2 || + client_id == SOC15_IH_CLIENTID_SDMA3 || + (client_id == SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid && + KFD_GC_VERSION(dev) == IP_VERSION(10, 3, 0)) || + client_id == SOC15_IH_CLIENTID_SDMA4 || + client_id == SOC15_IH_CLIENTID_SDMA5 || + client_id == SOC15_IH_CLIENTID_SDMA6 || + client_id == SOC15_IH_CLIENTID_SDMA7) { + if (source_id == SOC15_INTSRC_SDMA_TRAP) { + kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); + } else if (source_id == SOC15_INTSRC_SDMA_ECC) { + event_interrupt_poison_consumption(dev, pasid, source_id); + return; + } + } else if (client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_VMC1 || + client_id == SOC15_IH_CLIENTID_UTCL2) { + struct kfd_vm_fault_info info = {0}; + uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + struct kfd_hsa_memory_exception_data exception_data; + + if (client_id == SOC15_IH_CLIENTID_UTCL2 && + amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { + event_interrupt_poison_consumption(dev, pasid, client_id); + return; + } + + info.vmid = vmid; + info.mc_id = client_id; + info.page_addr = ih_ring_entry[4] | + (uint64_t)(ih_ring_entry[5] & 0xf) << 32; + info.prot_valid = ring_id & 0x08; + info.prot_read = ring_id & 0x10; + info.prot_write = ring_id & 0x20; + + memset(&exception_data, 0, sizeof(exception_data)); + exception_data.gpu_id = dev->id; + exception_data.va = (info.page_addr) << PAGE_SHIFT; + 
exception_data.failure.NotPresent = info.prot_valid ? 1 : 0; + exception_data.failure.NoExecute = info.prot_exec ? 1 : 0; + exception_data.failure.ReadOnly = info.prot_write ? 1 : 0; + exception_data.failure.imprecise = 0; + + kfd_set_dbg_ev_from_interrupt(dev, + pasid, + -1, + KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), + &exception_data, + sizeof(exception_data)); + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); + } +} + +const struct kfd_event_interrupt_class event_interrupt_class_v10 = { + .interrupt_isr = event_interrupt_isr_v10, + .interrupt_wq = event_interrupt_wq_v10, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index c2166bf964ef..f933bd231fb9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -26,6 +26,7 @@ #include "kfd_device_queue_manager.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "kfd_smi_events.h" +#include "kfd_debug.h" /* * GFX11 SQ Interrupts @@ -238,7 +239,7 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev, client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); /* Only handle interrupts from KFD VMIDs */ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); - if (/*!KFD_IRQ_IS_FENCE(client_id, source_id) &&*/ + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && (vmid < dev->vm_info.first_vmid_kfd || vmid > dev->vm_info.last_vmid_kfd)) return false; @@ -267,7 +268,7 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev, source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || source_id == SOC21_INTSRC_SDMA_TRAP || - /* KFD_IRQ_IS_FENCE(client_id, source_id) || */ + KFD_IRQ_IS_FENCE(client_id, source_id) || (((client_id == SOC21_IH_CLIENTID_VMC) || ((client_id == SOC21_IH_CLIENTID_GFX) && (source_id == UTCL2_1_0__SRCID__FAULT))) && @@ -279,7 +280,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, { uint16_t source_id, client_id, ring_id, pasid, vmid; uint32_t context_id0, context_id1; - uint8_t sq_int_enc, sq_int_errtype; + uint8_t sq_int_enc, sq_int_priv, sq_int_errtype; struct kfd_vm_fault_info info = {0}; struct kfd_hsa_memory_exception_data exception_data; @@ -312,9 +313,9 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, exception_data.failure.ReadOnly = info.prot_write ? 
1 : 0; exception_data.failure.imprecise = 0; - /*kfd_set_dbg_ev_from_interrupt(dev, pasid, -1, + kfd_set_dbg_ev_from_interrupt(dev, pasid, -1, KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), - &exception_data, sizeof(exception_data));*/ + &exception_data, sizeof(exception_data)); kfd_smi_event_update_vmfault(dev, pasid); /* GRBM, SDMA, SE, PMM */ @@ -324,11 +325,11 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, /* CP */ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) kfd_signal_event_interrupt(pasid, context_id0, 32); - /*else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) + else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_CTXID0_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)), - NULL, 0);*/ + NULL, 0); /* SDMA */ else if (source_id == SOC21_INTSRC_SDMA_TRAP) @@ -348,6 +349,13 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, break; case SQ_INTERRUPT_WORD_ENCODING_INST: print_sq_intr_info_inst(context_id0, context_id1); + sq_int_priv = REG_GET_FIELD(context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV); + if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_CTXID0_DOORBELL_ID(context_id0), + KFD_CTXID0_TRAP_CODE(context_id0), + NULL, 0))) + return; break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: print_sq_intr_info_error(context_id0, context_id1); @@ -366,8 +374,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24); } - /*} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { - kfd_process_close_interrupt_drain(pasid);*/ + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 8cf58be80f4e..d5c9f30552e3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -23,10 +23,40 @@ #include "kfd_priv.h" #include "kfd_events.h" +#include "kfd_debug.h" #include "soc15_int.h" #include "kfd_device_queue_manager.h" #include "kfd_smi_events.h" +/* + * GFX9 SQ Interrupts + * + * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit + * packet to the Interrupt Handler: + * Auto - Generated by the SQG (various cmd overflows, timestamps etc) + * Wave - Generated by S_SENDMSG through a shader program + * Error - HW generated errors (Illegal instructions, Memviols, EDC etc) + * + * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus + * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such: + * + * - context_id0[27:26] + * Encoding type (0 = Auto, 1 = Wave, 2 = Error) + * + * - context_id0[13] + * PRIV bit indicates that Wave S_SEND or error occurred within trap + * + * - {context_id1[7:0],context_id0[31:28],context_id0[11:0]} + * 24-bit data with the following layout per encoding type: + * Auto - only context_id0[8:0] is used, which reports various interrupts + * generated by SQG. The rest is 0. + * Wave - user data sent from m0 via S_SENDMSG + * Error - Error type (context_id1[7:4]), Error Details (rest of bits) + * + * The other context_id bits show coordinates (SE/SH/CU/SIMD/WAVE) for wave + * S_SENDMSG and Errors. These are 0 for Auto. 
+ */ + enum SQ_INTERRUPT_WORD_ENCODING { SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0, SQ_INTERRUPT_WORD_ENCODING_INST, @@ -84,12 +114,32 @@ enum SQ_INTERRUPT_ERROR_TYPE { #define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000 #define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000 +/* GFX9 SQ interrupt 24-bit data from context_id<0,1> */ #define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \ ((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000)) #define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000 #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20 +/* + * The debugger will send user data(m0) with PRIV=1 to indicate it requires + * notification from the KFD with the following queue id (DOORBELL_ID) and + * trap code (TRAP_CODE). + */ +#define KFD_INT_DATA_DEBUG_DOORBELL_MASK 0x0003ff +#define KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT 10 +#define KFD_INT_DATA_DEBUG_TRAP_CODE_MASK 0x07fc00 +#define KFD_DEBUG_DOORBELL_ID(sq_int_data) ((sq_int_data) & \ + KFD_INT_DATA_DEBUG_DOORBELL_MASK) +#define KFD_DEBUG_TRAP_CODE(sq_int_data) (((sq_int_data) & \ + KFD_INT_DATA_DEBUG_TRAP_CODE_MASK) \ + >> KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT) +#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00 +#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10 +#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \ + KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \ + >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT) + static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { @@ -168,14 +218,16 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, uint16_t source_id, client_id, pasid, vmid; const uint32_t *data = ih_ring_entry; + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + /* Only handle interrupts from KFD VMIDs */ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); - if (vmid < dev->vm_info.first_vmid_kfd || - vmid > dev->vm_info.last_vmid_kfd) + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && + (vmid < dev->vm_info.first_vmid_kfd || + vmid > dev->vm_info.last_vmid_kfd)) return false; - source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); - client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); /* Only handle clients we care about */ @@ -194,7 +246,8 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, client_id != SOC15_IH_CLIENTID_SE0SH && client_id != SOC15_IH_CLIENTID_SE1SH && client_id != SOC15_IH_CLIENTID_SE2SH && - client_id != SOC15_IH_CLIENTID_SE3SH) + client_id != SOC15_IH_CLIENTID_SE3SH && + !KFD_IRQ_IS_FENCE(client_id, source_id)) return false; /* This is a known issue for gfx9. 
Under non HWS, pasid is not set @@ -247,6 +300,7 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, source_id == SOC15_INTSRC_SDMA_ECC || source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || + KFD_IRQ_IS_FENCE(client_id, source_id) || ((client_id == SOC15_IH_CLIENTID_VMC || client_id == SOC15_IH_CLIENTID_VMC1 || client_id == SOC15_IH_CLIENTID_UTCL2) && @@ -302,6 +356,13 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID), REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID), sq_int_data); + if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) { + if (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(sq_int_data), + KFD_DEBUG_TRAP_CODE(sq_int_data), + NULL, 0)) + return; + } break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE); @@ -324,8 +385,12 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, break; } kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24); - } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) - kfd_signal_hw_exception_event(pasid); + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), + NULL, 0); + } } else if (client_id == SOC15_IH_CLIENTID_SDMA0 || client_id == SOC15_IH_CLIENTID_SDMA1 || client_id == SOC15_IH_CLIENTID_SDMA2 || @@ -345,6 +410,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + struct kfd_hsa_memory_exception_data exception_data; if (client_id == SOC15_IH_CLIENTID_UTCL2 && amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { @@ -360,9 +426,23 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, info.prot_read = ring_id & 0x10; info.prot_write = ring_id & 0x20; + memset(&exception_data, 0, sizeof(exception_data)); + exception_data.gpu_id = dev->id; + exception_data.va = (info.page_addr) << PAGE_SHIFT; + exception_data.failure.NotPresent = info.prot_valid ? 1 : 0; + exception_data.failure.NoExecute = info.prot_exec ? 1 : 0; + exception_data.failure.ReadOnly = info.prot_write ? 
1 : 0; + exception_data.failure.imprecise = 0; + + kfd_set_dbg_ev_from_interrupt(dev, + pasid, + -1, + KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), + &exception_data, + sizeof(exception_data)); kfd_smi_event_update_vmfault(dev, pasid); - kfd_dqm_evict_pasid(dev->dqm, pasid); - kfd_signal_vm_fault_event(dev, pasid, &info, NULL); + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index a02fb939614a..cd2d56e5cdf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -963,6 +963,10 @@ struct kfd_process { uint64_t exception_enable_mask; uint64_t exception_status; + /* Used to drain stale interrupts */ + wait_queue_head_t wait_irq_drain; + bool irq_drain_is_open; + /* shared virtual memory registered by this process */ struct svm_range_list svms; @@ -1144,12 +1148,19 @@ int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); /* Interrupts */ +#define KFD_IRQ_FENCE_CLIENTID 0xff +#define KFD_IRQ_FENCE_SOURCEID 0xff +#define KFD_IRQ_IS_FENCE(client, source) \ + ((client) == KFD_IRQ_FENCE_CLIENTID && \ + (source) == KFD_IRQ_FENCE_SOURCEID) int kfd_interrupt_init(struct kfd_node *dev); void kfd_interrupt_exit(struct kfd_node *dev); bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry); bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag); +int kfd_process_drain_interrupts(struct kfd_process_device *pdd); +void kfd_process_close_interrupt_drain(unsigned int pasid); /* amdkfd Apertures */ int kfd_init_apertures(struct kfd_process *process); @@ -1421,6 +1432,7 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd); /* Events */ extern const struct kfd_event_interrupt_class event_interrupt_class_cik; extern const struct kfd_event_interrupt_class event_interrupt_class_v9; +extern const struct kfd_event_interrupt_class event_interrupt_class_v10; extern const struct kfd_event_interrupt_class event_interrupt_class_v11; extern const struct kfd_device_global_init_class device_global_init_class_cik; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 3b7f219c9d06..3d3611705d41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -862,6 +862,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) kfd_procfs_add_sysfs_stats(process); kfd_procfs_add_sysfs_files(process); kfd_procfs_add_sysfs_counters(process); + + init_waitqueue_head(&process->wait_irq_drain); } out: if (!IS_ERR(process)) @@ -2136,6 +2138,51 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) } } +/* assumes caller holds process lock. */ +int kfd_process_drain_interrupts(struct kfd_process_device *pdd) +{ + uint32_t irq_drain_fence[8]; + int r = 0; + + if (!KFD_IS_SOC15(pdd->dev)) + return 0; + + pdd->process->irq_drain_is_open = true; + + memset(irq_drain_fence, 0, sizeof(irq_drain_fence)); + irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) | + KFD_IRQ_FENCE_CLIENTID; + irq_drain_fence[3] = pdd->process->pasid; + + /* ensure stale irqs scheduled KFD interrupts and send drain fence. 
*/ + if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev, + irq_drain_fence)) { + pdd->process->irq_drain_is_open = false; + return 0; + } + + r = wait_event_interruptible(pdd->process->wait_irq_drain, + !READ_ONCE(pdd->process->irq_drain_is_open)); + if (r) + pdd->process->irq_drain_is_open = false; + + return r; +} + +void kfd_process_close_interrupt_drain(unsigned int pasid) +{ + struct kfd_process *p; + + p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return; + + WRITE_ONCE(p->irq_drain_is_open, false); + wake_up_all(&p->wait_irq_drain); + kfd_unref_process(p); +} + struct send_exception_work_handler_workarea { struct work_struct work; struct kfd_process *p; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 43d432b5c5bc..70852a200d8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -337,6 +337,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, kq->queue->properties.queue_id = *qid; pqn->kq = kq; pqn->q = NULL; + retval = kfd_process_drain_interrupts(pdd); + if (retval) + break; + retval = dev->dqm->ops.create_kernel_queue(dev->dqm, kq, &pdd->qpd); break; -- cgit v1.2.3 From e0f85f4690d089cc1a60337decafb1acf7eec45e Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 6 May 2022 14:58:55 -0400 Subject: drm/amdkfd: add debug set and clear address watch points operation Shader read, write and atomic memory operations can be alerted to the debugger as an address watch exception. Allow the debugger to pass in a watch point to a particular memory address per device. Note that only 4 watch points exist per device to date, so have the KFD keep track of which watch points are allocated or not. 
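A minimal, self-contained sketch of the watch-point bookkeeping described above, assuming hypothetical names (watch_alloc_mask, alloc_watch_id, free_watch_id); the in-kernel version additionally mirrors the mask in the process device and takes a spinlock, as the kfd_debug.c hunks below show:

/* Simplified model: 4 watch points per device, allocation tracked in a bitmask. */
#include <stdio.h>
#include <stdint.h>

#define MAX_WATCH_ADDRESSES 4

static uint32_t watch_alloc_mask; /* bit i set => watch point i is in use */

/* Find the first free watch point, mark it used and return its id (or -1 if none). */
static int alloc_watch_id(void)
{
	for (int i = 0; i < MAX_WATCH_ADDRESSES; i++) {
		if (!(watch_alloc_mask & (1u << i))) {
			watch_alloc_mask |= 1u << i;
			return i;
		}
	}
	return -1;
}

/* Release a previously allocated watch point. */
static void free_watch_id(int id)
{
	if (id >= 0 && id < MAX_WATCH_ADDRESSES)
		watch_alloc_mask &= ~(1u << id);
}

int main(void)
{
	int a = alloc_watch_id(); /* 0 */
	int b = alloc_watch_id(); /* 1 */

	free_watch_id(a);
	printf("got %d and %d, freed %d, next allocation returns %d\n",
	       a, b, a, alloc_watch_id()); /* reuses slot 0 */
	return 0;
}
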
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 51 ++++++++ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 78 ++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 8 ++ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 52 +++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 77 ++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 8 ++ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 24 ++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 136 +++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 + drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 6 +- 13 files changed, 452 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index 774ecfc3451a..efd6a72aab4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -118,6 +118,55 @@ static uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev, return data; } +#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H) +static uint32_t kgd_gfx_aldebaran_set_address_watch( + struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 6); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + return watch_address_cntl; +} + +uint32_t kgd_gfx_aldebaran_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + return 0; +} + const struct kfd2kgd_calls aldebaran_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, @@ -141,6 +190,8 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .validate_trap_override_request = kgd_aldebaran_validate_trap_override_request, .set_wave_launch_trap_override = kgd_aldebaran_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode, + .set_address_watch = kgd_gfx_aldebaran_set_address_watch, + .clear_address_watch = kgd_gfx_aldebaran_clear_address_watch, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index fbdc1b7b1e42..6df215aba4c4 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -413,6 +413,8 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v9_set_address_watch, + .clear_address_watch = kgd_gfx_v9_clear_address_watch, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index a7a6edda557f..8ad7a7779e14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -880,6 +880,82 @@ uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev, return 0; } +#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H) +uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VMID, + debug_vmid); + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 7); + + /* Turning off this watch point until we set all the registers */ + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 0); + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + /* Enable the watch point */ + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + return 0; +} + +uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + + WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + return 0; +} + + /* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values * The values read are: * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. 
@@ -969,6 +1045,8 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v10_set_address_watch, + .clear_address_watch = kgd_gfx_v10_clear_address_watch, .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 3a6aca2b0eaa..e6b70196071a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -39,6 +39,14 @@ uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev, uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev, uint8_t wave_launch_mode, uint32_t vmid); +uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid); +uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id); void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index ed36b433a48b..8c8437a4383f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -678,6 +678,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, - .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode - + .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v10_set_address_watch, + .clear_address_watch = kgd_gfx_v10_clear_address_watch }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 9711d5128d09..52efa690a3c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -737,6 +737,54 @@ static uint32_t kgd_gfx_v11_set_wave_launch_mode(struct amdgpu_device *adev, return data; } +#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H) +static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 7); + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + 
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + return watch_address_cntl; +} + +uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + return 0; +} + const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .program_sh_mem_settings = program_sh_mem_settings_v11, .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11, @@ -757,5 +805,7 @@ const struct kfd2kgd_calls gfx_v11_kfd2kgd = { .disable_debug_trap = kgd_gfx_v11_disable_debug_trap, .validate_trap_override_request = kgd_gfx_v11_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override, - .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode + .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v11_set_address_watch, + .clear_address_watch = kgd_gfx_v11_clear_address_watch }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 0acc0c18dfe6..51d93fb13ea3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -816,6 +816,81 @@ uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev, return 0; } +#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H) +uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid) +{ + uint32_t watch_address_high; + uint32_t watch_address_low; + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + + watch_address_low = lower_32_bits(watch_address); + watch_address_high = upper_32_bits(watch_address) & 0xffff; + + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VMID, + debug_vmid); + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MODE, + watch_mode); + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + MASK, + watch_address_mask >> 6); + + /* Turning off this watch point until we set all the registers */ + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 0); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_high); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_low); + + /* Enable the watch point */ + watch_address_cntl = REG_SET_FIELD(watch_address_cntl, + TCP_WATCH0_CNTL, + VALID, + 1); + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + return 0; +} + +uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id) +{ + uint32_t watch_address_cntl; + + watch_address_cntl = 0; + + WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) + + (watch_id * TCP_WATCH_STRIDE)), + watch_address_cntl); + + return 0; +} + /* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values * The values read are: * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. 
@@ -1090,6 +1165,8 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request, .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override, .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode, + .set_address_watch = kgd_gfx_v9_set_address_watch, + .clear_address_watch = kgd_gfx_v9_clear_address_watch, .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 18f4970ac8e4..5f54bff0db49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -84,6 +84,14 @@ uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev, uint32_t trap_mask_request, uint32_t *trap_mask_prev, uint32_t kfd_dbg_trap_cntl_prev); +uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t watch_id, + uint32_t watch_mode, + uint32_t debug_vmid); +uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev, + uint32_t watch_id); void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, uint32_t wait_times, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1fae97df7a1e..016724c82928 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2880,6 +2880,7 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v struct mm_struct *mm = NULL; struct pid *pid = NULL; struct kfd_process *target = NULL; + struct kfd_process_device *pdd = NULL; int r = 0; if (sched_policy == KFD_SCHED_POLICY_NO_HWS) { @@ -2957,6 +2958,20 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v goto unlock_out; } + if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || + args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) { + int user_gpu_id = kfd_process_get_user_gpu_id(target, + args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ? 
+ args->set_node_address_watch.gpu_id : + args->clear_node_address_watch.gpu_id); + + pdd = kfd_process_device_data_by_id(target, user_gpu_id); + if (user_gpu_id == -EINVAL || !pdd) { + r = -ENODEV; + goto unlock_out; + } + } + switch (args->op) { case KFD_IOC_DBG_TRAP_ENABLE: if (target != p) @@ -3009,7 +3024,16 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v (uint32_t *)args->resume_queues.queue_array_ptr); break; case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: + r = kfd_dbg_trap_set_dev_address_watch(pdd, + args->set_node_address_watch.address, + args->set_node_address_watch.mask, + &args->set_node_address_watch.id, + args->set_node_address_watch.mode); + break; case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: + r = kfd_dbg_trap_clear_dev_address_watch(pdd, + args->clear_node_address_watch.id); + break; case KFD_IOC_DBG_TRAP_SET_FLAGS: case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT: case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index f4d3dfb35cb3..4b36cc8b5fb7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -24,6 +24,8 @@ #include "kfd_device_queue_manager.h" #include +#define MAX_WATCH_ADDRESSES 4 + void debug_event_write_work_handler(struct work_struct *work) { struct kfd_process *process; @@ -289,6 +291,139 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) pdd->watch_points, flags); } +#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1 +static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id) +{ + int i; + + *watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID; + + spin_lock(&pdd->dev->kfd->watch_points_lock); + + for (i = 0; i < MAX_WATCH_ADDRESSES; i++) { + /* device watchpoint in use so skip */ + if ((pdd->dev->kfd->alloc_watch_ids >> i) & 0x1) + continue; + + pdd->alloc_watch_ids |= 0x1 << i; + pdd->dev->kfd->alloc_watch_ids |= 0x1 << i; + *watch_id = i; + spin_unlock(&pdd->dev->kfd->watch_points_lock); + return 0; + } + + spin_unlock(&pdd->dev->kfd->watch_points_lock); + + return -ENOMEM; +} + +static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id) +{ + spin_lock(&pdd->dev->kfd->watch_points_lock); + + /* process owns device watch point so safe to clear */ + if ((pdd->alloc_watch_ids >> watch_id) & 0x1) { + pdd->alloc_watch_ids &= ~(0x1 << watch_id); + pdd->dev->kfd->alloc_watch_ids &= ~(0x1 << watch_id); + } + + spin_unlock(&pdd->dev->kfd->watch_points_lock); +} + +static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id) +{ + bool owns_watch_id = false; + + spin_lock(&pdd->dev->kfd->watch_points_lock); + owns_watch_id = watch_id < MAX_WATCH_ADDRESSES && + ((pdd->alloc_watch_ids >> watch_id) & 0x1); + + spin_unlock(&pdd->dev->kfd->watch_points_lock); + + return owns_watch_id; +} + +int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd, + uint32_t watch_id) +{ + int r; + + if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id)) + return -EINVAL; + + if (!pdd->dev->kfd->shared_resources.enable_mes) { + r = debug_lock_and_unmap(pdd->dev->dqm); + if (r) + return r; + } + + amdgpu_gfx_off_ctrl(pdd->dev->adev, false); + pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch( + pdd->dev->adev, + watch_id); + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + r = debug_map_and_unlock(pdd->dev->dqm); + else + r = kfd_dbg_set_mes_debug_mode(pdd); + + 
kfd_dbg_clear_dev_watch_id(pdd, watch_id); + + return r; +} + +int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t *watch_id, + uint32_t watch_mode) +{ + int r = kfd_dbg_get_dev_watch_id(pdd, watch_id); + + if (r) + return r; + + if (!pdd->dev->kfd->shared_resources.enable_mes) { + r = debug_lock_and_unmap(pdd->dev->dqm); + if (r) { + kfd_dbg_clear_dev_watch_id(pdd, *watch_id); + return r; + } + } + + amdgpu_gfx_off_ctrl(pdd->dev->adev, false); + pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch( + pdd->dev->adev, + watch_address, + watch_address_mask, + *watch_id, + watch_mode, + pdd->dev->vm_info.last_vmid_kfd); + amdgpu_gfx_off_ctrl(pdd->dev->adev, true); + + if (!pdd->dev->kfd->shared_resources.enable_mes) + r = debug_map_and_unlock(pdd->dev->dqm); + else + r = kfd_dbg_set_mes_debug_mode(pdd); + + /* HWS is broken so no point in HW rollback but release the watchpoint anyways */ + if (r) + kfd_dbg_clear_dev_watch_id(pdd, *watch_id); + + return 0; +} + +static void kfd_dbg_clear_process_address_watch(struct kfd_process *target) +{ + int i, j; + + for (i = 0; i < target->n_pdds; i++) + for (j = 0; j < MAX_WATCH_ADDRESSES; j++) + kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j); +} + + /* kfd_dbg_trap_deactivate: * target: target process * unwind: If this is unwinding a failed kfd_dbg_trap_enable() @@ -303,6 +438,7 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind if (!unwind) { cancel_work_sync(&target->debug_event_workarea); + kfd_dbg_clear_process_address_watch(target); kfd_dbg_trap_set_wave_launch_mode(target, 0); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index cb17869437c5..7f0757c2af2c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -50,7 +50,13 @@ int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target, uint32_t *trap_mask_supported); int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target, uint8_t wave_launch_mode); - +int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd, + uint32_t watch_id); +int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd, + uint64_t watch_address, + uint32_t watch_address_mask, + uint32_t *watch_id, + uint32_t watch_mode); int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, unsigned int queue_id, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 2c36bb578633..9fc9d32cb579 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -811,6 +811,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; + spin_lock_init(&kfd->watch_points_lock); + kfd->init_complete = true; dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor, kfd->adev->pdev->device); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 05da43bf233a..8ec87bc8ba82 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -381,6 +381,10 @@ struct kfd_dev { struct kfd_node *nodes[MAX_KFD_NODES]; unsigned int num_nodes; + + /* Track per device allocated watch points */ + uint32_t alloc_watch_ids; + spinlock_t watch_points_lock; }; enum kfd_mempool { @@ -833,6 +837,7 @@ struct kfd_process_device { uint32_t spi_dbg_override; 
uint32_t spi_dbg_launch_mode; uint32_t watch_points[4]; + uint32_t alloc_watch_ids; /* * If this process has been checkpointed before, then the user @@ -989,7 +994,6 @@ struct kfd_process { struct semaphore runtime_enable_sema; bool is_runtime_retry; struct kfd_runtime_info runtime_info; - }; #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ -- cgit v1.2.3 From 597364adc0fcf71617b3adbe647b6eec76e27554 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 31 May 2023 11:22:03 -0400 Subject: drm/amdkfd: Fix reserved SDMA queues handling This patch fixes a regression caused by a bad merge where the handling of reserved SDMA queues was accidentally removed. With the fix, the reserved SDMA queues are again correctly marked as unavailable for allocation. Fixes: a805889a1531 ("drm/amdkfd: Update SDMA queue management for GFX9.4.3") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 13 ++++++------- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10 +++++----- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +- 3 files changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 9fc9d32cb579..9d4abfd8b55e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -106,20 +106,19 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) kfd->device_info.num_sdma_queues_per_engine = 8; } + bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES); + switch (sdma_version) { case IP_VERSION(6, 0, 0): + case IP_VERSION(6, 0, 1): case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): /* Reserve 1 for paging and 1 for gfx */ kfd->device_info.num_reserved_sdma_queues_per_engine = 2; /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */ - kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL; - break; - case IP_VERSION(6, 0, 1): - /* Reserve 1 for paging and 1 for gfx */ - kfd->device_info.num_reserved_sdma_queues_per_engine = 2; - /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... 
*/ - kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL; + bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0, + kfd->adev->sdma.num_instances * + kfd->device_info.num_reserved_sdma_queues_per_engine); break; default: break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0c1be91a87c6..498ad7d4e7d9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -123,11 +123,6 @@ unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } -static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm) -{ - return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; -} - static void init_sdma_bitmaps(struct device_queue_manager *dqm) { bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES); @@ -135,6 +130,11 @@ static void init_sdma_bitmaps(struct device_queue_manager *dqm) bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm)); + + /* Mask out the reserved queues */ + bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap, + dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap, + KFD_MAX_SDMA_QUEUES); } void program_sh_mem_settings(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 023b17e0116b..7364a5d77c6e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -239,7 +239,7 @@ struct kfd_device_info { uint32_t no_atomic_fw_version; unsigned int num_sdma_queues_per_engine; unsigned int num_reserved_sdma_queues_per_engine; - uint64_t reserved_sdma_queues_bitmap; + DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES); }; unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev); -- cgit v1.2.3
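As a closing illustration of the SDMA fix above, here is a small standalone sketch, assuming made-up queue counts and using plain bit operations in place of the kernel's bitmap_set()/bitmap_andnot() helpers, of how clearing the reserved queues from the allocatable bitmap leaves them unavailable:

/* Userspace model of init_sdma_bitmaps() after the fix: start with all
 * queues allocatable, then mask out the reserved ones. Queue counts are
 * illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int num_sdma_queues = 16; /* assumed total */
	const unsigned int num_reserved = 4;     /* e.g. 2 engines x 2 reserved queues */
	uint64_t sdma_bitmap = 0, reserved = 0;

	/* bitmap_set(sdma_bitmap, 0, num_sdma_queues) */
	for (unsigned int i = 0; i < num_sdma_queues; i++)
		sdma_bitmap |= 1ULL << i;

	/* bitmap_set(reserved_sdma_queues_bitmap, 0, num_reserved) */
	for (unsigned int i = 0; i < num_reserved; i++)
		reserved |= 1ULL << i;

	/* bitmap_andnot(sdma_bitmap, sdma_bitmap, reserved, KFD_MAX_SDMA_QUEUES) */
	sdma_bitmap &= ~reserved;

	for (unsigned int i = 0; i < num_sdma_queues; i++)
		printf("queue %2u: %s\n", i,
		       (sdma_bitmap & (1ULL << i)) ? "allocatable" : "reserved");
	return 0;
}
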