Diffstat (limited to 'drivers/gpu')
300 files changed, 2778 insertions, 1953 deletions
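A large share of the removals below delete the VRAM shadow-BO machinery: the device-global shadow list, the shadow restore path, and the duplicate page-table updates. For orientation, the core of the pattern being removed was a mutex-guarded device-global list, reduced here from the code deleted in amdgpu_object.c further down:

/* Every shadowed VM BO was linked into one device-global list under a
 * dedicated mutex, so the GPU-reset path could walk it and re-copy
 * VRAM contents back from the GTT shadow. */
mutex_lock(&adev->shadow_list_lock);
list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
mutex_unlock(&adev->shadow_list_lock);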
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index dcd59040c449..9b1e0ede05a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1083,10 +1083,6 @@ struct amdgpu_device { struct amdgpu_virt virt; - /* link all shadow bo */ - struct list_head shadow_list; - struct mutex shadow_list_lock; - /* record hw reset is performed */ bool has_hw_reset; u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 57bda66e85ef..2ca127173135 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -511,7 +511,7 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h return -EINVAL; } - /* udpate aca bank to aca source error_cache first */ + /* update aca bank to aca source error_cache first */ ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index f85ace0384d2..7dd55ed57c1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, struct acpi_buffer *params) { acpi_status status; + union acpi_object *obj; union acpi_object atif_arg_elements[2]; struct acpi_object_list atif_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, status = acpi_evaluate_object(atif->handle, NULL, &atif_arg, &buffer); + obj = (union acpi_object *)buffer.pointer; - /* Fail only if calling the method fails and ATIF is supported */ - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { + /* Fail if calling the method fails */ + if (ACPI_FAILURE(status)) { DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", acpi_format_exception(status)); - kfree(buffer.pointer); + kfree(obj); return NULL; } - return buffer.pointer; + if (obj->type != ACPI_TYPE_BUFFER) { + DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n", + obj->type); + kfree(obj); + return NULL; + } + + return obj; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 1254a43ec96b..3bc0cbf45bc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -950,28 +950,30 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) * @inst: xcc's instance number on a multi-XCC setup */ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, - int *wave_cnt, int *vmid, uint32_t inst) + struct kfd_cu_occupancy *queue_cnt, uint32_t inst) { int pipe_idx; int queue_slot; unsigned int reg_val; - + unsigned int wave_cnt; /* * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID * parameters to read out waves in flight. Get VMID if there are * non-zero waves in flight. 
*/ - *vmid = 0xFF; - *wave_cnt = 0; pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe; queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe; - soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst); - reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + - queue_slot); - *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; - if (*wave_cnt != 0) - *vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) & - CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT; + soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, GET_INST(GC, inst)); + reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, GET_INST(GC, inst), + mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot); + wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; + if (wave_cnt != 0) { + queue_cnt->wave_cnt += wave_cnt; + queue_cnt->doorbell_off = + (RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL) & + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK) >> + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + } } /** @@ -981,9 +983,8 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * or more queues running and submitting waves to compute units. * * @adev: Handle of device from which to get number of waves in flight - * @pasid: Identifies the process for which this query call is invoked - * @pasid_wave_cnt: Output parameter updated with number of waves in flight that - * belong to process with given pasid + * @cu_occupancy: Array that gets filled with wave_cnt and doorbell offset + * for comparison later. * @max_waves_per_cu: Output parameter updated with maximum number of waves * possible per Compute Unit * @inst: xcc's instance number on a multi-XCC setup @@ -1011,34 +1012,28 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * number of waves that are in flight for the queue at specified index. The * index ranges from 0 to 7. * - * If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID - * of the wave(s). + * If non-zero waves are in flight, store the corresponding doorbell offset + * of the queue, along with the wave count. * - * Determine if VMID from above step maps to pasid provided as parameter. If - * it matches agrregate the wave count. That the VMID will not match pasid is - * a normal condition i.e. a device is expected to support multiple queues - * from multiple proceses. + * Determine if the queue belongs to the process by comparing the doorbell + * offset against the process's queues. If it matches, aggregate the wave + * count for the process. 
* * Reading registers referenced above involves programming GRBM appropriately */ -void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, - int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst) +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, + struct kfd_cu_occupancy *cu_occupancy, + int *max_waves_per_cu, uint32_t inst) { int qidx; - int vmid; int se_idx; - int sh_idx; int se_cnt; - int sh_cnt; - int wave_cnt; int queue_map; - int pasid_tmp; int max_queue_cnt; - int vmid_wave_cnt = 0; DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES); lock_spi_csq_mutexes(adev); - soc15_grbm_select(adev, 1, 0, 0, 0, inst); + soc15_grbm_select(adev, 1, 0, 0, 0, GET_INST(GC, inst)); /* * Iterate through the shader engines and arrays of the device @@ -1048,51 +1043,38 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, AMDGPU_MAX_QUEUES); max_queue_cnt = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; - sh_cnt = adev->gfx.config.max_sh_per_se; se_cnt = adev->gfx.config.max_shader_engines; for (se_idx = 0; se_idx < se_cnt; se_idx++) { - for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { + amdgpu_gfx_select_se_sh(adev, se_idx, 0, 0xffffffff, inst); + queue_map = RREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_CSQ_WF_ACTIVE_STATUS); + + /* + * Assumption: queue map encodes following schema: four + * pipes per each micro-engine, with each pipe mapping + * eight queues. This schema is true for GFX9 devices + * and must be verified for newer device families + */ + for (qidx = 0; qidx < max_queue_cnt; qidx++) { + /* Skip qeueus that are not associated with + * compute functions + */ + if (!test_bit(qidx, cp_queue_bitmap)) + continue; - amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst); - queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS); + if (!(queue_map & (1 << qidx))) + continue; - /* - * Assumption: queue map encodes following schema: four - * pipes per each micro-engine, with each pipe mapping - * eight queues. 
This schema is true for GFX9 devices - * and must be verified for newer device families - */ - for (qidx = 0; qidx < max_queue_cnt; qidx++) { - - /* Skip qeueus that are not associated with - * compute functions - */ - if (!test_bit(qidx, cp_queue_bitmap)) - continue; - - if (!(queue_map & (1 << qidx))) - continue; - - /* Get number of waves in flight and aggregate them */ - get_wave_count(adev, qidx, &wave_cnt, &vmid, - inst); - if (wave_cnt != 0) { - pasid_tmp = - RREG32(SOC15_REG_OFFSET(OSSSYS, inst, - mmIH_VMID_0_LUT) + vmid); - if (pasid_tmp == pasid) - vmid_wave_cnt += wave_cnt; - } - } + /* Get number of waves in flight and aggregate them */ + get_wave_count(adev, qidx, &cu_occupancy[qidx], + inst); } } amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst); - soc15_grbm_select(adev, 0, 0, 0, 0, inst); + soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst)); unlock_spi_csq_mutexes(adev); /* Update the output parameters and return */ - *pasid_wave_cnt = vmid_wave_cnt; *max_waves_per_cu = adev->gfx.cu_info.simd_per_cu * adev->gfx.cu_info.max_waves_per_simd; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 988c50ac3be0..b6a91a552aa4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -52,8 +52,9 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); -void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, - int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst); +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, + struct kfd_cu_occupancy *cu_occupancy, + int *max_waves_per_cu, uint32_t inst); void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 4afef5b46c7d..fa572ba7f9fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1439,8 +1439,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, list_add_tail(&vm->vm_list_node, &(vm->process_info->vm_list_head)); vm->process_info->n_vms++; - - *ef = dma_fence_get(&vm->process_info->eviction_fence->base); + if (ef) + *ef = dma_fence_get(&vm->process_info->eviction_fence->base); mutex_unlock(&vm->process_info->lock); return 0; @@ -1499,7 +1499,7 @@ static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain) } } - ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0); + ret = amdgpu_bo_pin(bo, domain); if (ret) pr_err("Error in Pinning BO to domain: %d\n", domain); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 42e64bce661e..45affc02548c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -87,8 +87,9 @@ static bool check_atom_bios(uint8_t *bios, size_t size) * part of the system bios. On boot, the system bios puts a * copy of the igp rom at the start of vram if a discrete card is * present. + * For SR-IOV, the vbios image is also put in VRAM in the VF. 
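The kgd_gfx_v9_get_cu_occupancy() rework above drops the PASID/VMID lookup in favor of recording each active queue's doorbell offset in a kfd_cu_occupancy array. A minimal sketch of the consumer-side matching this enables; the proc_doorbells array and the helper name are hypothetical, only the wave_cnt/doorbell_off fields come from this patch:

/* Hypothetical KFD-side aggregation over the array filled by
 * kgd_gfx_v9_get_cu_occupancy(): a queue is attributed to a process
 * iff its doorbell offset matches one the process owns. */
static int sum_process_waves(const struct kfd_cu_occupancy *occ,
			     const u32 *proc_doorbells, int n_queues)
{
	int i, q, waves = 0;

	for (i = 0; i < AMDGPU_MAX_QUEUES; i++) {
		if (!occ[i].wave_cnt)
			continue;
		for (q = 0; q < n_queues; q++)
			if (occ[i].doorbell_off == proc_doorbells[q])
				waves += occ[i].wave_cnt;
	}
	return waves;
}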
*/ -static bool igp_read_bios_from_vram(struct amdgpu_device *adev) +static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev) { uint8_t __iomem *bios; resource_size_t vram_base; @@ -284,10 +285,6 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) acpi_status status; bool found = false; - /* ATRM is for the discrete card only */ - if (adev->flags & AMD_IS_APU) - return false; - /* ATRM is for on-platform devices only */ if (dev_is_removable(&adev->pdev->dev)) return false; @@ -343,11 +340,8 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev) { - if (adev->flags & AMD_IS_APU) - return igp_read_bios_from_vram(adev); - else - return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ? - false : amdgpu_asic_read_disabled_bios(adev); + return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ? + false : amdgpu_asic_read_disabled_bios(adev); } #ifdef CONFIG_ACPI @@ -414,7 +408,36 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) } #endif -bool amdgpu_get_bios(struct amdgpu_device *adev) +static bool amdgpu_get_bios_apu(struct amdgpu_device *adev) +{ + if (amdgpu_acpi_vfct_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from VFCT\n"); + goto success; + } + + if (amdgpu_read_bios_from_vram(adev)) { + dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n"); + goto success; + } + + if (amdgpu_read_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); + goto success; + } + + if (amdgpu_read_platform_bios(adev)) { + dev_info(adev->dev, "Fetched VBIOS from platform\n"); + goto success; + } + + dev_err(adev->dev, "Unable to locate a BIOS ROM\n"); + return false; + +success: + return true; +} + +static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev) { if (amdgpu_atrm_get_bios(adev)) { dev_info(adev->dev, "Fetched VBIOS from ATRM\n"); @@ -426,7 +449,8 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) goto success; } - if (igp_read_bios_from_vram(adev)) { + /* this is required for SR-IOV */ + if (amdgpu_read_bios_from_vram(adev)) { dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n"); goto success; } @@ -455,10 +479,24 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) return false; success: - adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10; return true; } +bool amdgpu_get_bios(struct amdgpu_device *adev) +{ + bool found; + + if (adev->flags & AMD_IS_APU) + found = amdgpu_get_bios_apu(adev); + else + found = amdgpu_get_bios_dgpu(adev); + + if (found) + adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10; + + return found; +} + /* helper function for soc15 and onwards to read bios from rom */ bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev, u8 *bios, u32 length_bytes) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1e475eb01417..d891ab779ca7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, /* Only a single BO list is allowed to simplify handling. 
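In the amdgpu_cs_pass1() hunk just below, the old code only assigned an error code on a duplicate BO-handles chunk and then fell through, so the following call could overwrite it and the error was lost; the fix bails out through the existing cleanup label instead. The general shape of that idiom as a self-contained sketch (predicate and sizes are illustrative):

/* Sketch of the goto-unwind idiom: a fatal condition jumps to the
 * label that releases everything allocated so far, instead of setting
 * an error code that later statements can clobber. */
static int parse_chunks_sketch(void)
{
	void *kdata = kmalloc(64, GFP_KERNEL);
	int ret = 0;

	if (!kdata)
		return -ENOMEM;

	if (duplicate_bo_list_chunk()) {	/* hypothetical predicate */
		ret = -EINVAL;
		goto free_kdata;		/* unwind, don't fall through */
	}
	/* ... further parsing ... */
free_kdata:
	kfree(kdata);
	return ret;
}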
*/ if (p->bo_list) - ret = -EINVAL; + goto free_partial_kdata; ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index cbef720de779..9da4414de617 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -402,7 +402,7 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz int r; uint32_t *data, x; - if (size & 0x3 || *pos & 0x3) + if (size > 4096 || size & 0x3 || *pos & 0x3) return -EINVAL; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); @@ -1648,7 +1648,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { ent = debugfs_create_file(debugfs_regs_names[i], - S_IFREG | 0444, root, + S_IFREG | 0400, root, adev, debugfs_regs[i]); if (!i && !IS_ERR_OR_NULL(ent)) i_size_write(ent->d_inode, adev->rmmio_size); @@ -2100,11 +2100,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) amdgpu_securedisplay_debugfs_init(adev); amdgpu_fw_attestation_debugfs_init(adev); - debugfs_create_file("amdgpu_evict_vram", 0444, root, adev, + debugfs_create_file("amdgpu_evict_vram", 0400, root, adev, &amdgpu_evict_vram_fops); - debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev, + debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev, &amdgpu_evict_gtt_fops); - debugfs_create_file("amdgpu_test_ib", 0444, root, adev, + debugfs_create_file("amdgpu_test_ib", 0400, root, adev, &amdgpu_debugfs_test_ib_fops); debugfs_create_file("amdgpu_vm_info", 0444, root, adev, &amdgpu_debugfs_vm_info_fops); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f4628412dac4..c2394c8b4d6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4107,9 +4107,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->mm_stats.lock); spin_lock_init(&adev->wb.lock); - INIT_LIST_HEAD(&adev->shadow_list); - mutex_init(&adev->shadow_list_lock); - INIT_LIST_HEAD(&adev->reset_list); INIT_LIST_HEAD(&adev->ras_list); @@ -5030,80 +5027,6 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) } /** - * amdgpu_device_recover_vram - Recover some VRAM contents - * - * @adev: amdgpu_device pointer - * - * Restores the contents of VRAM buffers from the shadows in GTT. Used to - * restore things like GPUVM page tables after a GPU reset where - * the contents of VRAM might be lost. - * - * Returns: - * 0 on success, negative error code on failure. 
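The amdgpu_device_recover_vram() helper whose removal starts here chained the per-BO copy fences and charged every wait against one shared timeout budget, a pattern worth noting since dma_fence_wait_timeout() returns the remaining jiffies. Reduced to its core (two fences, error handling abbreviated):

/* One timeout budget across several waits: dma_fence_wait_timeout()
 * returns the jiffies left, so the remainder is passed to the next
 * wait; 0 means the budget ran out, negative means the wait failed. */
long tmo = msecs_to_jiffies(100);

tmo = dma_fence_wait_timeout(fence_a, false, tmo);
if (tmo > 0)
	tmo = dma_fence_wait_timeout(fence_b, false, tmo);
if (tmo == 0)
	return -ETIMEDOUT;
if (tmo < 0)
	return tmo;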
- */ -static int amdgpu_device_recover_vram(struct amdgpu_device *adev) -{ - struct dma_fence *fence = NULL, *next = NULL; - struct amdgpu_bo *shadow; - struct amdgpu_bo_vm *vmbo; - long r = 1, tmo; - - if (amdgpu_sriov_runtime(adev)) - tmo = msecs_to_jiffies(8000); - else - tmo = msecs_to_jiffies(100); - - dev_info(adev->dev, "recover vram bo from shadow start\n"); - mutex_lock(&adev->shadow_list_lock); - list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { - /* If vm is compute context or adev is APU, shadow will be NULL */ - if (!vmbo->shadow) - continue; - shadow = vmbo->shadow; - - /* No need to recover an evicted BO */ - if (!shadow->tbo.resource || - shadow->tbo.resource->mem_type != TTM_PL_TT || - shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || - shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) - continue; - - r = amdgpu_bo_restore_shadow(shadow, &next); - if (r) - break; - - if (fence) { - tmo = dma_fence_wait_timeout(fence, false, tmo); - dma_fence_put(fence); - fence = next; - if (tmo == 0) { - r = -ETIMEDOUT; - break; - } else if (tmo < 0) { - r = tmo; - break; - } - } else { - fence = next; - } - } - mutex_unlock(&adev->shadow_list_lock); - - if (fence) - tmo = dma_fence_wait_timeout(fence, false, tmo); - dma_fence_put(fence); - - if (r < 0 || tmo <= 0) { - dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); - return -EIO; - } - - dev_info(adev->dev, "recover vram bo from shadow done\n"); - return 0; -} - - -/** * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf * * @adev: amdgpu_device pointer @@ -5165,12 +5088,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; - if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { + if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) amdgpu_inc_vram_lost(adev); - r = amdgpu_device_recover_vram(adev); - } - if (r) - return r; /* need to be called during full access so we can't do it later like * bare-metal does. 
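Elsewhere in this patch, igp_read_bios_from_vram() becomes amdgpu_read_bios_from_vram() because SR-IOV VFs also expose the VBIOS through the VRAM BAR. The body is not shown above; roughly, the technique is to map the start of the VRAM aperture, copy out a candidate image, and validate it with check_atom_bios() (whose signature appears earlier). Sizes and mapping details in this sketch are illustrative, not taken from the patch:

/* Illustrative sketch only: copy a candidate VBIOS image from the
 * start of the VRAM aperture and validate the ATOM signature. */
static bool read_bios_from_vram_sketch(struct amdgpu_device *adev)
{
	const size_t len = 256 * 1024;	/* placeholder size */
	u8 __iomem *bios = ioremap_wc(adev->gmc.aper_base, len);

	if (!bios)
		return false;

	adev->bios = kzalloc(len, GFP_KERNEL);
	if (!adev->bios) {
		iounmap(bios);
		return false;
	}
	memcpy_fromio(adev->bios, bios, len);
	iounmap(bios);

	if (!check_atom_bios(adev->bios, len)) {
		kfree(adev->bios);
		adev->bios = NULL;
		return false;
	}
	return true;
}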
@@ -5569,9 +5488,7 @@ out: } } - if (!r) - r = amdgpu_device_recover_vram(tmp_adev); - else + if (r) tmp_adev->asic_reset_res = r; } @@ -6189,7 +6106,7 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, p2p_addressable = !(adev->gmc.aper_base & address_mask || aper_limit & address_mask); } - return is_large_bar && p2p_access && p2p_addressable; + return pcie_p2p && is_large_bar && p2p_access && p2p_addressable; #else return false; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 092ec11258cd..b119d27271c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -233,6 +233,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, } if (!adev->enable_virtual_display) { + new_abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev, new_abo->flags)); if (unlikely(r != 0)) { @@ -1474,7 +1475,7 @@ bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc, if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) && ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) || ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) && - connector->display_info.is_hdmi && + connector && connector->display_info.is_hdmi && amdgpu_display_is_hdtv_mode(mode)))) { if (amdgpu_encoder->underscan_hborder != 0) amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder; @@ -1759,6 +1760,7 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev) r = amdgpu_bo_reserve(aobj, true); if (r == 0) { + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); if (r != 0) dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 86cff30d5c4e..81d9877c8735 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -117,9 +117,10 @@ * - 3.56.0 - Update IB start address and size alignment for decode and encode * - 3.57.0 - Compute tunneling on GFX10+ * - 3.58.0 - Add GFX12 DCC support + * - 3.59.0 - Cleared VRAM */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 58 +#define KMS_DRIVER_MINOR 59 #define KMS_DRIVER_PATCHLEVEL 0 /* @@ -2931,6 +2932,7 @@ static const struct file_operations amdgpu_driver_kms_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = drm_show_fdinfo, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 0e617dff8765..1a5df8b94661 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -43,8 +43,6 @@ #include "amdgpu_hmm.h" #include "amdgpu_xgmi.h" -static const struct drm_gem_object_funcs amdgpu_gem_object_funcs; - static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) { struct ttm_buffer_object *bo = vmf->vma->vm_private_data; @@ -87,11 +85,11 @@ static const struct vm_operations_struct amdgpu_gem_vm_ops = { static void amdgpu_gem_object_free(struct drm_gem_object *gobj) { - struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj); - if (robj) { - amdgpu_hmm_unregister(robj); - amdgpu_bo_unref(&robj); + if (aobj) { + amdgpu_hmm_unregister(aobj); + ttm_bo_put(&aobj->tbo); } } @@ -126,7 +124,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bo 
= &ubo->bo; *obj = &bo->tbo.base; - (*obj)->funcs = &amdgpu_gem_object_funcs; return 0; } @@ -295,7 +292,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str return drm_gem_ttm_mmap(obj, vma); } -static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { +const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { .free = amdgpu_gem_object_free, .open = amdgpu_gem_object_open, .close = amdgpu_gem_object_close, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index f30264782ba2..3a8f57900a3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -33,6 +33,8 @@ #define AMDGPU_GEM_DOMAIN_MAX 0x3 #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base) +extern const struct drm_gem_object_funcs amdgpu_gem_object_funcs; + unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 83e54697f0ee..f1ffab5a1eae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1635,11 +1635,9 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) { int r; - if (!amdgpu_sriov_vf(adev)) { - r = device_create_file(adev->dev, &dev_attr_enforce_isolation); - if (r) - return r; - } + r = device_create_file(adev->dev, &dev_attr_enforce_isolation); + if (r) + return r; r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader); if (r) @@ -1650,8 +1648,7 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev) void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev) { - if (!amdgpu_sriov_vf(adev)) - device_remove_file(adev->dev, &dev_attr_enforce_isolation); + device_remove_file(adev->dev, &dev_attr_enforce_isolation); device_remove_file(adev->dev, &dev_attr_run_cleaner_shader); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index ad6bf5d4e0a9..16f2605ac50b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -107,8 +107,11 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) /* * Do the coredump immediately after a job timeout to get a very * close dump/snapshot/representation of GPU's current error status + * Skip it for SRIOV, since VF FLR will be triggered by host driver + * before job timeout */ - amdgpu_job_core_dump(adev, job); + if (!amdgpu_sriov_vf(adev)) + amdgpu_job_core_dump(adev, job); if (amdgpu_gpu_recovery && amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 10b61ff63802..7d4b540340e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1203,8 +1203,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); - if (r) + if (r) { + amdgpu_mes_unlock(&adev->mes); goto clean_up_memory; + } amdgpu_mes_ring_to_queue_props(adev, ring, &qprops); @@ -1237,7 +1239,6 @@ clean_up_ring: amdgpu_ring_fini(ring); clean_up_memory: kfree(ring); - amdgpu_mes_unlock(&adev->mes); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e32161f6b67a..44819cdba7fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -77,24 +77,6 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) amdgpu_bo_destroy(tbo); } -static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; - struct amdgpu_bo_vm *vmbo; - - bo = shadow_bo->parent; - vmbo = to_amdgpu_bo_vm(bo); - /* in case amdgpu_device_recover_vram got NULL of bo->parent */ - if (!list_empty(&vmbo->shadow_list)) { - mutex_lock(&adev->shadow_list_lock); - list_del_init(&vmbo->shadow_list); - mutex_unlock(&adev->shadow_list_lock); - } - - amdgpu_bo_destroy(tbo); -} - /** * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo * @bo: buffer object to be checked @@ -108,8 +90,7 @@ static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) { if (bo->destroy == &amdgpu_bo_destroy || - bo->destroy == &amdgpu_bo_user_destroy || - bo->destroy == &amdgpu_bo_vm_destroy) + bo->destroy == &amdgpu_bo_user_destroy) return true; return false; @@ -583,6 +564,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (bo == NULL) return -ENOMEM; drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size); + bo->tbo.base.funcs = &amdgpu_gem_object_funcs; bo->vm_bo = NULL; bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain : bp->domain; @@ -723,52 +705,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, } /** - * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list - * - * @vmbo: BO that will be inserted into the shadow list - * - * Insert a BO to the shadow list. - */ -void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev); - - mutex_lock(&adev->shadow_list_lock); - list_add_tail(&vmbo->shadow_list, &adev->shadow_list); - vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); - vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; - mutex_unlock(&adev->shadow_list_lock); -} - -/** - * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow - * - * @shadow: &amdgpu_bo shadow to be restored - * @fence: dma_fence associated with the operation - * - * Copies a buffer object's shadow content back to the object. - * This is used for recovering a buffer from its shadow in case of a gpu - * reset where vram context may be lost. - * - * Returns: - * 0 for success or a negative error code on failure. 
- */ -int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence) - -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev); - struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - uint64_t shadow_addr, parent_addr; - - shadow_addr = amdgpu_bo_gpu_offset(shadow); - parent_addr = amdgpu_bo_gpu_offset(shadow->parent); - - return amdgpu_copy_buffer(ring, shadow_addr, parent_addr, - amdgpu_bo_size(shadow), NULL, fence, - true, false, 0); -} - -/** * amdgpu_bo_kmap - map an &amdgpu_bo buffer object * @bo: &amdgpu_bo buffer object to be mapped * @ptr: kernel virtual address to be returned @@ -851,7 +787,7 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) if (bo == NULL) return NULL; - ttm_bo_get(&bo->tbo); + drm_gem_object_get(&bo->tbo.base); return bo; } @@ -863,40 +799,30 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) */ void amdgpu_bo_unref(struct amdgpu_bo **bo) { - struct ttm_buffer_object *tbo; - if ((*bo) == NULL) return; - tbo = &((*bo)->tbo); - ttm_bo_put(tbo); + drm_gem_object_put(&(*bo)->tbo.base); *bo = NULL; } /** - * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object + * amdgpu_bo_pin - pin an &amdgpu_bo buffer object * @bo: &amdgpu_bo buffer object to be pinned * @domain: domain to be pinned to - * @min_offset: the start of requested address range - * @max_offset: the end of requested address range * - * Pins the buffer object according to requested domain and address range. If - * the memory is unbound gart memory, binds the pages into gart table. Adjusts - * pin_count and pin_size accordingly. + * Pins the buffer object according to requested domain. If the memory is + * unbound gart memory, binds the pages into gart table. Adjusts pin_count and + * pin_size accordingly. * * Pinning means to lock pages in memory along with keeping them at a fixed * offset. It is required when a buffer can not be moved, for example, when * a display buffer is being scanned out. * - * Compared with amdgpu_bo_pin(), this function gives more flexibility on - * where to pin a buffer if there are specific restrictions on where a buffer - * must be located. - * * Returns: * 0 for success or a negative error code on failure. 
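With amdgpu_bo_pin_restricted() folded away, amdgpu_bo_pin() no longer forces contiguous VRAM on its own; as the display hunks elsewhere in this patch show, callers that scan out must now request it explicitly. Typical caller pattern, mirroring the dce_* and cursor changes:

	/* Scanout and cursor BOs must be physically contiguous in VRAM;
	 * since the pin helper no longer adds the flag, set it before
	 * pinning. */
	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
	amdgpu_bo_unreserve(abo);
	if (r)
		return r;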
*/ -int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, - u64 min_offset, u64 max_offset) +int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_operation_ctx ctx = { false, false }; @@ -905,9 +831,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) return -EPERM; - if (WARN_ON_ONCE(min_offset > max_offset)) - return -EINVAL; - /* Check domain to be pinned to against preferred domains */ if (bo->preferred_domains & domain) domain = bo->preferred_domains & domain; @@ -933,14 +856,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; ttm_bo_pin(&bo->tbo); - - if (max_offset != 0) { - u64 domain_start = amdgpu_ttm_domain_start(adev, - mem_type); - WARN_ON_ONCE(max_offset < - (amdgpu_bo_gpu_offset(bo) - domain_start)); - } - return 0; } @@ -957,17 +872,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; amdgpu_bo_placement_from_domain(bo, domain); for (i = 0; i < bo->placement.num_placement; i++) { - unsigned int fpfn, lpfn; - - fpfn = min_offset >> PAGE_SHIFT; - lpfn = max_offset >> PAGE_SHIFT; - - if (fpfn > bo->placements[i].fpfn) - bo->placements[i].fpfn = fpfn; - if (!bo->placements[i].lpfn || - (lpfn && lpfn < bo->placements[i].lpfn)) - bo->placements[i].lpfn = lpfn; - if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && bo->placements[i].mem_type == TTM_PL_VRAM) bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS; @@ -994,24 +898,6 @@ error: } /** - * amdgpu_bo_pin - pin an &amdgpu_bo buffer object - * @bo: &amdgpu_bo buffer object to be pinned - * @domain: domain to be pinned to - * - * A simple wrapper to amdgpu_bo_pin_restricted(). - * Provides a simpler API for buffers that do not have any strict restrictions - * on where a buffer must be located. - * - * Returns: - * 0 for success or a negative error code on failure. - */ -int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) -{ - bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - return amdgpu_bo_pin_restricted(bo, domain, 0, 0); -} - -/** * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object * @bo: &amdgpu_bo buffer object to be unpinned * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index d7e27957013f..717e47b46167 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -136,8 +136,6 @@ struct amdgpu_bo_user { struct amdgpu_bo_vm { struct amdgpu_bo bo; - struct amdgpu_bo *shadow; - struct list_head shadow_list; struct amdgpu_vm_bo_base entries[]; }; @@ -275,22 +273,6 @@ static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo) return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED; } -/** - * amdgpu_bo_shadowed - check if the BO is shadowed - * - * @bo: BO to be tested. - * - * Returns: - * NULL if not shadowed or else return a BO pointer. 
- */ -static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo) -{ - if (bo->tbo.type == ttm_bo_type_kernel) - return to_amdgpu_bo_vm(bo)->shadow; - - return NULL; -} - bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain); @@ -322,8 +304,6 @@ void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); void amdgpu_bo_unref(struct amdgpu_bo **bo); int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain); -int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, - u64 min_offset, u64 max_offset); void amdgpu_bo_unpin(struct amdgpu_bo *bo); int amdgpu_bo_init(struct amdgpu_device *adev); void amdgpu_bo_fini(struct amdgpu_device *adev); @@ -349,9 +329,6 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); void amdgpu_bo_get_memory(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats); -void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); -int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, - struct dma_fence **fence); uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 189574d53ebd..0b28b2cf1517 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2853,7 +2853,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp) if (ret) return ret; - /* Start rlc autoload after psp recieved all the gfx firmware */ + /* Start rlc autoload after psp received all the gfx firmware */ if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) { ret = psp_rlc_autoload_start(psp); @@ -3425,9 +3425,11 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; const struct psp_firmware_header_v2_0 *sos_hdr_v2_0; - int err = 0; + const struct psp_firmware_header_v2_1 *sos_hdr_v2_1; + int fw_index, fw_bin_count, start_index = 0; + const struct psp_fw_bin_desc *fw_bin; uint8_t *ucode_array_start_addr; - int fw_index = 0; + int err = 0; err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name); if (err) @@ -3478,15 +3480,30 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) case 2: sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data; - if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) { + fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); + + if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) { dev_err(adev->dev, "packed SOS count exceeds maximum limit\n"); err = -EINVAL; goto out; } - for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) { - err = parse_sos_bin_descriptor(psp, - &sos_hdr_v2_0->psp_fw_bin[fw_index], + if (sos_hdr_v2_0->header.header_version_minor == 1) { + sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data; + + fw_bin = sos_hdr_v2_1->psp_fw_bin; + + if (psp_is_aux_sos_load_required(psp)) + start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); + else + fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index); + + } else { + fw_bin = sos_hdr_v2_0->psp_fw_bin; + } + + for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) { + err = 
parse_sos_bin_descriptor(psp, fw_bin + fw_index, sos_hdr_v2_0); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 74a96516c913..e8abbbcb4326 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -138,6 +138,7 @@ struct psp_funcs { int (*vbflash_stat)(struct psp_context *psp); int (*fatal_error_recovery_quirk)(struct psp_context *psp); bool (*get_ras_capability)(struct psp_context *psp); + bool (*is_aux_sos_load_required)(struct psp_context *psp); }; struct ta_funcs { @@ -464,6 +465,9 @@ struct amdgpu_psp_funcs { ((psp)->funcs->fatal_error_recovery_quirk ? \ (psp)->funcs->fatal_error_recovery_quirk((psp)) : 0) +#define psp_is_aux_sos_load_required(psp) \ + ((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 61a2f386d9fb..1a1395c5fff1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -882,7 +882,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (ret) return ret; - /* gfx block ras dsiable cmd must send to ras-ta */ + /* gfx block ras disable cmd must send to ras-ta */ if (head->block == AMDGPU_RAS_BLOCK__GFX) con->features |= BIT(head->block); @@ -3468,6 +3468,11 @@ init_ras_enabled_flag: /* aca is disabled by default */ adev->aca.is_enabled = false; + + /* bad page feature is not applicable to specific app platform */ + if (adev->gmc.is_app_apu && + amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0)) + amdgpu_bad_page_threshold = 0; } static void amdgpu_ras_counte_dw(struct work_struct *work) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index aab8077e5098..f28f6b4ba765 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -58,7 +58,7 @@ #define EEPROM_I2C_MADDR_4 0x40000 /* - * The 2 macros bellow represent the actual size in bytes that + * The 2 macros below represent the actual size in bytes that * those entities occupy in the EEPROM memory. * RAS_TABLE_RECORD_SIZE is different than sizeof(eeprom_table_record) which * uses uint64 to store 6b fields such as retired_page. 
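The psp_init_sos_microcode() change above selects a sub-range of the packed SOS descriptors for the v2.1 header: when the auxiliary SOS load is required, parsing starts at psp_aux_fw_bin_index; otherwise the count is trimmed by that index. Condensed to the index arithmetic alone, with parse_one_descriptor() as a stand-in for parse_sos_bin_descriptor():

	/* v2.1 packaging: choose either the auxiliary tail of the
	 * descriptor array or the regular head, mirroring the logic in
	 * the hunk above. */
	int start_index = 0;
	int fw_bin_count = le32_to_cpu(hdr_v2_1->psp_fw_bin_count);

	if (psp_is_aux_sos_load_required(psp))
		start_index = le32_to_cpu(hdr_v2_1->psp_aux_fw_bin_index);
	else
		fw_bin_count -= le32_to_cpu(hdr_v2_1->psp_aux_fw_bin_index);

	for (int i = start_index; i < fw_bin_count; i++)
		parse_one_descriptor(&hdr_v2_1->psp_fw_bin[i]);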
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index f9ff493c100e..b0a8abc7a8ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -42,10 +42,10 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, uint32_t id; int r; - if (!f.file) + if (!fd_file(f)) return -EINVAL; - r = amdgpu_file_to_fpriv(f.file, &fpriv); + r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); if (r) { fdput(f); return r; @@ -71,10 +71,10 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev, struct amdgpu_ctx *ctx; int r; - if (!f.file) + if (!fd_file(f)) return -EINVAL; - r = amdgpu_file_to_fpriv(f.file, &fpriv); + r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); if (r) { fdput(f); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index bdf1ef825d89..c586ab4c911b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -260,6 +260,36 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, return 0; } +/** + * amdgpu_sync_kfd - sync to KFD fences + * + * @sync: sync object to add KFD fences to + * @resv: reservation object with KFD fences + * + * Extract all KFD fences and add them to the sync object. + */ +int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv) +{ + struct dma_resv_iter cursor; + struct dma_fence *f; + int r = 0; + + dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP); + dma_resv_for_each_fence_unlocked(&cursor, f) { + void *fence_owner = amdgpu_sync_get_owner(f); + + if (fence_owner != AMDGPU_FENCE_OWNER_KFD) + continue; + + r = amdgpu_sync_fence(sync, f); + if (r) + break; + } + dma_resv_iter_end(&cursor); + + return r; +} + /* Free the entry back to the slab */ static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index cf1e9e858efd..e3272dce798d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -51,6 +51,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f); int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode, void *owner); +int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv); struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b8bc7fa8c375..74adb983ab03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1970,7 +1970,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned int)(gtt_size / (1024 * 1024))); - /* Initiailize doorbell pool on PCI BAR */ + /* Initialize doorbell pool on PCI BAR */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE); if (r) { DRM_ERROR("Failed initializing doorbell heap.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 5bc37acd3981..4e23419b92d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -136,6 +136,14 @@ struct psp_firmware_header_v2_0 { 
struct psp_fw_bin_desc psp_fw_bin[]; }; +/* version_major=2, version_minor=1 */ +struct psp_firmware_header_v2_1 { + struct common_firmware_header header; + uint32_t psp_fw_bin_count; + uint32_t psp_aux_fw_bin_index; + struct psp_fw_bin_desc psp_fw_bin[]; +}; + /* version_major=1, version_minor=0 */ struct ta_firmware_header_v1_0 { struct common_firmware_header header; @@ -426,6 +434,7 @@ union amdgpu_firmware_header { struct psp_firmware_header_v1_1 psp_v1_1; struct psp_firmware_header_v1_3 psp_v1_3; struct psp_firmware_header_v2_0 psp_v2_0; + struct psp_firmware_header_v2_0 psp_v2_1; struct ta_firmware_header_v1_0 ta; struct ta_firmware_header_v2_0 ta_v2_0; struct gfx_firmware_header_v1_0 gfx; @@ -447,7 +456,7 @@ union amdgpu_firmware_header { uint8_t raw[0x100]; }; -#define UCODE_MAX_PSP_PACKAGING ((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc)) +#define UCODE_MAX_PSP_PACKAGING (((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc)) * 2) /* * fw loading support diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index e5f508d34ed8..d4c2afafbb73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -338,6 +338,7 @@ static int amdgpu_vkms_prepare_fb(struct drm_plane *plane, else domain = AMDGPU_GEM_DOMAIN_VRAM; + rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(rbo, domain); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2452dfa6314f..6005280f5f38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -465,7 +465,6 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, { uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); struct amdgpu_vm_bo_base *bo_base; - struct amdgpu_bo *shadow; struct amdgpu_bo *bo; int r; @@ -486,16 +485,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_unlock(&vm->status_lock); bo = bo_base->bo; - shadow = amdgpu_bo_shadowed(bo); r = validate(param, bo); if (r) return r; - if (shadow) { - r = validate(param, shadow); - if (r) - return r; - } if (bo->tbo.type != ttm_bo_type_kernel) { amdgpu_vm_bo_moved(bo_base); @@ -1176,6 +1169,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, AMDGPU_SYNC_EQ_OWNER, vm); if (r) goto error_free; + if (bo) { + r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv); + if (r) + goto error_free; + } + } else { struct drm_gem_object *obj = &bo->tbo.base; @@ -2149,10 +2148,6 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, { struct amdgpu_vm_bo_base *bo_base; - /* shadow bo doesn't have bo base, its validation needs its parent */ - if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo)) - bo = bo->parent; - for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; @@ -2482,7 +2477,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, root_bo = amdgpu_bo_ref(&root->bo); r = amdgpu_bo_reserve(root_bo, true); if (r) { - amdgpu_bo_unref(&root->shadow); amdgpu_bo_unref(&root_bo); goto error_free_delayed; } @@ -2575,11 +2569,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->last_update = dma_fence_get_stub(); vm->is_compute_context = true; - /* Free the shadow bo for 
compute VM */ - amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); - - goto unreserve_bo; - unreserve_bo: amdgpu_bo_unreserve(vm->root.bo); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index a076f43097e4..f78a0434a48f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -383,14 +383,6 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (r) return r; - if (vmbo->shadow) { - struct amdgpu_bo *shadow = vmbo->shadow; - - r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx); - if (r) - return r; - } - if (!drm_dev_enter(adev_to_drm(adev), &idx)) return -ENODEV; @@ -448,10 +440,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id) { struct amdgpu_bo_param bp; - struct amdgpu_bo *bo; - struct dma_resv *resv; unsigned int num_entries; - int r; memset(&bp, 0, sizeof(bp)); @@ -484,42 +473,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (vm->root.bo) bp.resv = vm->root.bo->tbo.base.resv; - r = amdgpu_bo_create_vm(adev, &bp, vmbo); - if (r) - return r; - - bo = &(*vmbo)->bo; - if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) { - (*vmbo)->shadow = NULL; - return 0; - } - - if (!bp.resv) - WARN_ON(dma_resv_lock(bo->tbo.base.resv, - NULL)); - resv = bp.resv; - memset(&bp, 0, sizeof(bp)); - bp.size = amdgpu_vm_pt_size(adev, level); - bp.domain = AMDGPU_GEM_DOMAIN_GTT; - bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; - bp.type = ttm_bo_type_kernel; - bp.resv = bo->tbo.base.resv; - bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.xcp_id_plus1 = xcp_id + 1; - - r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); - - if (!resv) - dma_resv_unlock(bo->tbo.base.resv); - - if (r) { - amdgpu_bo_unref(&bo); - return r; - } - - amdgpu_bo_add_to_shadow_list(*vmbo); - - return 0; + return amdgpu_bo_create_vm(adev, &bp, vmbo); } /** @@ -569,7 +523,6 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev, return 0; error_free_pt: - amdgpu_bo_unref(&pt->shadow); amdgpu_bo_unref(&pt_bo); return r; } @@ -581,17 +534,10 @@ error_free_pt: */ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry) { - struct amdgpu_bo *shadow; - if (!entry->bo) return; entry->bo->vm_bo = NULL; - shadow = amdgpu_bo_shadowed(entry->bo); - if (shadow) { - ttm_bo_set_bulk_move(&shadow->tbo, NULL); - amdgpu_bo_unref(&shadow); - } ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); spin_lock(&entry->vm->status_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 4772fba33285..46d9fb433ab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -35,16 +35,7 @@ */ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table) { - int r; - - r = amdgpu_ttm_alloc_gart(&table->bo.tbo); - if (r) - return r; - - if (table->shadow) - r = amdgpu_ttm_alloc_gart(&table->shadow->tbo); - - return r; + return amdgpu_ttm_alloc_gart(&table->bo.tbo); } /* Allocate a new job for @count PTE updates */ @@ -265,17 +256,13 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, if (!p->pages_addr) { /* set page commands needed */ - if (vmbo->shadow) - amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr, - count, incr, flags); amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count, incr, flags); return 0; } /* copy commands needed */ - ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw * - (vmbo->shadow ? 
2 : 1); + ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw; /* for padding */ ndw -= 7; @@ -290,8 +277,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, pte[i] |= flags; } - if (vmbo->shadow) - amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes); amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes); pe += nptes * 8; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 90138bc5f03d..32775260556f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -180,6 +180,6 @@ amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from) #define for_each_xcp(xcp_mgr, xcp, i) \ for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \ - xcp = amdgpu_get_next_xcp(xcp_mgr, &i)) + ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i)) #endif diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 26e2188101e7..ccfd2a4b4acc 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -94,8 +94,6 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, case AMDGPU_RING_TYPE_VCN_ENC: case AMDGPU_RING_TYPE_VCN_JPEG: ip_blk = AMDGPU_XCP_VCN; - if (aqua_vanjaram_xcp_vcn_shared(adev)) - inst_mask = 1 << (inst_idx * 2); break; default: DRM_ERROR("Not support ring type %d!", ring->funcs->type); @@ -105,6 +103,8 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { ring->xcp_id = xcp_id; + dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name, + ring->xcp_id); if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id; break; @@ -394,38 +394,31 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x struct amdgpu_xcp_ip *ip) { struct amdgpu_device *adev = xcp_mgr->adev; + int num_sdma, num_vcn, num_shared_vcn, num_xcp; int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp; - int num_sdma, num_vcn; num_sdma = adev->sdma.num_instances; num_vcn = adev->vcn.num_vcn_inst; + num_shared_vcn = 1; + + num_xcc_xcp = adev->gfx.num_xcc_per_xcp; + num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp; switch (xcp_mgr->mode) { case AMDGPU_SPX_PARTITION_MODE: - num_sdma_xcp = num_sdma; - num_vcn_xcp = num_vcn; - break; case AMDGPU_DPX_PARTITION_MODE: - num_sdma_xcp = num_sdma / 2; - num_vcn_xcp = num_vcn / 2; - break; case AMDGPU_TPX_PARTITION_MODE: - num_sdma_xcp = num_sdma / 3; - num_vcn_xcp = num_vcn / 3; - break; case AMDGPU_QPX_PARTITION_MODE: - num_sdma_xcp = num_sdma / 4; - num_vcn_xcp = num_vcn / 4; - break; case AMDGPU_CPX_PARTITION_MODE: - num_sdma_xcp = 2; - num_vcn_xcp = num_vcn ? 
1 : 0; + num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp); + num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp); break; default: return -EINVAL; } - num_xcc_xcp = adev->gfx.num_xcc_per_xcp; + if (num_vcn && num_xcp > num_vcn) + num_shared_vcn = num_xcp / num_vcn; switch (ip_id) { case AMDGPU_XCP_GFXHUB: @@ -441,7 +434,8 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x ip->ip_funcs = &sdma_v4_4_2_xcp_funcs; break; case AMDGPU_XCP_VCN: - ip->inst_mask = XCP_INST_MASK(num_vcn_xcp, xcp_id); + ip->inst_mask = + XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn); /* TODO : Assign IP funcs */ break; default: @@ -488,7 +482,7 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, case AMDGPU_SPX_PARTITION_MODE: return adev->gmc.num_mem_partitions == 1 && num_xcc > 0; case AMDGPU_DPX_PARTITION_MODE: - return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0; + return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0; case AMDGPU_TPX_PARTITION_MODE: return (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 3) && diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 09715b506468..81d195d366ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -27,7 +27,7 @@ #include <linux/slab.h> #include <linux/string_helpers.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <drm/drm_util.h> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 742adbc460c9..70c1399f738d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1881,6 +1881,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, return r; if (!atomic) { + abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); if (unlikely(r != 0)) { amdgpu_bo_unreserve(abo); @@ -2401,6 +2402,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_bo_unreserve(aobj); if (ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 8d46ebadfa46..f154c24499c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1931,6 +1931,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, return r; if (!atomic) { + abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); if (unlikely(r != 0)) { amdgpu_bo_unreserve(abo); @@ -2485,6 +2486,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_bo_unreserve(aobj); if (ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index f08dc6a3886f..a7fcb135827f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1861,6 +1861,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, return r; if (!atomic) { + abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); if (unlikely(r != 0)) { amdgpu_bo_unreserve(abo); @@ -2321,6 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; ret = amdgpu_bo_pin(aobj, 
AMDGPU_GEM_DOMAIN_VRAM); amdgpu_bo_unreserve(aobj); if (ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index a6a3adf2ae13..77ac3f114d24 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1828,6 +1828,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, return r; if (!atomic) { + abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); if (unlikely(r != 0)) { amdgpu_bo_unreserve(abo); @@ -2320,6 +2321,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, return ret; } + aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); amdgpu_bo_unreserve(aobj); if (ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index d1357c01eb39..47b47d21f464 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -202,12 +202,16 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = { SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ) }; -static const struct soc15_reg_golden golden_settings_gc_12_0[] = { +static const struct soc15_reg_golden golden_settings_gc_12_0_rev0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f), SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000), SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020) }; +static const struct soc15_reg_golden golden_settings_gc_12_0[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x00008000, 0x00008000), +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -3495,10 +3499,14 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): + soc15_program_register_sequence(adev, + golden_settings_gc_12_0, + (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); + if (adev->rev_id == 0) soc15_program_register_sequence(adev, - golden_settings_gc_12_0, - (const u32)ARRAY_SIZE(golden_settings_gc_12_0)); + golden_settings_gc_12_0_rev0, + (const u32)ARRAY_SIZE(golden_settings_gc_12_0_rev0)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 408e5600bb61..c100845409f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1701,7 +1701,15 @@ static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev, WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0); } else { WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, - (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); + (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK | + CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK | + CP_MEC_CNTL__MEC_ME1_HALT_MASK | + CP_MEC_CNTL__MEC_ME2_HALT_MASK)); adev->gfx.kiq[xcc_id].ring.sched.ready = false; } udelay(50); @@ -2240,6 +2248,8 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id); if (r) return r; + } else { + 
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); } r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id); @@ -2299,12 +2309,6 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) return 0; } -static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable, - int xcc_id) -{ - gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id); -} - static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) { if (amdgpu_gfx_disable_kcq(adev, xcc_id)) @@ -2336,7 +2340,7 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) } gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id); - gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id); + gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id); } static int gfx_v9_4_3_hw_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index 6c1891889c4d..d4f72e47ae9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -153,7 +153,7 @@ static void imu_v11_0_setup(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regGFX_IMU_C2PMSG_16, imu_reg_val); } - //disble imu Rtavfs, SmsRepair, DfllBTC, and ClkB + //disable imu Rtavfs, SmsRepair, DfllBTC, and ClkB imu_reg_val = RREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10); imu_reg_val |= 0x10007; WREG32_SOC15(GC, 0, regGFX_IMU_SCRATCH_10, imu_reg_val); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index ee91ff9e52a2..231a3d490ea8 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -161,7 +161,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, int api_status_off) { union MESAPI__QUERY_MES_STATUS mes_status_pkt; - signed long timeout = 3000000; /* 3000 ms */ + signed long timeout = 2100000; /* 2100 ms */ struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring[0]; struct MES_API_STATUS *api_status; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index e499b2857a01..a37a6801c9ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -146,7 +146,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, int api_status_off) { union MESAPI__QUERY_MES_STATUS mes_status_pkt; - signed long timeout = 3000000; /* 3000 ms */ + signed long timeout = 2100000; /* 2100 ms */ struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring[pipe]; spinlock_t *ring_lock = &mes->ring_lock[pipe]; @@ -479,6 +479,11 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, union MESAPI__MISC misc_pkt; int pipe; + if (mes->adev->enable_uni_mes) + pipe = AMDGPU_MES_KIQ_PIPE; + else + pipe = AMDGPU_MES_SCHED_PIPE; + memset(&misc_pkt, 0, sizeof(misc_pkt)); misc_pkt.header.type = MES_API_TYPE_SCHEDULER; @@ -513,6 +518,7 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1; break; case MES_MISC_OP_SET_SHADER_DEBUGGER: + pipe = AMDGPU_MES_SCHED_PIPE; misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER; misc_pkt.set_shader_debugger.process_context_addr = input->set_shader_debugger.process_context_addr; @@ -530,11 +536,6 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes, return -EINVAL; } - if (mes->adev->enable_uni_mes) - pipe = AMDGPU_MES_KIQ_PIPE; - else - pipe = AMDGPU_MES_SCHED_PIPE; - return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, &misc_pkt, sizeof(misc_pkt), offsetof(union 
MESAPI__MISC, api_status)); @@ -608,6 +609,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) mes_set_hw_res_pkt.disable_mes_log = 1; mes_set_hw_res_pkt.use_different_vmid_compute = 1; mes_set_hw_res_pkt.enable_reg_active_poll = 1; + mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; /* * Keep oversubscribe timer for sdma . When we have unmapped doorbell @@ -619,7 +621,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe) if (amdgpu_mes_log_enable) { mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; - mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; + mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE; } return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe, @@ -1334,7 +1336,7 @@ static int mes_v12_0_sw_init(void *handle) adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini; adev->mes.enable_legacy_queue_map = true; - adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE; + adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE; r = amdgpu_mes_init(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index fa479dfa1ec1..739fce4fa8fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -365,7 +365,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; } else { - /* Disbale ASPM L1 */ + /* Disable ASPM L1 */ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; /* Disable ASPM TxL0s */ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 1251ee38a676..51e470e8d67d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -81,6 +81,8 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_4_ta.bin"); /* memory training timeout define */ #define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 +#define regMP1_PUB_SCRATCH0 0x3b10090 + static int psp_v13_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -807,6 +809,20 @@ static bool psp_v13_0_get_ras_capability(struct psp_context *psp) } } +static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + u32 pmfw_ver; + + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) + return false; + + /* load 4e version of sos if pmfw version less than 85.115.0 */ + pmfw_ver = RREG32(regMP1_PUB_SCRATCH0 / 4); + + return (pmfw_ver < 0x557300); +} + static const struct psp_funcs psp_v13_0_funcs = { .init_microcode = psp_v13_0_init_microcode, .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state, @@ -830,6 +846,7 @@ static const struct psp_funcs psp_v13_0_funcs = { .vbflash_stat = psp_v13_0_vbflash_status, .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk, .get_ras_capability = psp_v13_0_get_ras_capability, + .is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required, }; void psp_v13_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index aa637541da58..e65194fe94af 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -710,7 +710,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) upper_32_bits(wptr_gpu_addr)); 
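The psp_v13_0 hunk above encodes "85.115.0" as the bare constant 0x557300 when deciding whether the auxiliary SOS image must be loaded. A minimal sketch of the packing that comparison assumes (the helper name is illustrative, not from the driver):

static inline u32 pmfw_ver_pack(u32 major, u32 minor, u32 patch)
{
	/* 85.115.0 -> (0x55 << 16) | (0x73 << 8) | 0x00 == 0x557300 */
	return (major << 16) | (minor << 8) | patch;
}

With that layout a plain integer compare orders firmware versions, so pmfw_ver < pmfw_ver_pack(85, 115, 0) is exactly the "less than 85.115.0" check named in the comment.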
wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]); if (ring->use_pollmem) { - /*wptr polling is not enogh fast, directly clean the wptr register */ + /*wptr polling is not fast enough, directly clean the wptr register */ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index cfd8e183ad50..9288f37a3cc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin"); #define SDMA0_HYP_DEC_REG_END 0x589a #define SDMA1_HYP_DEC_REG_OFFSET 0x20 +/*define for compression field for sdma7*/ +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0 +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001 +#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16 +#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift) + static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = { SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG), SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG), @@ -1080,13 +1086,16 @@ static void sdma_v7_0_vm_copy_pte(struct amdgpu_ib *ib, unsigned bytes = count * 8; ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | + SDMA_PKT_COPY_LINEAR_HEADER_CPV(1); + ib->ptr[ib->length_dw++] = bytes - 1; ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ ib->ptr[ib->length_dw++] = lower_32_bits(src); ib->ptr[ib->length_dw++] = upper_32_bits(src); ib->ptr[ib->length_dw++] = lower_32_bits(pe); ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = 0; } @@ -1721,7 +1730,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib, uint64_t dst_offset, uint32_t byte_count) { - ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL); + ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) | + SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1); ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); ib->ptr[ib->length_dw++] = src_data; @@ -1744,7 +1754,7 @@ static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev) } static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = { - .copy_pte_num_dw = 7, + .copy_pte_num_dw = 8, .copy_pte = sdma_v7_0_vm_copy_pte, .write_pte = sdma_v7_0_vm_write_pte, .set_pte_pde = sdma_v7_0_vm_set_pte_pde, diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c index e4e30b9d481b..c04fdd2d5b38 100644 --- a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c @@ -60,7 +60,7 @@ static void smuio_v9_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *f { u32 data; - /* CGTT_ROM_CLK_CTRL0 is not availabe for APUs */ + /* CGTT_ROM_CLK_CTRL0 is not available for APUs */ if (adev->flags & AMD_IS_APU) return; diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c index b0c3678cfb31..fd4c3d4f8387 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc24.c +++ b/drivers/gpu/drm/amd/amdgpu/soc24.c @@ -250,13 +250,6 @@ static void soc24_program_aspm(struct amdgpu_device *adev) adev->nbio.funcs->program_aspm(adev); } -static void 
soc24_enable_doorbell_aperture(struct amdgpu_device *adev, - bool enable) -{ - adev->nbio.funcs->enable_doorbell_aperture(adev, enable); - adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); -} - const struct amdgpu_ip_block_version soc24_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, @@ -454,6 +447,11 @@ static int soc24_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_get_irq(adev); + /* Enable selfring doorbell aperture late because doorbell BAR + * aperture will change if resize BAR successfully in gmc sw_init. + */ + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); + return 0; } @@ -491,7 +489,7 @@ static int soc24_common_hw_init(void *handle) adev->df.funcs->hw_init(adev); /* enable the doorbell aperture */ - soc24_enable_doorbell_aperture(adev, true); + adev->nbio.funcs->enable_doorbell_aperture(adev, true); return 0; } @@ -500,8 +498,13 @@ static int soc24_common_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* disable the doorbell aperture */ - soc24_enable_doorbell_aperture(adev, false); + /* Disable the doorbell aperture and selfring doorbell aperture + * separately in hw_fini because soc21_enable_doorbell_aperture + * has been removed and there is no need to delay disabling + * selfring doorbell. + */ + adev->nbio.funcs->enable_doorbell_aperture(adev, false); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); if (amdgpu_sriov_vf(adev)) xgpu_nv_mailbox_put_irq(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index b1fd226b7efb..9d4f5352a62c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -1395,170 +1395,6 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring) } } -static int vcn_v4_0_5_limit_sched(struct amdgpu_cs_parser *p, - struct amdgpu_job *job) -{ - struct drm_gpu_scheduler **scheds; - - /* The create msg must be in the first IB submitted */ - if (atomic_read(&job->base.entity->fence_seq)) - return -EINVAL; - - /* if VCN0 is harvested, we can't support AV1 */ - if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) - return -EINVAL; - - scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC] - [AMDGPU_RING_PRIO_0].sched; - drm_sched_entity_modify_sched(job->base.entity, scheds, 1); - return 0; -} - -static int vcn_v4_0_5_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, - uint64_t addr) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_va_mapping *map; - uint32_t *msg, num_buffers; - struct amdgpu_bo *bo; - uint64_t start, end; - unsigned int i; - void *ptr; - int r; - - addr &= AMDGPU_GMC_HOLE_MASK; - r = amdgpu_cs_find_mapping(p, addr, &bo, &map); - if (r) { - DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr); - return r; - } - - start = map->start * AMDGPU_GPU_PAGE_SIZE; - end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE; - if (addr & 0x7) { - DRM_ERROR("VCN messages must be 8 byte aligned!\n"); - return -EINVAL; - } - - bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (r) { - DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r); - return r; - } - - r = amdgpu_bo_kmap(bo, &ptr); - if (r) { - DRM_ERROR("Failed mapping the VCN message (%d)!\n", r); - return r; - } - - msg = ptr + addr - start; - - /* Check length */ - if (msg[1] > end - addr) { - r = -EINVAL; 
- goto out; - } - - if (msg[3] != RDECODE_MSG_CREATE) - goto out; - - num_buffers = msg[2]; - for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { - uint32_t offset, size, *create; - - if (msg[0] != RDECODE_MESSAGE_CREATE) - continue; - - offset = msg[1]; - size = msg[2]; - - if (offset + size > end) { - r = -EINVAL; - goto out; - } - - create = ptr + addr + offset - start; - - /* H264, HEVC and VP9 can run on any instance */ - if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) - continue; - - r = vcn_v4_0_5_limit_sched(p, job); - if (r) - goto out; - } - -out: - amdgpu_bo_kunmap(bo); - return r; -} - -#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002) -#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003) - -#define RADEON_VCN_ENGINE_INFO (0x30000001) -#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16 - -#define RENCODE_ENCODE_STANDARD_AV1 2 -#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003 -#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64 - -/* return the offset in ib if id is found, -1 otherwise - * to speed up the searching we only search upto max_offset - */ -static int vcn_v4_0_5_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset) -{ - int i; - - for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) { - if (ib->ptr[i + 1] == id) - return i; - } - return -1; -} - -static int vcn_v4_0_5_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, - struct amdgpu_job *job, - struct amdgpu_ib *ib) -{ - struct amdgpu_ring *ring = amdgpu_job_ring(job); - struct amdgpu_vcn_decode_buffer *decode_buffer; - uint64_t addr; - uint32_t val; - int idx; - - /* The first instance can decode anything */ - if (!ring->me) - return 0; - - /* RADEON_VCN_ENGINE_INFO is at the top of ib block */ - idx = vcn_v4_0_5_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, - RADEON_VCN_ENGINE_INFO_MAX_OFFSET); - if (idx < 0) /* engine info is missing */ - return 0; - - val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */ - if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { - decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; - - if (!(decode_buffer->valid_buf_flag & 0x1)) - return 0; - - addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | - decode_buffer->msg_buffer_address_lo; - return vcn_v4_0_5_dec_msg(p, job, addr); - } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { - idx = vcn_v4_0_5_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, - RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET); - if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1) - return vcn_v4_0_5_limit_sched(p, job); - } - return 0; -} - static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, @@ -1566,7 +1402,6 @@ static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { .get_rptr = vcn_v4_0_5_unified_ring_get_rptr, .get_wptr = vcn_v4_0_5_unified_ring_get_wptr, .set_wptr = vcn_v4_0_5_unified_ring_set_wptr, - .patch_cs_in_place = vcn_v4_0_5_ring_patch_cs_in_place, .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 9044bdb38cf4..3e6b4736a7fe 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1148,7 +1148,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM) size >>= 1; - WRITE_ONCE(pdd->vram_usage, 
pdd->vram_usage + PAGE_ALIGN(size)); + atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage); } mutex_unlock(&p->mutex); @@ -1219,7 +1219,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, kfd_process_device_remove_obj_handle( pdd, GET_IDR_HANDLE(args->handle)); - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size); + atomic64_sub(size, &pdd->vram_usage); err_unlock: err_pdd: @@ -2347,7 +2347,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { bo_bucket->restored_offset = offset; /* Update the VRAM usage count */ - WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size); + atomic64_add(bo_bucket->size, &pdd->vram_usage); } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 71b465f8d83e..648f40091aa3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -3540,6 +3540,30 @@ int debug_refresh_runlist(struct device_queue_manager *dqm) return debug_map_and_unlock(dqm); } +bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + int doorbell_off, u32 *queue_format) +{ + struct queue *q; + bool r = false; + + if (!queue_format) + return r; + + dqm_lock(dqm); + + list_for_each_entry(q, &qpd->queues_list, list) { + if (q->properties.doorbell_off == doorbell_off) { + *queue_format = q->properties.format; + r = true; + goto out; + } + } + +out: + dqm_unlock(dqm); + return r; +} #if defined(CONFIG_DEBUG_FS) static void seq_reg_dump(struct seq_file *m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 08b40826ad1e..09ab36f8e8c6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -324,6 +324,9 @@ void set_queue_snapshot_entry(struct queue *q, int debug_lock_and_unmap(struct device_queue_manager *dqm); int debug_map_and_unlock(struct device_queue_manager *dqm); int debug_refresh_runlist(struct device_queue_manager *dqm); +bool kfd_dqm_is_queue_in_process(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + int doorbell_off, u32 *queue_format); static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index bb8cbfc39b90..37b69fe0ede3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -306,23 +306,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); - uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); - uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); - int hub_inst = 0; struct kfd_hsa_memory_exception_data exception_data; - /* gfxhub */ - if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { - hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, - node_id); - if (hub_inst < 0) - hub_inst = 0; - } - - /* mmhub */ - if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) - hub_inst = node_id / 4; - info.vmid = vmid; info.mc_id = client_id; info.page_addr = ih_ring_entry[4] | diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c index d163d92a692f..2b72d5b4949b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c @@ -341,6 +341,10 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, m->sdmax_rlcx_doorbell_offset = q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; + m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum + << SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT) + & SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK; + m->sdma_engine_id = q->sdma_engine_id; m->sdma_queue_id = q->sdma_queue_id; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d6530febabad..26e48fdc8728 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -775,7 +775,7 @@ struct kfd_process_device { enum kfd_pdd_bound bound; /* VRAM usage */ - uint64_t vram_usage; + atomic64_t vram_usage; struct attribute attr_vram; char vram_filename[MAX_SYSFS_FILENAME_LEN]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index a902950cc060..d4aa843aacfd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -270,6 +270,11 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) struct kfd_node *dev = NULL; struct kfd_process *proc = NULL; struct kfd_process_device *pdd = NULL; + int i; + struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES]; + u32 queue_format; + + memset(cu_occupancy, 0x0, sizeof(cu_occupancy)); pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy); dev = pdd->dev; @@ -287,8 +292,29 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) /* Collect wave count from device if it supports */ wave_cnt = 0; max_waves_per_cu = 0; + + /* + * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition. + * For AQL queues, because of cooperative dispatch we multiply the wave count + * by number of XCCs in the partition to get the total wave counts across all + * XCCs in the partition. + * For PM4 queues, there is no cooperative dispatch so wave_cnt stays as it is.
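A worked example of the aggregation this comment describes, assuming a partition with 4 XCCs: occupancy is sampled on the first XCC only, so a PM4 queue that reports 10 waves contributes 10 to the total, while an AQL queue that reports 10 waves contributes 4 * 10 = 40, because cooperative dispatch places the same waves on every XCC in the partition. Restated from the loop that follows the comment closed just below:

	/* PM4: single-XCC dispatch, count the waves as sampled */
	wave_cnt += cu_occupancy[i].wave_cnt;
	/* AQL: cooperative dispatch, scale by the XCC count */
	wave_cnt += NUM_XCC(dev->xcc_mask) * cu_occupancy[i].wave_cnt;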
+ */ + dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy, + &max_waves_per_cu, ffs(dev->xcc_mask) - 1); + + for (i = 0; i < AMDGPU_MAX_QUEUES; i++) { + if (cu_occupancy[i].wave_cnt != 0 && + kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd, + cu_occupancy[i].doorbell_off, + &queue_format)) { + if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4)) + wave_cnt += cu_occupancy[i].wave_cnt; + else + wave_cnt += (NUM_XCC(dev->xcc_mask) * + cu_occupancy[i].wave_cnt); + } + } /* Translate wave count to number of compute units */ cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; @@ -306,7 +332,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, } else if (strncmp(attr->name, "vram_", 5) == 0) { struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, attr_vram); - return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage)); + return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage)); } else if (strncmp(attr->name, "sdma_", 5) == 0) { struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device, attr_sdma); @@ -1599,7 +1625,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, pdd->bound = PDD_UNBOUND; pdd->already_dequeued = false; pdd->runtime_inuse = false; - pdd->vram_usage = 0; + atomic64_set(&pdd->vram_usage, 0); pdd->sdma_past_activity_counter = 0; pdd->user_gpu_id = dev->id; atomic64_set(&pdd->evict_duration_counter, 0); @@ -1676,12 +1702,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm, &p->kgd_process_info, - &ef); + p->ef ? NULL : &ef); if (ret) { dev_err(dev->adev->dev, "Failed to create process VM object\n"); return ret; } - RCU_INIT_POINTER(p->ef, ef); + + if (!p->ef) + RCU_INIT_POINTER(p->ef, ef); + pdd->drm_priv = drm_file->private_data; ret = kfd_process_device_reserve_ib_mem(pdd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index b439d4d0bd84..01b960b15274 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -517,7 +517,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) if (retval) goto err_destroy_queue; - kfd_procfs_del_queue(pqn->q); dqm = pqn->q->device->dqm; retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); if (retval) { @@ -527,6 +526,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) if (retval != -ETIME) goto err_destroy_queue; } + kfd_procfs_del_queue(pqn->q); kfd_queue_release_buffers(pdd, &pqn->q->properties); pqm_clean_queue_resource(pqm, pqn); uninit_queue(pqn->q); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 04e746923697..1893c27746a5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -405,6 +405,27 @@ static void svm_range_bo_release(struct kref *kref) spin_lock(&svm_bo->list_lock); } spin_unlock(&svm_bo->list_lock); + + if (mmget_not_zero(svm_bo->eviction_fence->mm)) { + struct kfd_process_device *pdd; + struct kfd_process *p; + struct mm_struct *mm; + + mm = svm_bo->eviction_fence->mm; + /* + * The forked child process takes svm_bo device pages ref, svm_bo could be + * released after parent process is gone. 
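The vram_usage conversion running through these amdkfd hunks (kfd_priv.h, kfd_chardev.c, kfd_process.c, kfd_svm.c) fixes a lost-update race: the old pattern was a non-atomic read-modify-write, so two threads accounting allocations concurrently could both read the same old value and one increment would vanish. atomic64_add()/atomic64_sub() make each update indivisible, and the sysfs reader moves from READ_ONCE() to atomic64_read(). A before/after sketch of the allocation path:

	/* before: the load, add and store can interleave across threads */
	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));

	/* after: the increment is one indivisible operation */
	atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);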
+ */ + p = kfd_lookup_process_by_mm(mm); + if (p) { + pdd = kfd_get_process_device_data(svm_bo->node, p); + if (pdd) + atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage); + kfd_unref_process(p); + } + mmput(mm); + } + if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) /* We're not in the eviction worker. Signal the fence. */ dma_fence_signal(&svm_bo->eviction_fence->base); @@ -532,6 +553,7 @@ int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear) { + struct kfd_process_device *pdd; struct amdgpu_bo_param bp; struct svm_range_bo *svm_bo; struct amdgpu_bo_user *ubo; @@ -623,6 +645,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, list_add(&prange->svm_bo_list, &svm_bo->range_list); spin_unlock(&svm_bo->list_lock); + pdd = svm_range_get_pdd_by_node(prange, node); + if (pdd) + atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage); + return 0; reserve_bo_failed: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0cff66735cfe..07e9ce99694f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -770,6 +770,12 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, return; } + /* Skip DMUB HPD IRQ in suspend/resume. We will probe them later. */ + if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) { + DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n"); + return; + } + link_index = notify->link_index; link = adev->dm.dc->links[link_index]; dev = adev->dm.ddev; @@ -808,6 +814,20 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, } /** + * dmub_hpd_sense_callback - DMUB HPD sense processing callback. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * HPD sense changes can occur during low power states and need to be + * notified from firmware to driver. + */ +static void dmub_hpd_sense_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) +{ + DRM_DEBUG_DRIVER("DMUB HPD SENSE callback.\n"); +} + +/** * register_dmub_notify_callback - Sets callback for DMUB notify * @adev: amdgpu_device pointer * @type: Type of dmub notification @@ -1757,25 +1777,41 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * static enum dmub_ips_disable_type dm_get_default_ips_mode( struct amdgpu_device *adev) { - /* - * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to - * cause a hard hang. A fix exists for newer PMFW. - * - * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest - * IPS state in all cases, except for s0ix and all displays off (DPMS), - * where IPS2 is allowed. - * - * When checking pmfw version, use the major and minor only. - */ - if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) && - (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) - return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + enum dmub_ips_disable_type ret = DMUB_IPS_ENABLE; - if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0)) - return DMUB_IPS_ENABLE; + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + /* + * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to + * cause a hard hang. A fix exists for newer PMFW. + * + * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest + * IPS state in all cases, except for s0ix and all displays off (DPMS), + * where IPS2 is allowed. 
+ * + * When checking pmfw version, use the major and minor only. + */ + if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300) + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0)) + /* + * Other ASICs with DCN35 that have residency issues with + * IPS2 in idle. + * We want them to use IPS2 only in display off cases. + */ + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + break; + case IP_VERSION(3, 5, 1): + ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; + break; + default: + /* ASICs older than DCN35 do not have IPSs */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0)) + ret = DMUB_IPS_DISABLE_ALL; + break; + } - /* ASICs older than DCN35 do not have IPSs */ - return DMUB_IPS_DISABLE_ALL; + return ret; } static int amdgpu_dm_init(struct amdgpu_device *adev) @@ -1996,7 +2032,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); } - if (adev->dm.dc->caps.ips_support && adev->dm.dc->config.disable_ips == DMUB_IPS_ENABLE) + if (adev->dm.dc->caps.ips_support && + adev->dm.dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) adev->dm.idle_workqueue = idle_create_workqueue(adev); if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { @@ -2935,10 +2972,11 @@ static int dm_suspend(void *handle) hpd_rx_irq_work_suspend(dm); - if (adev->dm.dc->caps.ips_support) - dc_allow_idle_optimizations(adev->dm.dc, true); - dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); + + if (dm->dc->caps.ips_support && adev->in_s0ix) + dc_allow_idle_optimizations(dm->dc, true); + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); return 0; @@ -3808,6 +3846,12 @@ static int register_hpd_handlers(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub hpd callback"); return -EINVAL; } + + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, + dmub_hpd_sense_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd sense callback"); + return -EINVAL; + } } list_for_each_entry(connector, @@ -4449,6 +4493,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 +#define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, @@ -4463,6 +4508,21 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, return; amdgpu_acpi_get_backlight_caps(&caps); + + /* validate the firmware value is sane */ + if (caps.caps_valid) { + int spread = caps.max_input_signal - caps.min_input_signal; + + if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + caps.min_input_signal < 0 || + spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + spread < AMDGPU_DM_MIN_SPREAD) { + DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", + caps.min_input_signal, caps.max_input_signal); + caps.caps_valid = false; + } + } + if (caps.caps_valid) { dm->backlight_caps[bl_idx].caps_valid = true; if (caps.aux_support) @@ -6683,12 +6743,21 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || stream->signal == SIGNAL_TYPE_EDP) { + const struct dc_edid_caps *edid_caps; + unsigned int disable_colorimetry = 0; + + if (aconnector->dc_sink) { + 
edid_caps = &aconnector->dc_sink->edid_caps; + disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; + } + // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && - stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && + !disable_colorimetry; if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; @@ -8305,7 +8374,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 5, 0) || acrtc_state->stream->link->psr_settings.psr_version < - DC_PSR_VERSION_UNSUPPORTED) { + DC_PSR_VERSION_UNSUPPORTED || + !(adev->flags & AMD_IS_APU)) { timing = &acrtc_state->stream->timing; /* at least 2 frames */ @@ -9359,6 +9429,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -9478,6 +9549,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -9529,6 +9601,19 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->otg_inst = status->primary_otg_inst; } } + + /* During boot up and resume, the DC layer will reset the panel brightness + * to fix a flicker issue. + * This causes dm->actual_brightness to no longer match the current panel brightness
(the dm->brightness is the correct panel level) + * So we set the backlight level with dm->brightness value after set mode + */ + if (set_backlight_level) { + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + } } static void dm_set_writeback(struct amdgpu_display_manager *dm, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 2d7755e2b6c3..15d4690c74d6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -50,7 +50,7 @@ #define AMDGPU_DM_MAX_NUM_EDP 2 -#define AMDGPU_DMUB_NOTIFICATION_MAX 6 +#define AMDGPU_DMUB_NOTIFICATION_MAX 7 #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 50109d13d967..eea317dcbe8c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -44,6 +44,7 @@ #include "dm_helpers.h" #include "ddc_service_types.h" +#include "clk_mgr.h" static u32 edid_extract_panel_id(struct edid *edid) { @@ -73,6 +74,10 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.remove_sink_ext_caps = true; break; + case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): + DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.disable_colorimetry = true; + break; default: return; } @@ -1117,6 +1122,8 @@ bool dm_helpers_dp_handle_test_pattern_request( struct pipe_ctx *pipe_ctx = NULL; struct amdgpu_dm_connector *aconnector = link->priv; struct drm_device *dev = aconnector->base.dev; + struct dc_state *dc_state = ctx->dc->current_state; + struct clk_mgr *clk_mgr = ctx->dc->clk_mgr; int i; for (i = 0; i < MAX_PIPES; i++) { @@ -1217,6 +1224,16 @@ bool dm_helpers_dp_handle_test_pattern_request( pipe_ctx->stream->test_pattern.type = test_pattern; pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space; + /* Temp W/A for compliance test failure */ + dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false; + dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ? 
+ clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz; + dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz; + ctx->dc->clk_mgr->funcs->update_clocks( + ctx->dc->clk_mgr, + dc_state, + false); + dc_link_dp_set_test_pattern( (struct dc_link *) link, test_pattern, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index c0c61c03984c..a08e8a0b696c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1027,6 +1027,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, int remaining_to_try = 0; int ret; uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + int var_pbn; for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled @@ -1057,13 +1058,18 @@ static int try_disable_dsc(struct drm_atomic_state *state, break; DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); + var_pbn = vars[next_index].pbn; vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); - if (ret < 0) + if (ret < 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n", + __func__, __LINE__, next_index, ret); + vars[next_index].pbn = var_pbn; return ret; + } ret = drm_dp_mst_atomic_check(state); if (ret == 0) { @@ -1071,14 +1077,17 @@ static int try_disable_dsc(struct drm_atomic_state *state, vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { - DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index); - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000); + DRM_DEBUG_DRIVER("MST_DSC index #%d, restore optimized pbn value\n", next_index); + vars[next_index].pbn = var_pbn; ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn); - if (ret < 0) + if (ret < 0) { + DRM_DEBUG_DRIVER("%s:%d MST_DSC index #%d, failed to set pbn to the state, %d\n", + __func__, __LINE__, next_index, ret); return ret; + } } tried[next_index] = true; @@ -1147,7 +1156,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; - dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, @@ -1681,7 +1690,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream, { struct dc_dsc_policy dsc_policy = {0}; - dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy); + dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy, dc_link_get_highest_encoding_format(stream->link)); dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp * 16, diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 25f63b2e7a8e..495e3cd70426 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -961,6 +961,7 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, else domain = AMDGPU_GEM_DOMAIN_VRAM; + rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(rbo, domain); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c index 08c494a7a21b..0d5fefb0f591 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c @@ -114,6 +114,7 @@ static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector domain = amdgpu_display_supported_domains(adev, rbo->flags); + rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; r = amdgpu_bo_pin(rbo, domain); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index e47e9db062f4..681799468487 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -569,7 +569,7 @@ static void calculate_bandwidth( break; } data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1)); - /*clamp the partitions to the maxium number supported by the lb*/ + /* clamp the partitions to the maximum number supported by the lb */ if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) { data->lb_partitions_max[i] = bw_int_to_fixed(10); } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 0d8498ab9b23..be8fbb04ad98 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -3127,7 +3127,9 @@ static enum bp_result bios_parser_get_vram_info( struct atom_data_revision revision; // vram info moved to umc_info for DCN4x - if (info && DATA_TABLES(umc_info)) { + if (dcb->ctx->dce_version >= DCN_VERSION_4_01 && + dcb->ctx->dce_version < DCN_VERSION_MAX && + info && DATA_TABLES(umc_info)) { header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(umc_info)); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index f770828df149..0e243f4344d0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -59,6 +59,7 @@ int clk_mgr_helper_get_active_display_cnt( display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; + const struct dc_stream_status *stream_status = &context->stream_status[i]; /* Don't count SubVP phantom pipes as part of active * display count @@ -66,13 +67,7 @@ int clk_mgr_helper_get_active_display_cnt( if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; - /* - * Only notify active stream or virtual stream. - * Need to notify virtual stream to work around - * headless case. HPD does not fire when system is in - * S0i2. 
- */ - if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL) + if (!stream->dpms_off || (stream_status && stream_status->plane_count)) display_count++; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 97164b5585a8..b46a3afe48ca 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -1222,6 +1222,12 @@ void dcn35_clk_mgr_construct( ctx->dc->debug.disable_dpp_power_gate = false; ctx->dc->debug.disable_hubp_power_gate = false; ctx->dc->debug.disable_dsc_power_gate = false; + + /* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. */ + if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE && + ctx->dce_version == DCN_VERSION_3_5 && + ((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00)) + ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; } else { /*let's reset the config control flag*/ ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index ae788154896c..a88f1b6ea64c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1767,7 +1767,7 @@ bool dc_validate_boot_timing(const struct dc *dc, if (crtc_timing->pix_clk_100hz != pix_clk_100hz) return false; - if (!se->funcs->dp_get_pixel_format) + if (!se || !se->funcs->dp_get_pixel_format) return false; if (!se->funcs->dp_get_pixel_format( @@ -2376,7 +2376,7 @@ static bool is_surface_in_context( return false; } -static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) +static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; enum surface_update_type update_type = UPDATE_TYPE_FAST; @@ -2455,7 +2455,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa /* todo: below are HW dependent, we should add a hook to * DCE/N resource and validated there. 
*/ - if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { + if (!dc->debug.skip_full_updated_if_possible) { /* swizzled mode requires RQ to be setup properly, * thus need to run DML to calculate RQ settings */ @@ -2547,7 +2547,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc, update_flags->raw = 0; // Reset all flags - type = get_plane_info_update_type(u); + type = get_plane_info_update_type(dc, u); elevate_update_type(&overall_type, type); type = get_scaling_info_update_type(dc, u); @@ -2596,6 +2596,12 @@ static enum surface_update_type det_surface_update(const struct dc *dc, elevate_update_type(&overall_type, UPDATE_TYPE_MED); } + if (u->sdr_white_level_nits) + if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { + update_flags->bits.sdr_white_level_nits = 1; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL); + } + if (u->cm2_params) { if ((u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting) @@ -2876,6 +2882,10 @@ static void copy_surface_update_to_plane( surface->hdr_mult = srf_update->hdr_mult; + if (srf_update->sdr_white_level_nits) + surface->sdr_white_level_nits = + srf_update->sdr_white_level_nits; + if (srf_update->blend_tf) memcpy(&surface->blend_tf, srf_update->blend_tf, sizeof(surface->blend_tf)); @@ -4679,6 +4689,8 @@ static bool full_update_required(struct dc *dc, srf_updates[i].scaling_info || (srf_updates[i].hdr_mult.value && srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || + (srf_updates[i].sdr_white_level_nits && + srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) || srf_updates[i].in_transfer_func || srf_updates[i].func_shaper || srf_updates[i].lut3d_func || @@ -5053,11 +5065,26 @@ static bool update_planes_and_stream_v3(struct dc *dc, return true; } +static void clear_update_flags(struct dc_surface_update *srf_updates, + int surface_count, struct dc_stream_state *stream) +{ + int i; + + if (stream) + stream->update_flags.raw = 0; + + for (i = 0; i < surface_count; i++) + if (srf_updates[i].surface) + srf_updates[i].surface->update_flags.raw = 0; +} + bool dc_update_planes_and_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update) { + bool ret = false; + dc_exit_ips_for_hw_access(dc); /* * update planes and stream version 3 separates FULL and FAST updates @@ -5074,10 +5101,16 @@ bool dc_update_planes_and_stream(struct dc *dc, * features as they are now transparent to the new sequence. */ if (dc->ctx->dce_version >= DCN_VERSION_4_01) - return update_planes_and_stream_v3(dc, srf_updates, + ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); - return update_planes_and_stream_v2(dc, srf_updates, + else + ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); + + if (ret) + clear_update_flags(srf_updates, surface_count, stream); + + return ret; } void dc_commit_updates_for_stream(struct dc *dc, @@ -5087,6 +5120,8 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_stream_update *stream_update, struct dc_state *state) { + bool ret = false; + dc_exit_ips_for_hw_access(dc); /* TODO: Since change commit sequence can have a huge impact, * we decided to only enable it for DCN3x. However, as soon as @@ -5094,17 +5129,17 @@ void dc_commit_updates_for_stream(struct dc *dc, * the new sequence for all ASICs. 
*/ if (dc->ctx->dce_version >= DCN_VERSION_4_01) { - update_planes_and_stream_v3(dc, srf_updates, surface_count, + ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); - return; - } - if (dc->ctx->dce_version >= DCN_VERSION_3_2) { - update_planes_and_stream_v2(dc, srf_updates, surface_count, + } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) { + ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - return; - } - update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, - stream_update, state); + } else + ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, + stream_update, state); + + if (ret) + clear_update_flags(srf_updates, surface_count, stream); } uint8_t dc_get_current_stream_count(struct dc *dc) @@ -5744,6 +5779,27 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, } /** + * dc_process_dmub_dpia_set_tps_notification - Submits tps notification + * + * @dc: [in] dc structure + * @link_index: [in] link index + * @tps: [in] request tps + * + * Submits set_tps_notification command to dmub via inbox message + */ +void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps) +{ + union dmub_rb_cmd cmd = {0}; + + cmd.set_tps_notification.header.type = DMUB_CMD__DPIA; + cmd.set_tps_notification.header.sub_type = DMUB_CMD__DPIA_SET_TPS_NOTIFICATION; + cmd.set_tps_notification.tps_notification.instance = dc->links[link_index]->ddc_hw_inst; + cmd.set_tps_notification.tps_notification.tps = tps; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +/** * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption * * @dc: [in] dc structure diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4c94dd38be4b..3992ad73165b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -55,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.299" +#define DC_VER "3.2.301" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -462,6 +462,7 @@ struct dc_config { bool support_edp0_on_dp1; unsigned int enable_fpo_flicker_detection; bool disable_hbr_audio_dp2; + bool consolidated_dpia_dp_lt; }; enum visual_confirm { @@ -762,7 +763,8 @@ union dpia_debug_options { uint32_t disable_mst_dsc_work_around:1; /* bit 3 */ uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ uint32_t disable_usb4_pm_support:1; /* bit 5 */ - uint32_t reserved:26; + uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */ + uint32_t reserved:25; } bits; uint32_t raw; }; @@ -1056,6 +1058,9 @@ struct dc_debug_options { unsigned int force_lls; bool notify_dpia_hr_bw; bool enable_ips_visual_confirm; + unsigned int sharpen_policy; + unsigned int scale_to_sharpness_policy; + bool skip_full_updated_if_possible; }; @@ -1269,6 +1274,7 @@ union surface_update_flags { uint32_t tmz_changed:1; uint32_t mcm_transfer_function_enable_change:1; /* disable or enable MCM transfer func */ uint32_t full_update:1; + uint32_t sdr_white_level_nits:1; } bits; uint32_t raw; @@ -1351,6 +1357,7 @@ struct dc_plane_state { bool adaptive_sharpness_en; int sharpness_level; enum linear_light_scaling linear_light_scaling; + unsigned int sdr_white_level_nits; }; struct dc_plane_info { @@ -1508,6 +1515,7 @@ struct dc_surface_update { */ struct dc_cm2_parameters *cm2_params; const struct dc_csc_transform *cursor_csc_color_matrix; + unsigned int 
sdr_white_level_nits; }; /* @@ -2520,6 +2528,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, uint8_t mst_alloc_slots, uint8_t *mst_slots_in_use); +void dc_process_dmub_dpia_set_tps_notification(const struct dc *dc, uint32_t link_index, uint8_t tps); + void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, uint32_t hpd_int_enable); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 519c3df78ee5..41bd95e9177a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -969,6 +969,14 @@ union dp_sink_video_fallback_formats { uint8_t raw; }; +union dpcd_max_uncompressed_pixel_rate_cap { + struct { + uint16_t max_uncompressed_pixel_rate_cap :15; + uint16_t valid :1; + } bits; + uint8_t raw[2]; +}; + union dp_fec_capability1 { struct { uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1; @@ -1170,6 +1178,7 @@ struct dpcd_caps { struct dc_lttpr_caps lttpr_caps; struct adaptive_sync_caps adaptive_sync_caps; struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; + union dpcd_max_uncompressed_pixel_rate_cap max_uncompressed_pixel_rate_cap; union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; union dp_main_line_channel_coding_cap channel_coding_cap; @@ -1340,6 +1349,9 @@ struct dp_trace { #ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX #define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110 #endif +#ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP +#define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP 0x221c +#endif #ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index fe3078b8789e..9014c2409817 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -59,6 +59,7 @@ struct dc_dsc_config_options { uint32_t max_target_bpp_limit_override_x16; uint32_t slice_height_granularity; uint32_t dsc_force_odm_hslice_override; + bool force_dsc_when_not_needed; }; bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, @@ -100,7 +101,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( */ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, - struct dc_dsc_policy *policy); + struct dc_dsc_policy *policy, + const enum dc_link_encoding_format link_encoding); void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index cd6de93eb91c..603552dbd771 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -186,19 +186,17 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active; spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active; + + spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy; + spl_in->debug.scale_to_sharpness_policy = + (enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy; + /* Check if the stream is in fullscreen and if it's HDR.
* Use this to determine sharpness levels */ spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream); spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream); - spl_in->hdr_multx100 = 0; - if (spl_in->is_hdr_on) { - spl_in->hdr_multx100 = (uint32_t)dc_fixpt_floor(dc_fixpt_mul(plane_state->hdr_mult, - dc_fixpt_from_int(100))); - /* Disable sharpness for HDR Mult > 6.0 */ - if (spl_in->hdr_multx100 > 600) - spl_in->adaptive_sharpness.enable = false; - } + spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits; } /// @brief Translate SPL output parameters to pipe context diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index fd6dca735714..6d7989b751e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -178,6 +178,7 @@ struct dc_panel_patch { unsigned int skip_avmute; unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; + unsigned int disable_colorimetry; }; struct dc_edid_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index e7019c95ba79..4fce64a030b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -313,9 +313,6 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, if (swath_height_c > 0) log2_swath_height_c = dml_log2(swath_height_c); - - if (req128_c && log2_swath_height_c > 0) - log2_swath_height_c -= 1; } rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index ae5251041728..3fa9a5da02f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -313,9 +313,6 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, if (swath_height_c > 0) log2_swath_height_c = dml_log2(swath_height_c); - - if (req128_c && log2_swath_height_c > 0) - log2_swath_height_c -= 1; } rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 0b132ce1d2cd..2b275e680379 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -1924,15 +1924,6 @@ static unsigned int CalculateVMAndRowBytes( *PixelPTEReqWidth = 32768.0 / BytePerPixel; *PTERequestSize = 64; FractionOfPTEReturnDrop = 0; - } else if (MacroTileSizeBytes == 4096) { - PixelPTEReqHeightPTEs = 1; - *PixelPTEReqHeight = MacroTileHeight; - *PixelPTEReqWidth = 8 * *MacroTileWidth; - *PTERequestSize = 64; - if (ScanDirection != dm_vert) - FractionOfPTEReturnDrop = 0; - else - FractionOfPTEReturnDrop = 7.0 / 8; } else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) { PixelPTEReqHeightPTEs = 16; *PixelPTEReqHeight = 16 * BlockHeight256Bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 547dfcc80fde..d851c081e376 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -8926,7 
+8926,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc // The prefetch scheduling should only be calculated once as per AllowForPStateChangeOrStutterInVBlank requirement // If the AllowForPStateChangeOrStutterInVBlank requirement is not strict (i.e. only try those power saving feature - // if possible, then will try to program for the best power saving features in order of diffculty (dram, fclk, stutter) + // if possible, then will try to program for the best power saving features in order of difficulty (dram, fclk, stutter) s->iteration = 0; s->MaxTotalRDBandwidth = 0; s->AllPrefetchModeTested = false; @@ -9977,7 +9977,7 @@ void dml_core_get_row_heights( dml_print("DML_DLG: %s: GPUVMMinPageSizeKBytes = %u\n", __func__, GPUVMMinPageSizeKBytes); #endif - // just suppluy with enough parameters to calculate meta and dte + // just supply with enough parameters to calculate meta and dte CalculateVMAndRowBytes( 0, // dml_bool_t ViewportStationary, 1, // dml_bool_t DCCEnable, @@ -10110,7 +10110,7 @@ dml_bool_t dml_mode_support( /// Note: In this function, it is assumed that DCFCLK, SOCCLK freq are the state values, and mode_program will just use the DML calculated DPPCLK and DISPCLK /// @param mode_lib mode_lib data struct that house all the input/output/bbox and calculation values. /// @param state_idx Power state idx chosen -/// @param display_cfg Display Congiuration +/// @param display_cfg Display Configuration /// @param call_standalone Calling mode_programming without calling mode support. Some of the "support" struct member will be pre-calculated before doing mode programming /// TODO: Add clk_cfg input, could be useful for standalone mode dml_bool_t dml_mode_programming( diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index b0d9aed0f265..8697eac1e1f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -858,7 +858,9 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->immediate_flip = plane_state->flip_immediate; - plane->composition.rect_out_height_spans_vactive = plane_state->dst_rect.height >= stream->timing.v_addressable; + plane->composition.rect_out_height_spans_vactive = + plane_state->dst_rect.height >= stream->timing.v_addressable && + stream->dst.height >= stream->timing.v_addressable; } //TODO : Could be possibly moved to a common helper layer. 
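The populate_dml21_plane_config_from_plane_state() hunk above tightens rect_out_height_spans_vactive: the output now only counts as spanning vactive when both the plane's dst_rect height and the stream's dst height reach the timing's v_addressable. Per the TODO above, this predicate is a candidate for a common helper; a minimal sketch of such a helper, built only from fields visible in the hunk (the helper name and its placement are hypothetical, not part of this series):

static bool rect_out_spans_vactive(const struct dc_plane_state *plane_state,
				   const struct dc_stream_state *stream)
{
	/* The output rect spans vactive only if both the plane dst rect
	 * and the stream dst rect cover the full vertical addressable
	 * region of the timing.
	 */
	return plane_state->dst_rect.height >= stream->timing.v_addressable &&
	       stream->dst.height >= stream->timing.v_addressable;
}

The assignment above would then reduce to plane->composition.rect_out_height_spans_vactive = rect_out_spans_vactive(plane_state, stream);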
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index d63558ee3135..1cf9015e854a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -940,9 +940,11 @@ static void build_synchronized_timing_groups( /* find synchronizable timing groups */ for (j = i + 1; j < display_config->display_config.num_streams; j++) { if (memcmp(master_timing, - &display_config->display_config.stream_descriptors[j].timing, - sizeof(struct dml2_timing_cfg)) == 0 && - display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) { + &display_config->display_config.stream_descriptors[j].timing, + sizeof(struct dml2_timing_cfg)) == 0 && + display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder && + (display_config->display_config.stream_descriptors[i].output.output_encoder != dml2_hdmi || //hdmi requires formats match + display_config->display_config.stream_descriptors[i].output.output_format == display_config->display_config.stream_descriptors[j].output.output_format)) { set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j); set_bit_in_bitfield(&stream_mapped_mask, j); } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index a1727e5bf024..ebd5df1a36e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -668,6 +668,7 @@ static bool decide_dsc_bandwidth_range( */ static bool decide_dsc_target_bpp_x16( const struct dc_dsc_policy *policy, + const struct dc_dsc_config_options *options, const struct dsc_enc_caps *dsc_common_caps, const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, @@ -682,7 +683,7 @@ static bool decide_dsc_target_bpp_x16( if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, num_slices_h, dsc_common_caps, timing, link_encoding, &range)) { if (target_bandwidth_kbps >= range.stream_kbps) { - if (policy->enable_dsc_when_not_needed) + if (policy->enable_dsc_when_not_needed || options->force_dsc_when_not_needed) /* enable max bpp even dsc is not needed */ *target_bpp_x16 = range.max_target_bpp_x16; } else if (target_bandwidth_kbps >= range.max_kbps) { @@ -882,7 +883,7 @@ static bool setup_dsc_config( memset(dsc_cfg, 0, sizeof(struct dc_dsc_config)); - dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy); + dc_dsc_get_policy_for_timing(timing, options->max_target_bpp_limit_override_x16, &policy, link_encoding); pic_width = timing->h_addressable + timing->h_border_left + timing->h_border_right; pic_height = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; @@ -1080,6 +1081,7 @@ static bool setup_dsc_config( if (target_bandwidth_kbps > 0) { is_dsc_possible = decide_dsc_target_bpp_x16( &policy, + options, &dsc_common_caps, target_bandwidth_kbps, timing, @@ -1171,7 +1173,8 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, - struct dc_dsc_policy *policy) + struct dc_dsc_policy *policy, + const enum 
dc_link_encoding_format link_encoding) { uint32_t bpc = 0; @@ -1235,10 +1238,7 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, policy->max_target_bpp = max_target_bpp_limit_override_x16 / 16; /* enable DSC when not needed, default false */ - if (dsc_policy_enable_dsc_when_not_needed) - policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed; - else - policy->enable_dsc_when_not_needed = false; + policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed; } void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit) @@ -1267,4 +1267,5 @@ void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_ options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine; options->max_target_bpp_limit_override_x16 = 0; options->slice_height_granularity = 1; + options->force_dsc_when_not_needed = false; } diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c index 6293173ba2b9..5eb3da8d5206 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c @@ -545,6 +545,7 @@ static void hubbub35_init(struct hubbub *hubbub) DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 256, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 256); + memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate)); } /*static void hubbub35_set_request_limit(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index d52ce58c6a98..4fbed0298adf 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -57,6 +57,7 @@ #include "panel_cntl.h" #include "dc_state_priv.h" #include "dpcd_defs.h" +#include "dsc.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" @@ -1823,6 +1824,48 @@ static void get_edp_links_with_sink( } } +static void clean_up_dsc_blocks(struct dc *dc) +{ + struct display_stream_compressor *dsc = NULL; + struct timing_generator *tg = NULL; + struct stream_encoder *se = NULL; + struct dccg *dccg = dc->res_pool->dccg; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + int i; + + if (dc->ctx->dce_version != DCN_VERSION_3_5 && + dc->ctx->dce_version != DCN_VERSION_3_51) + return; + + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { + struct dcn_dsc_state s = {0}; + + dsc = dc->res_pool->dscs[i]; + dsc->funcs->dsc_read_state(dsc, &s); + if (s.dsc_fw_en) { + /* disable DSC in OPTC */ + if (i < dc->res_pool->timing_generator_count) { + tg = dc->res_pool->timing_generators[i]; + tg->funcs->set_dsc_config(tg, OPTC_DSC_DISABLED, 0, 0); + } + /* disable DSC in stream encoder */ + if (i < dc->res_pool->stream_enc_count) { + se = dc->res_pool->stream_enc[i]; + se->funcs->dp_set_dsc_config(se, OPTC_DSC_DISABLED, 0, 0); + se->funcs->dp_set_dsc_pps_info_packet(se, false, NULL, true); + } + /* disable DSC block */ + if (dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, dsc->inst); + dsc->funcs->dsc_disable(dsc); + + /* power down DSC */ + if (pg_cntl != NULL) + pg_cntl->funcs->dsc_pg_control(pg_cntl, dsc->inst, false); + } + } +} + /* * When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need: * 1. 
Power down all DC HW blocks @@ -1927,6 +1970,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); power_down_all_hw_blocks(dc); + + /* DSC could be enabled on eDP during VBIOS post. + * Clean up DSC blocks if eDP is connected but not active. + */ + if (edp_link_with_sink && (edp_stream_num == 0)) + clean_up_dsc_blocks(dc); + disable_vga_and_power_gate_all_controllers(dc); if (edp_link_with_sink && !keep_edp_vdd_on) dc->hwss.edp_power_control(edp_link_with_sink, false); @@ -2046,13 +2096,20 @@ static void set_drr(struct pipe_ctx **pipe_ctx, * as well. */ for (i = 0; i < num_pipes; i++) { - pipe_ctx[i]->stream_res.tg->funcs->set_drr( - pipe_ctx[i]->stream_res.tg, &params); - - if (adjust.v_total_max != 0 && adjust.v_total_min != 0) - pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx[i]->stream_res.tg, - event_triggers, num_frames); + /* dc_state_destruct() might null the stream resources, so fetch tg + * here first to avoid a race condition. The lifetime of the pointee + * itself (the timing_generator object) is not a problem here. + */ + struct timing_generator *tg = pipe_ctx[i]->stream_res.tg; + + if ((tg != NULL) && tg->funcs) { + if (tg->funcs->set_drr) + tg->funcs->set_drr(tg, &params); + if (adjust.v_total_max != 0 && adjust.v_total_min != 0) + if (tg->funcs->set_static_screen_control) + tg->funcs->set_static_screen_control( + tg, event_triggers, num_frames); + } } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 42c52284a868..bded33575493 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -455,7 +455,7 @@ bool dcn30_mmhubbub_warmup( struct mcif_wb *mcif_wb; struct mcif_warmup_params warmup_params = {0}; unsigned int i, i_buf; - /*make sure there is no active DWB eanbled */ + /* make sure there is no active DWB enabled */ for (i = 0; i < num_dwb; i++) { dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst]; if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index a36e11606f90..2e8c9f738259 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1032,6 +1032,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) struct dsc_config dsc_cfg; struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; + struct dcn_dsc_state dsc_state = {0}; + + if (!dsc) { + DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + + if (dsc->funcs->dsc_read_state) { + dsc->funcs->dsc_read_state(dsc, &dsc_state); + if (!dsc_state.dsc_fw_en) { + DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + } /* Enable DSC hw block */ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 479fd3e89e5a..bd309dbdf7b2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -334,7 +334,20 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool
enable) struct dsc_config dsc_cfg; struct dsc_optc_config dsc_optc_cfg = {0}; enum optc_dsc_mode optc_dsc_mode; + struct dcn_dsc_state dsc_state = {0}; + if (!dsc) { + DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + + if (dsc->funcs->dsc_read_state) { + dsc->funcs->dsc_read_state(dsc, &dsc_state); + if (!dsc_state.dsc_fw_en) { + DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst); + return; + } + } /* Enable DSC hw block */ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c index 46fb3649bc86..6499807af72a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c @@ -50,8 +50,31 @@ static void update_dpia_stream_allocation_table(struct dc_link *link, DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n", status, mst_alloc_slots, prev_mst_slots_in_use); - ASSERT(link_enc); - link_enc->funcs->update_mst_stream_allocation_table(link_enc, table); + if (link_enc) + link_enc->funcs->update_mst_stream_allocation_table(link_enc, table); +} + +static void set_dio_dpia_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + if (tp_params->dp_phy_pattern != DP_TEST_PATTERN_VIDEO_MODE) + return; + + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + if (!link_enc) + return; + + link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +static void set_dio_dpia_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ } static const struct link_hwss dpia_link_hwss = { @@ -65,8 +88,8 @@ static const struct link_hwss dpia_link_hwss = { .ext = { .set_throttled_vcp_size = set_dio_throttled_vcp_size, .enable_dp_link_output = enable_dio_dp_link_output, - .set_dp_link_test_pattern = set_dio_dp_link_test_pattern, - .set_dp_lane_settings = set_dio_dp_lane_settings, + .set_dp_link_test_pattern = set_dio_dpia_link_test_pattern, + .set_dp_lane_settings = set_dio_dpia_lane_settings, .update_stream_allocation_table = update_dpia_stream_allocation_table, }, }; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 1aed55b0ab6a..60f15a9ba7a5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -287,6 +287,13 @@ static bool dp_validate_mode_timing( req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link)); max_bw = dp_link_bandwidth_kbps(link, link_setting); + bool is_max_uncompressed_pixel_rate_exceeded = link->dpcd_caps.max_uncompressed_pixel_rate_cap.bits.valid && + timing->pix_clk_100hz > link->dpcd_caps.max_uncompressed_pixel_rate_cap.bits.max_uncompressed_pixel_rate_cap * 10000; + + if (is_max_uncompressed_pixel_rate_exceeded && !timing->flags.DSC) { + return false; + } + if (req_bw <= max_bw) { /* remember the biggest 
mode here, during * initial link training (to get diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 34a618a7278b..d78c8ec4de79 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -1942,6 +1942,11 @@ static bool retrieve_link_cap(struct dc_link *link) DC_LOG_DP2("\tFEC aggregated error counters are supported"); } + core_link_read_dpcd(link, + DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP, + link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw, + sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw)); + retrieve_cable_id(link); dpcd_write_cable_id_to_dprx(link); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index 988999c44475..27b881f947e8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -515,6 +515,41 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status) return align_status.bits.INTERLANE_ALIGN_DONE == 1; } +bool dp_check_interlane_aligned(union lane_align_status_updated align_status, + struct dc_link *link, + uint8_t retries) +{ + /* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4 + * has to share encoders unlike DP and USBC + */ + return (dp_is_interlane_aligned(align_status) || + (link->skip_fallback_on_link_loss && retries)); +} + +uint32_t dp_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t offset, + uint8_t retries) +{ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + if (offset == 0 && retries == 1 && lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + return max(lt_settings->eq_pattern_time, (uint32_t) DPIA_CLK_SYNC_DELAY); + else + return dpia_get_eq_aux_rd_interval(link, lt_settings, offset); + } else if (is_repeater(lt_settings, offset)) + return dp_translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + else + return lt_settings->eq_pattern_time; +} + +bool dp_check_dpcd_reqeust_status(const struct dc_link *link, + enum dc_status status) +{ + return (status != DC_OK && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA); +} + enum link_training_result dp_check_link_loss_status( struct dc_link *link, const struct link_training_settings *link_training_setting) @@ -973,13 +1008,17 @@ void repeater_training_done(struct dc_link *link, uint32_t offset) dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } -static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) +static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) { + enum dc_status status; uint8_t sink_status = 0; uint8_t i; /* clear training pattern set */ - dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + + if (dp_check_dpcd_reqeust_status(link, status)) + return LINK_TRAINING_ABORT; if (encoding == DP_128b_132b_ENCODING) { /* poll for intra-hop disable */ @@ -990,6 +1029,8 @@ static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding fsleep(1000); } } + + return LINK_TRAINING_SUCCESS; } enum dc_status dpcd_configure_channel_coding(struct dc_link *link, @@ -1013,17 +1054,18 @@ enum 
dc_status dpcd_configure_channel_coding(struct dc_link *link, return status; } -void dpcd_set_training_pattern( +enum dc_status dpcd_set_training_pattern( struct dc_link *link, enum dc_dp_training_pattern training_pattern) { + enum dc_status status; union dpcd_training_pattern dpcd_pattern = {0}; dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dp_training_pattern_to_dpcd_training_pattern( link, training_pattern); - core_link_write_dpcd( + status = core_link_write_dpcd( link, DP_TRAINING_PATTERN_SET, &dpcd_pattern.raw, @@ -1033,6 +1075,8 @@ void dpcd_set_training_pattern( __func__, DP_TRAINING_PATTERN_SET, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + + return status; } enum dc_status dpcd_set_link_settings( @@ -1185,6 +1229,13 @@ void dpcd_set_lt_pattern_and_lane_settings( dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] = dpcd_pattern.raw; + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + dpia_set_tps_notification( + link, + lt_settings, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET, + offset); + if (is_repeater(lt_settings, offset)) { DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", __func__, @@ -1455,7 +1506,8 @@ static enum link_training_result dp_transition_to_video_idle( */ if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { msleep(5); - status = dp_check_link_loss_status(link, lt_settings); + if (!link->skip_fallback_on_link_loss) + status = dp_check_link_loss_status(link, lt_settings); } return status; } @@ -1521,7 +1573,9 @@ enum link_training_result dp_perform_link_training( ASSERT(0); /* exit training mode */ - dpcd_exit_training_mode(link, encoding); + if ((dpcd_exit_training_mode(link, encoding) != LINK_TRAINING_SUCCESS || status == LINK_TRAINING_ABORT) && + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + dpia_training_abort(link, &lt_settings, 0); /* switch to video idle */ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) @@ -1599,8 +1653,7 @@ bool perform_link_training_with_retries( dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings); return true; } else { - /** @todo Consolidate USB4 DP and DPx.x training. */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + if (!link->dc->config.consolidated_dpia_dp_lt && link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { status = dpia_perform_link_training( link, &pipe_ctx->link_res, @@ -1629,8 +1682,17 @@ bool perform_link_training_with_retries( dp_trace_lt_total_count_increment(link, false); dp_trace_lt_result_update(link, status, false); dp_trace_set_lt_end_timestamp(link, false); - if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) + if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) { + // Update verified link settings to the current one + // because DPIA LT might fall back to a lower link setting.
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + link->verified_link_cap.link_rate = link->cur_link_settings.link_rate; + link->verified_link_cap.lane_count = link->cur_link_settings.lane_count; + dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link); + } return true; + } } fail_count++; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h index 851bd17317a0..0b18aa35c33c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h @@ -55,7 +55,7 @@ void dp_set_hw_test_pattern( uint8_t *custom_pattern, uint32_t custom_pattern_size); -void dpcd_set_training_pattern( +enum dc_status dpcd_set_training_pattern( struct dc_link *link, enum dc_dp_training_pattern training_pattern); @@ -182,4 +182,18 @@ uint32_t dp_translate_training_aux_read_interval( uint8_t dp_get_nibble_at_index(const uint8_t *buf, uint32_t index); + +bool dp_check_interlane_aligned(union lane_align_status_updated align_status, + struct dc_link *link, + uint8_t retries); + +uint32_t dp_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t offset, + uint8_t retries); + +bool dp_check_dpcd_reqeust_status(const struct dc_link *link, + enum dc_status status); + #endif /* __DC_LINK_DP_TRAINING_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c index 2b4c15b0b407..3bdce32a85e3 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -157,6 +157,7 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence( struct link_training_settings *lt_settings, uint32_t offset) { + enum dc_status status; uint32_t retries_cr; uint32_t retry_count; uint32_t wait_time_microsec; @@ -216,7 +217,7 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence( /* 4. Read lane status and requested drive * settings as set by the sink */ - dp_get_lane_status_and_lane_adjust( + status = dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, @@ -224,6 +225,9 @@ enum link_training_result perform_8b_10b_clock_recovery_sequence( dpcd_lane_adjust, offset); + if (dp_check_dpcd_reqeust_status(link, status)) + return LINK_TRAINING_ABORT; + /* 5. check CR done*/ if (dp_is_cr_done(lane_count, dpcd_lane_status)) { DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__); @@ -273,6 +277,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( struct link_training_settings *lt_settings, uint32_t offset) { + enum dc_status status; enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; uint32_t wait_time_microsec; @@ -308,12 +313,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( dpcd_set_lane_settings(link, lt_settings, offset); /* 3. 
wait for receiver to lock-on*/ - wait_time_microsec = lt_settings->eq_pattern_time; - - if (is_repeater(lt_settings, offset)) - wait_time_microsec = - dp_translate_training_aux_read_interval( - link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + wait_time_microsec = dp_get_eq_aux_rd_interval(link, lt_settings, offset, retries_ch_eq); dp_wait_for_training_aux_rd_interval( link, @@ -322,7 +322,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( /* 4. Read lane status and requested * drive settings as set by the sink*/ - dp_get_lane_status_and_lane_adjust( + status = dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, @@ -330,6 +330,9 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( dpcd_lane_adjust, offset); + if (dp_check_dpcd_reqeust_status(link, status)) + return LINK_TRAINING_ABORT; + /* 5. check CR done*/ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) return dpcd_lane_status[0].bits.CR_DONE_0 ? @@ -339,7 +342,7 @@ enum link_training_result perform_8b_10b_channel_equalization_sequence( /* 6. check CHEQ done*/ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && dp_is_symbol_locked(lane_count, dpcd_lane_status) && - dp_is_interlane_aligned(dpcd_lane_status_updated)) + dp_check_interlane_aligned(dpcd_lane_status_updated, link, retries_ch_eq)) return LINK_TRAINING_SUCCESS; /* 7. update VS/PE/PC2 in lt_settings*/ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index cd1975c03f38..39e4b7dc9588 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -43,9 +43,6 @@ #define DC_LOGGER \ link->ctx->logger -/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ -#define DPIA_CLK_SYNC_DELAY 16000 - /* Extend interval between training status checks for manual testing. */ #define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 @@ -566,28 +563,6 @@ static enum link_training_result dpia_training_cr_phase( return result; } -/* Return status read interval during equalization phase. */ -static uint32_t dpia_get_eq_aux_rd_interval( - const struct dc_link *link, - const struct link_training_settings *lt_settings, - uint32_t hop) -{ - uint32_t wait_time_microsec; - - if (hop == DPRX) - wait_time_microsec = lt_settings->eq_pattern_time; - else - wait_time_microsec = - dp_translate_training_aux_read_interval( - link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]); - - /* Check debug option for extending aux read interval. */ - if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval) - wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US; - - return wait_time_microsec; -} - /* Execute equalization phase of link training for specified hop in display * path in non-transparent mode: * - driver issues both DPCD and SET_CONFIG transactions. @@ -936,6 +911,22 @@ static enum link_training_result dpia_training_end( return result; } +/* Return status read interval during equalization phase. */ +uint32_t dpia_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t hop) +{ + /* Check debug option for extending aux read interval. 
*/ + if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval) + return DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US; + else if (hop == DPRX) + return lt_settings->eq_pattern_time; + else + return dp_translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]); +} + /* When aborting training of specified hop in display path, clean up by: * - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET. * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0. @@ -943,7 +934,7 @@ static enum link_training_result dpia_training_end( * @param link DPIA link being trained. * @param hop Hop in display path. DPRX = 0. */ -static void dpia_training_abort( +void dpia_training_abort( struct dc_link *link, struct link_training_settings *lt_settings, uint32_t hop) @@ -968,7 +959,26 @@ static void dpia_training_abort( core_link_write_dpcd(link, dpcd_tps_offset, &data, 1); core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1); core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1); - core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data); + + if (!link->dc->config.consolidated_dpia_dp_lt) + core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data); +} + +void dpia_set_tps_notification( + struct dc_link *link, + const struct link_training_settings *lt_settings, + uint8_t pattern, + uint32_t hop) +{ + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + + if (lt_settings->lttpr_mode != LTTPR_MODE_NON_TRANSPARENT || pattern == DPCD_TRAINING_PATTERN_VIDEOIDLE) + return; + + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + if (hop != repeater_cnt) + dc_process_dmub_dpia_set_tps_notification(link->ctx->dc, link->link_index, pattern); } enum link_training_result dpia_perform_link_training( diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h index b39fb9faf1c2..9f4eceb494c2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h @@ -28,6 +28,9 @@ #define __DC_LINK_DP_TRAINING_DPIA_H__ #include "link_dp_training.h" +/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ +#define DPIA_CLK_SYNC_DELAY 16000 + /* Train DP tunneling link for USB4 DPIA display endpoint. * DPIA equivalent of dc_link_dp_perfrorm_link_training. * Aborts link training upon detection of sink unplug. 
@@ -38,4 +41,20 @@ enum link_training_result dpia_perform_link_training( const struct dc_link_settings *link_setting, bool skip_video_pattern); +void dpia_training_abort( + struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop); + +uint32_t dpia_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t hop); + +void dpia_set_tps_notification( + struct dc_link *link, + const struct link_training_settings *lt_settings, + uint8_t pattern, + uint32_t offset); + #endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 46ad684fe192..893a9d9ee870 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -2155,6 +2155,7 @@ static bool dcn35_resource_construct( dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + dc->dml2_options.override_det_buffer_size_kbytes = true; if (dc->config.sdpif_request_limit_words_per_umc == 0) dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 4c5e722baa3a..70abd32ce2ad 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -736,7 +736,7 @@ static const struct dc_debug_options debug_defaults_drv = { .hdmichar = true, .dpstream = true, .symclk32_se = true, - .symclk32_le = true, + .symclk32_le = false, .symclk_fe = true, .physymclk = false, .dpiasymclk = true, @@ -766,6 +766,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dmub_reallow_idle = false, .static_screen_wait_frames = 2, .notify_dpia_hr_bw = true, + .min_disp_clk_khz = 50000, }; static const struct dc_panel_config panel_config_defaults = { @@ -2133,6 +2134,7 @@ static bool dcn351_resource_construct( dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + dc->dml2_options.override_det_buffer_size_kbytes = true; if (dc->config.sdpif_request_limit_words_per_umc == 0) dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c index 15f7eda903e6..014e8a296f0c 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c @@ -813,6 +813,14 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch) return skip_easf; } +/* Check if video is in fullscreen mode */ +static bool spl_is_video_fullscreen(struct spl_in *spl_in) +{ + if (spl_is_yuv420(spl_in->basic_in.format) && spl_in->is_fullscreen) + return true; + return false; +} + static bool spl_get_isharp_en(struct spl_in *spl_in, struct spl_scratch *spl_scratch) { @@ -820,6 +828,7 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, int vratio = 0; int hratio = 0; struct spl_taps taps = spl_scratch->scl_data.taps; + bool fullscreen = spl_is_video_fullscreen(spl_in); /* Return if adaptive sharpness is disabled */ if (spl_in->adaptive_sharpness.enable == false) @@ -835,9 +844,18 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, // Scaling is up to 1:1 (no scaling) or upscaling /* - * Apply 
sharpness to all RGB surfaces and to - * NV12/P010 surfaces + * Apply sharpness to RGB and YUV (NV12/P010) + * surfaces based on policy setting */ + if (!spl_is_yuv420(spl_in->basic_in.format) && + (spl_in->debug.sharpen_policy == SHARPEN_YUV)) + return enable_isharp; + else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) && + (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV)) + return enable_isharp; + else if (!spl_in->is_fullscreen && + spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL) + return enable_isharp; /* * Apply sharpness if supports horizontal taps 4,6 AND @@ -1155,14 +1173,19 @@ static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_scratch *sp } /* Calculate C0-C3 coefficients based on HDR_mult */ -static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint32_t hdr_multx100) +static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint32_t sdr_white_level_nits) { struct spl_fixed31_32 hdr_mult, c0_mult, c1_mult, c2_mult; struct spl_fixed31_32 c0_calc, c1_calc, c2_calc; struct spl_custom_float_format fmt; + uint32_t hdr_multx100_int; - SPL_ASSERT(hdr_multx100); - hdr_mult = spl_fixpt_from_fraction((long long)hdr_multx100, 100LL); + if ((sdr_white_level_nits >= 80) && (sdr_white_level_nits <= 480)) + hdr_multx100_int = sdr_white_level_nits * 100 / 80; + else + hdr_multx100_int = 100; /* default for 80 nits otherwise */ + + hdr_mult = spl_fixpt_from_fraction((long long)hdr_multx100_int, 100LL); c0_mult = spl_fixpt_from_fraction(2126LL, 10000LL); c1_mult = spl_fixpt_from_fraction(7152LL, 10000LL); c2_mult = spl_fixpt_from_fraction(722LL, 10000LL); @@ -1191,7 +1214,7 @@ static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint3 static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *spl_out, bool enable_easf_v, bool enable_easf_h, enum linear_light_scaling lls_pref, enum spl_pixel_format format, enum system_setup setup, - uint32_t hdr_multx100) + uint32_t sdr_white_level_nits) { struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data; if (enable_easf_v) { @@ -1499,7 +1522,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s dscl_prog_data->easf_ltonl_en = 1; // Linear input if ((setup == HDR_L) && (spl_is_rgb8(format))) { /* Calculate C0-C3 coefficients based on HDR multiplier */ - spl_calculate_c0_c3_hdr(dscl_prog_data, hdr_multx100); + spl_calculate_c0_c3_hdr(dscl_prog_data, sdr_white_level_nits); } else { // HDR_L ( DWM ) and SDR_L dscl_prog_data->easf_matrix_c0 = 0x4EF7; // fp1.5.10, C0 coefficient (LN_rec709: 0.2126 * (2^14)/125 = 27.86590720) @@ -1557,7 +1580,7 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, struct adaptive_sharpness adp_sharpness, bool enable_isharp, enum linear_light_scaling lls_pref, enum spl_pixel_format format, const struct spl_scaler_data *data, struct spl_fixed31_32 ratio, - enum system_setup setup) + enum system_setup setup, enum scale_to_sharpness_policy scale_to_sharpness_policy) { /* Turn off sharpener if not required */ if (!enable_isharp) { @@ -1565,6 +1588,11 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, return; } + spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness, + scale_to_sharpness_policy); + dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup); + dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level; + dscl_prog_data->isharp_en = 1; // ISHARP_EN // Set 
ISHARP_NOISEDET_MODE if htaps = 6-tap if (data->taps.h_taps == 6) { @@ -1662,11 +1690,6 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data, dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format } - - spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness); - dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup); - dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level; - // Program the nldelta soft clip values if (lls_pref == LLS_PREF_YES) { dscl_prog_data->isharp_nldelta_sclip.enable_p = 0; /* ISHARP_NLDELTA_SCLIP_EN_P */ @@ -1750,7 +1773,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) // Set EASF spl_set_easf_data(&spl_scratch, spl_out, enable_easf_v, enable_easf_h, spl_in->lls_pref, - spl_in->basic_in.format, setup, spl_in->hdr_multx100); + spl_in->basic_in.format, setup, spl_in->sdr_white_level_nits); // Set iSHARP vratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.vert); @@ -1761,7 +1784,8 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out) isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.horz; spl_set_isharp_data(spl_out->dscl_prog_data, spl_in->adaptive_sharpness, enable_isharp, - spl_in->lls_pref, spl_in->basic_in.format, data, isharp_scale_ratio, setup); + spl_in->lls_pref, spl_in->basic_in.format, data, isharp_scale_ratio, setup, + spl_in->debug.scale_to_sharpness_policy); return res; } diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c index 33712f50d303..e0572252c640 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c @@ -500,6 +500,15 @@ struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = }, }; +struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = { + {1125, 1000, 0}, + {11, 10, 1}, + {1075, 1000, 2}, + {105, 100, 3}, + {1025, 1000, 4}, + {1, 1, 5}, +}; + const uint32_t *spl_get_filter_isharp_1D_lut_0(void) { return filter_isharp_1D_lut_0; @@ -541,19 +550,72 @@ uint16_t *spl_get_filter_isharp_bs_3tap_64p(void) return filter_isharp_bs_3tap_64p_s1_12; } -static unsigned int spl_calculate_sharpness_level(int discrete_sharpness_level, enum system_setup setup, - struct spl_sharpness_range sharpness_range) +static unsigned int spl_calculate_sharpness_level_adj(struct spl_fixed31_32 ratio) +{ + int j; + struct spl_fixed31_32 ratio_level; + struct scale_ratio_to_sharpness_level_adj *lookup_ptr; + unsigned int sharpness_level_down_adj; + + /* + * Adjust sharpness level based on current scaling ratio + * + * We have 5 discrete scaling ratios which we will use to adjust the + * sharpness level down by 1 as we pass each ratio. 
The ratios + * are + * + * 1.125 upscale and higher - no adj + * 1.100 - under 1.125 - adj level down 1 + * 1.075 - under 1.100 - adj level down 2 + * 1.050 - under 1.075 - adj level down 3 + * 1.025 - under 1.050 - adj level down 4 + * 1.000 - under 1.025 - adj level down 5 + * + */ + j = 0; + sharpness_level_down_adj = 0; + lookup_ptr = sharpness_level_adj; + while (j < NUM_SHARPNESS_ADJ_LEVELS) { + ratio_level = spl_fixpt_from_fraction(lookup_ptr->ratio_numer, + lookup_ptr->ratio_denom); + if (ratio.value >= ratio_level.value) { + sharpness_level_down_adj = lookup_ptr->level_down_adj; + break; + } + lookup_ptr++; + j++; + } + return sharpness_level_down_adj; +} + +static unsigned int spl_calculate_sharpness_level(struct spl_fixed31_32 ratio, + int discrete_sharpness_level, enum system_setup setup, + struct spl_sharpness_range sharpness_range, + enum scale_to_sharpness_policy scale_to_sharpness_policy) { unsigned int sharpness_level = 0; + unsigned int sharpness_level_down_adj = 0; int min_sharpness, max_sharpness, mid_sharpness; + /* + * Adjust sharpness level if policy requires we adjust it based on + * scale ratio. Based on scale ratio, we may adjust the sharpness + * level down by a certain number of steps. We will not select + * a sharpness value of 0 so the lowest sharpness level will be + * 0 or 1 depending on what the min_sharpness is + * + * If the policy is not required, this code may be removed at a later + * date + */ switch (setup) { case HDR_L: min_sharpness = sharpness_range.hdr_rgb_min; max_sharpness = sharpness_range.hdr_rgb_max; mid_sharpness = sharpness_range.hdr_rgb_mid; + if (scale_to_sharpness_policy == SCALE_TO_SHARPNESS_ADJ_ALL) + sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio); break; case HDR_NL: /* currently no use case, use Non-linear SDR values for now */ @@ -561,15 +623,26 @@ static unsigned int spl_calculate_sharpness_level, min_sharpness = sharpness_range.sdr_yuv_min; max_sharpness = sharpness_range.sdr_yuv_max; mid_sharpness = sharpness_range.sdr_yuv_mid; + if (scale_to_sharpness_policy >= SCALE_TO_SHARPNESS_ADJ_YUV) + sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio); break; case SDR_L: default: min_sharpness = sharpness_range.sdr_rgb_min; max_sharpness = sharpness_range.sdr_rgb_max; mid_sharpness = sharpness_range.sdr_rgb_mid; + if (scale_to_sharpness_policy == SCALE_TO_SHARPNESS_ADJ_ALL) + sharpness_level_down_adj = spl_calculate_sharpness_level_adj(ratio); break; } + if ((min_sharpness == 0) && (sharpness_level_down_adj >= discrete_sharpness_level)) + discrete_sharpness_level = 1; + else if (sharpness_level_down_adj >= discrete_sharpness_level) + discrete_sharpness_level = 0; + else + discrete_sharpness_level -= sharpness_level_down_adj; + int lower_half_step_size = (mid_sharpness - min_sharpness) / 5; int upper_half_step_size = (max_sharpness - mid_sharpness) / 5; @@ -584,7 +657,7 @@ static unsigned int spl_calculate_sharpness_level, } void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup, - struct adaptive_sharpness sharpness) + struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy) { uint8_t *byte_ptr_1dlut_src, *byte_ptr_1dlut_dst; struct spl_fixed31_32 sharp_base, sharp_calc, sharp_level; @@ -594,8 +667,9 @@ void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, en uint32_t filter_pregen_store[ISHARP_LUT_TABLE_SIZE]; /* Custom sharpnessX1000
value */ - unsigned int sharpnessX1000 = spl_calculate_sharpness_level(sharpness.sharpness_level, - setup, sharpness.sharpness_range); + unsigned int sharpnessX1000 = spl_calculate_sharpness_level(ratio, + sharpness.sharpness_level, setup, + sharpness.sharpness_range, scale_to_sharpness_policy); sharp_level = spl_fixpt_from_fraction(sharpnessX1000, 1000); /* @@ -606,7 +680,6 @@ void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, en (filter_isharp_1D_lut_pregen[setup].sharpness_denom == 1000)) return; - /* * Calculate LUT_128_gained with this equation: * diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h index fe0b12571f2c..afcc66206ca2 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h @@ -20,11 +20,11 @@ uint16_t *spl_get_filter_isharp_bs_3tap_64p(void); const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void); uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps); -struct scale_ratio_to_sharpness_level_lookup { +#define NUM_SHARPNESS_ADJ_LEVELS 6 +struct scale_ratio_to_sharpness_level_adj { unsigned int ratio_numer; unsigned int ratio_denom; - unsigned int sharpness_numer; - unsigned int sharpness_denom; + unsigned int level_down_adj; /* adjust sharpness level down */ }; struct isharp_1D_lut_pregen { @@ -45,6 +45,7 @@ void spl_init_blur_scale_coeffs(void); void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data, const struct spl_scaler_data *data); -void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup, struct adaptive_sharpness sharpness); +void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup, + struct adaptive_sharpness sharpness, enum scale_to_sharpness_policy scale_to_sharpness_policy); uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup); #endif /* __DC_SPL_ISHARP_FILTERS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h index 85b19ebe2c57..2a74ff5fdfdb 100644 --- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h @@ -487,6 +487,17 @@ enum linear_light_scaling { // convert it in translation logic LLS_PREF_YES, LLS_PREF_NO }; +enum sharpen_policy { + SHARPEN_ALWAYS = 0, + SHARPEN_YUV = 1, + SHARPEN_RGB_FULLSCREEN_YUV = 2, + SHARPEN_FULLSCREEN_ALL = 3 +}; +enum scale_to_sharpness_policy { + NO_SCALE_TO_SHARPNESS_ADJ = 0, + SCALE_TO_SHARPNESS_ADJ_YUV = 1, + SCALE_TO_SHARPNESS_ADJ_ALL = 2 +}; struct spl_funcs { void (*spl_calc_lb_num_partitions) (bool alpha_en, @@ -499,6 +510,8 @@ struct spl_funcs { struct spl_debug { int visual_confirm_base_offset; int visual_confirm_dpp_offset; + enum sharpen_policy sharpen_policy; + enum scale_to_sharpness_policy scale_to_sharpness_policy; }; struct spl_in { @@ -518,7 +531,7 @@ struct spl_in { bool is_hdr_on; int h_active; int v_active; - int hdr_multx100; + int sdr_white_level_nits; }; // end of SPL inputs diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index cd70453aeae0..fe5b6f7a3eb1 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -300,6 +300,7 @@ struct dmub_srv_hw_params { enum dmub_ips_disable_type disable_ips; bool disallow_phy_access; bool disable_sldo_opt; + bool 
enable_non_transparent_setconfig; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index e20c220aa8b4..ebcf68bfae2b 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -682,7 +682,7 @@ union dmub_fw_boot_options { uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/ uint32_t usb4_cm_version: 1; /**< 1 CM support */ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */ - uint32_t reserved0: 1; + uint32_t enable_non_transparent_setconfig: 1; /* 1 if dpia uses the conventional dp lt flow */ uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/ uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ @@ -1308,6 +1308,7 @@ enum dmub_cmd_dpia_type { DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0, DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, + DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3, }; /* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */ @@ -2139,6 +2140,24 @@ struct dmub_rb_cmd_set_mst_alloc_slots { }; /** + * Data passed from driver to FW in a DMUB_CMD__SET_TPS_NOTIFICATION command. + */ +struct dmub_cmd_tps_notification_data { + uint8_t instance; /* DPIA instance */ + uint8_t tps; /* requested training pattern */ + uint8_t reserved1; + uint8_t reserved2; +}; + +/** + * DMUB command structure for SET_TPS_NOTIFICATION command. + */ +struct dmub_rb_cmd_set_tps_notification { + struct dmub_cmd_header header; /* header */ + struct dmub_cmd_tps_notification_data tps_notification; /* set tps_notification data */ +}; + +/** * DMUB command structure for DPIA HPD int enable control. */ struct dmub_rb_cmd_dpia_hpd_int_enable { @@ -5305,6 +5324,10 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_set_mst_alloc_slots set_mst_alloc_slots; /** + * Definition of a DMUB_CMD__DPIA_SET_TPS_NOTIFICATION command. + */ + struct dmub_rb_cmd_set_tps_notification set_tps_notification; + /** * Definition of a DMUB_CMD__EDID_CEA command.
*/ struct dmub_rb_cmd_edid_cea edid_cea; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 746696b6f09a..2ccad79053c5 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -425,6 +425,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.ips_disable = params->disable_ips; boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; boot_options.bits.disable_sldo_opt = params->disable_sldo_opt; + boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index a40e6590215a..bbd259cea4f4 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -134,7 +134,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh( v_total = div64_u64(div64_u64(((unsigned long long)( frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total), 1000000); + stream->timing.h_total) + 500000, 1000000); /* v_total cannot be less than nominal */ if (v_total < stream->timing.v_total) { diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 3cd52e7a9c77..95838c7ab054 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link) isPSRSUSupported = false; else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03) isPSRSUSupported = false; + else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01) + isPSRSUSupported = false; else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1) isPSRSUSupported = true; } diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 745fd052840d..3f91926a50e9 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -85,7 +85,7 @@ enum amd_apu_flags { * @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler * @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine * @AMD_IP_BLOCK_TYPE_VPE: Video Processing Engine -* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Schduler for Multimedia +* @AMD_IP_BLOCK_TYPE_UMSCH_MM: User Mode Scheduler for Multimedia * @AMD_IP_BLOCK_TYPE_ISP: Image Signal Processor * @AMD_IP_BLOCK_TYPE_NUM: Total number of IP block types */ diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 7744ca3ef4b1..e3e635a31b8a 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -71,6 +71,11 @@ enum kgd_memory_pool { KGD_POOL_FRAMEBUFFER = 3, }; +struct kfd_cu_occupancy { + u32 wave_cnt; + u32 doorbell_off; +}; + /** * enum kfd_sched_policy * @@ -313,8 +318,9 @@ struct kfd2kgd_calls { uint32_t grace_period, uint32_t *reg_offset, uint32_t *reg_data); - void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, - int *wave_cnt, int *max_waves_per_cu, uint32_t inst); + void (*get_cu_occupancy)(struct amdgpu_device *adev, + struct kfd_cu_occupancy *cu_occupancy, + int *max_waves_per_cu, uint32_t inst); void 
(*program_trap_handler_settings)(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 9118fcddbf11..227bf0e84a13 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -60,7 +60,7 @@ struct vi_dpm_level { struct vi_dpm_table { uint32_t count; - struct vi_dpm_level dpm_level[] __counted_by(count); + struct vi_dpm_level dpm_level[]; }; #define PCIE_PERF_REQ_REMOVE_REGISTRY 0 @@ -91,7 +91,7 @@ struct phm_set_power_state_input { struct phm_clock_array { uint32_t count; - uint32_t values[] __counted_by(count); + uint32_t values[]; }; struct phm_clock_voltage_dependency_record { @@ -123,7 +123,7 @@ struct phm_acpclock_voltage_dependency_record { struct phm_clock_voltage_dependency_table { uint32_t count; - struct phm_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_clock_voltage_dependency_record entries[]; }; struct phm_phase_shedding_limits_record { @@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record { struct phm_uvd_clock_voltage_dependency_table { uint8_t count; - struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_uvd_clock_voltage_dependency_record entries[]; }; struct phm_acp_clock_voltage_dependency_record { @@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record { struct phm_acp_clock_voltage_dependency_table { uint32_t count; - struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_acp_clock_voltage_dependency_record entries[]; }; struct phm_vce_clock_voltage_dependency_record { @@ -161,32 +161,32 @@ struct phm_vce_clock_voltage_dependency_record { struct phm_phase_shedding_limits_table { uint32_t count; - struct phm_phase_shedding_limits_record entries[] __counted_by(count); + struct phm_phase_shedding_limits_record entries[]; }; struct phm_vceclock_voltage_dependency_table { uint8_t count; - struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_vceclock_voltage_dependency_record entries[]; }; struct phm_uvdclock_voltage_dependency_table { uint8_t count; - struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_uvdclock_voltage_dependency_record entries[]; }; struct phm_samuclock_voltage_dependency_table { uint8_t count; - struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_samuclock_voltage_dependency_record entries[]; }; struct phm_acpclock_voltage_dependency_table { uint32_t count; - struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count); + struct phm_acpclock_voltage_dependency_record entries[]; }; struct phm_vce_clock_voltage_dependency_table { uint8_t count; - struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_vce_clock_voltage_dependency_record entries[]; }; @@ -393,7 +393,7 @@ union phm_cac_leakage_record { struct phm_cac_leakage_table { uint32_t count; - union phm_cac_leakage_record entries[] __counted_by(count); + union phm_cac_leakage_record entries[]; }; struct phm_samu_clock_voltage_dependency_record { @@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record { struct phm_samu_clock_voltage_dependency_table { uint8_t count; - struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count); + struct phm_samu_clock_voltage_dependency_record 
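/*
 * All of the __counted_by() annotations in this header are being
 * dropped; presumably the powerplay code populates these flexible
 * arrays before the count member is written, which would trip the
 * runtime bounds checks that the annotation enables.
 */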
entries[]; }; struct phm_cac_tdp_table { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index bb3bc68dfc39..ee1bcfaae3e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1234,6 +1234,14 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu) } } +static bool smu_is_workload_profile_available(struct smu_context *smu, + u32 profile) +{ + if (profile >= PP_SMC_POWER_PROFILE_COUNT) + return false; + return smu->workload_map && smu->workload_map[profile].valid_mapping; +} + static int smu_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1251,21 +1259,33 @@ static int smu_sw_init(void *handle) smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + smu->user_dpm_profile.user_workload_mask = 0; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; - smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; + smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; + smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; + smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3; + smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4; + smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; + smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; + + if (smu->is_apu || + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + } else { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; + } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; @@ -2226,7 +2246,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu, static int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, bool skip_display_settings, - bool force_update) + bool init) { int ret = 0; int index = 0; @@ -2255,7 +2275,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, } } - if (force_update || smu_dpm_ctx->dpm_level != level) { + if (smu_dpm_ctx->dpm_level != level) { ret = smu_asic_set_performance_level(smu, level); if (ret) { dev_err(smu->adev->dev, "Failed to set performance level!"); @@ -2272,7 +2292,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
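/*
 * fls() returns the 1-based position of the highest set bit, so the
 * highest-priority workload still present in the mask wins; the -1
 * below turns that into a workload_setting[] index, and anything out
 * of range falls back to slot 0, the bootup default. For example, a
 * mask of 0x21 (bits 0 and 5 set) gives fls() == 6 and selects
 * workload_setting[5] (COMPUTE, given the priority table initialized
 * in smu_sw_init()) over the default:
 */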
index - 1 : 0; workload[0] = smu->workload_setting[index]; - if (force_update || smu->power_profile_mode != workload[0]) + if (init || smu->power_profile_mode != workload[0]) smu_bump_power_profile_mode(smu, workload, 0); } @@ -2335,17 +2355,20 @@ static int smu_switch_power_profile(void *handle, return -EINVAL; if (!en) { - smu->workload_mask &= ~(1 << smu->workload_prority[type]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } else { - smu->workload_mask |= (1 << smu->workload_prority[type]); + smu->driver_workload_mask |= (1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) smu_bump_power_profile_mode(smu, workload, 0); @@ -3036,12 +3059,23 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - return smu_bump_power_profile_mode(smu, param, param_size); + if (smu->user_dpm_profile.user_workload_mask & + (1 << smu->workload_priority[param[param_size]])) + return 0; + + smu->user_dpm_profile.user_workload_mask = + (1 << smu->workload_priority[param[param_size]]); + smu->workload_mask = smu->user_dpm_profile.user_workload_mask | + smu->driver_workload_mask; + ret = smu_bump_power_profile_mode(smu, param, param_size); + + return ret; } static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index b44a185d07e8..d60d9a12a47e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -240,6 +240,7 @@ struct smu_user_dpm_profile { /* user clock state information */ uint32_t clk_mask[SMU_CLK_COUNT]; uint32_t clk_dependency; + uint32_t user_workload_mask; }; #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ @@ -557,7 +558,8 @@ struct smu_context { bool disable_uclk_switch; uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; + uint32_t driver_workload_mask; + uint32_t workload_priority[WORKLOAD_POLICY_MAX]; uint32_t workload_setting[WORKLOAD_POLICY_MAX]; uint32_t power_profile_mode; uint32_t default_power_profile_mode; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h index ee457a6f0813..c2fd0a4a13e5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h @@ -25,7 +25,7 @@ #define SMU14_DRIVER_IF_V14_0_H //Increment this version if SkuTable_t or BoardTable_t change -#define PPTABLE_VERSION 0x18 +#define PPTABLE_VERSION 0x1B #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_SOCCLK_DPM_LEVELS 8 @@ -145,7 +145,7 @@ typedef enum { } FEATURE_BTC_e; // Debug Overrides Bitmask -#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001 +#define DEBUG_OVERRIDE_NOT_USE 0x00000001 #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002 #define 
DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004 #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008 @@ -161,6 +161,7 @@ typedef enum { #define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000 #define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000 #define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000 +#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000 // VR Mapping Bit Defines #define VR_MAPPING_VR_SELECT_MASK 0x01 @@ -391,6 +392,21 @@ typedef struct { EccInfo_t EccInfo[24]; } EccInfoTable_t; +#define EPCS_HIGH_POWER 600 +#define EPCS_NORMAL_POWER 450 +#define EPCS_LOW_POWER 300 +#define EPCS_SHORTED_POWER 150 +#define EPCS_NO_BOOTUP 0 + +typedef enum{ + EPCS_SHORTED_LIMIT, + EPCS_LOW_POWER_LIMIT, + EPCS_NORMAL_POWER_LIMIT, + EPCS_HIGH_POWER_LIMIT, + EPCS_NOT_CONFIGURED, + EPCS_STATUS_COUNT, +} EPCS_STATUS_e; + //D3HOT sequences typedef enum { BACO_SEQUENCE, @@ -662,7 +678,7 @@ typedef enum { } PP_GRTAVFS_FW_SEP_FUSE_e; #define PP_NUM_RTAVFS_PWL_ZONES 5 - +#define PP_NUM_PSM_DIDT_PWL_ZONES 3 // VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3 // Slope Q1.7, Offset Q1.2 @@ -746,10 +762,10 @@ typedef struct { uint16_t Padding; //Frequency changes - int16_t GfxclkFmin; // MHz - int16_t GfxclkFmax; // MHz - uint16_t UclkFmin; // MHz - uint16_t UclkFmax; // MHz + int16_t GfxclkFoffset; + uint16_t Padding1; + uint16_t UclkFmin; + uint16_t UclkFmax; uint16_t FclkFmin; uint16_t FclkFmax; @@ -770,19 +786,23 @@ typedef struct { uint8_t MaxOpTemp; uint8_t AdvancedOdModeEnabled; - uint8_t Padding1[3]; + uint8_t Padding2[3]; uint16_t GfxVoltageFullCtrlMode; uint16_t SocVoltageFullCtrlMode; uint16_t GfxclkFullCtrlMode; uint16_t UclkFullCtrlMode; uint16_t FclkFullCtrlMode; - uint16_t Padding2; + uint16_t Padding3; int16_t GfxEdc; int16_t GfxPccLimitControl; - uint32_t Spare[10]; + uint16_t GfxclkFmaxVmax; + uint8_t GfxclkFmaxVmaxTemperature; + uint8_t Padding4[1]; + + uint32_t Spare[9]; uint32_t MmHubPadding[8]; // SMU internal use. 
Adding here instead of external as a workaround } OverDriveTable_t; @@ -802,8 +822,8 @@ typedef struct { uint16_t VddSocVmax; //gfxclk - int16_t GfxclkFmin; // MHz - int16_t GfxclkFmax; // MHz + int16_t GfxclkFoffset; + uint16_t Padding; //uclk uint16_t UclkFmin; // MHz uint16_t UclkFmax; // MHz @@ -828,7 +848,7 @@ typedef struct { uint8_t FanZeroRpmEnable; //temperature uint8_t MaxOpTemp; - uint8_t Padding[2]; + uint8_t Padding1[2]; //Full Ctrl uint16_t GfxVoltageFullCtrlMode; @@ -839,7 +859,7 @@ typedef struct { //EDC int16_t GfxEdc; int16_t GfxPccLimitControl; - int16_t Padding1; + int16_t Padding2; uint32_t Spare[5]; } OverDriveLimits_t; @@ -987,8 +1007,9 @@ typedef struct { uint16_t BaseClockDc; uint16_t GameClockDc; uint16_t BoostClockDc; - - uint32_t Reserved[4]; + uint16_t MaxReportedClock; + uint16_t Padding; + uint32_t Reserved[3]; } DriverReportedClocks_t; typedef struct { @@ -1132,7 +1153,7 @@ typedef struct { uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz uint16_t GfxclkAibFmax; - uint16_t GfxclkFreqCap; + uint16_t GfxDpmPadding; //GFX Idle Power Settings uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz @@ -1172,8 +1193,7 @@ typedef struct { uint32_t DvoFmaxLowScaler; //Unitless float // GFX DCS - uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase - uint16_t PaddingDcs; + uint32_t PaddingDcs; uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. @@ -1205,8 +1225,7 @@ typedef struct { uint16_t DalDcModeMaxUclkFreq; uint8_t PaddingsMem[2]; //FCLK Section - uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. 
Set to 0 use FW calculated value - uint16_t PaddingFclk; + uint32_t PaddingFclk; // Link DPM Settings uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5 @@ -1215,12 +1234,19 @@ typedef struct { // SECTION: VDD_GFX AVFS uint8_t OverrideGfxAvfsFuses; - uint8_t GfxAvfsPadding[3]; + uint8_t GfxAvfsPadding[1]; + uint16_t DroopGBStDev; uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; - uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + + uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1]; + uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES]; + uint32_t spare_HwRtAvfsFuses[19]; uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; @@ -1246,11 +1272,7 @@ typedef struct { uint32_t dGbV_dT_vmin; uint32_t dGbV_dT_vmax; - //Unused: PMFW-9370 - uint32_t V2F_vmin_range_low; - uint32_t V2F_vmin_range_high; - uint32_t V2F_vmax_range_low; - uint32_t V2F_vmax_range_high; + uint32_t PaddingV2F[4]; AvfsDcBtcParams_t DcBtcGfxParams; QuadraticInt_t SSCurve_GFX; @@ -1327,18 +1349,18 @@ typedef struct { uint16_t PsmDidtReleaseTimer; uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog // CAC EDC - uint32_t Leakage_C0; // in IEEE float - uint32_t Leakage_C1; // in IEEE float - uint32_t Leakage_C2; // in IEEE float - uint32_t Leakage_C3; // in IEEE float - uint32_t Leakage_C4; // in IEEE float - uint32_t Leakage_C5; // in IEEE float - uint32_t GFX_CLK_SCALAR; // in IEEE float - uint32_t GFX_CLK_INTERCEPT; // in IEEE float - uint32_t GFX_CAC_M; // in IEEE float - uint32_t GFX_CAC_B; // in IEEE float - uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float - uint32_t DynToTotalCacScalar; // in IEEE + uint32_t CacEdcCacLeakageC0; + uint32_t CacEdcCacLeakageC1; + uint32_t CacEdcCacLeakageC2; + uint32_t CacEdcCacLeakageC3; + uint32_t CacEdcCacLeakageC4; + uint32_t CacEdcCacLeakageC5; + uint32_t CacEdcGfxClkScalar; + uint32_t CacEdcGfxClkIntercept; + uint32_t CacEdcCac_m; + uint32_t CacEdcCac_b; + uint32_t CacEdcCurrLimitGuardband; + uint32_t CacEdcDynToTotalCacRatio; // GFX EDC XVMIN uint32_t XVmin_Gfx_EdcThreshScalar; uint32_t XVmin_Gfx_EdcEnableFreq; @@ -1467,7 +1489,7 @@ typedef struct { uint8_t VddqOffEnabled; uint8_t PaddingUmcFlags[2]; - uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued + uint32_t Paddign1; uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS uint8_t FuseWritePowerMuxPresent; @@ -1530,7 +1552,7 @@ typedef struct { int16_t FuzzyFan_ErrorSetDelta; int16_t FuzzyFan_ErrorRateSetDelta; int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; + uint16_t FanPadding2; uint16_t FwCtfLimit[TEMP_COUNT]; @@ -1547,9 +1569,10 @@ typedef struct { uint16_t FanSpare[1]; uint8_t FanIntakeSensorSupport; uint8_t FanIntakePadding; - uint32_t FanAmbientPerfBoostThreshold; uint32_t FanSpare2[12]; + uint32_t ODFeatureCtrlMask; + uint16_t TemperatureLimit_Hynix; // In degrees Celsius. 
Memory temperature limit associated with Hynix uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron uint16_t TemperatureFwCtfLimit_Hynix; @@ -1637,7 +1660,7 @@ typedef struct { uint16_t AverageDclk0Frequency ; uint16_t AverageVclk1Frequency ; uint16_t AverageDclk1Frequency ; - uint16_t PCIeBusy ; + uint16_t AveragePCIeBusy ; uint16_t dGPU_W_MAX ; uint16_t padding ; @@ -1665,12 +1688,12 @@ typedef struct { uint16_t AverageGfxActivity ; uint16_t AverageUclkActivity ; - uint16_t Vcn0ActivityPercentage ; + uint16_t AverageVcn0ActivityPercentage; uint16_t Vcn1ActivityPercentage ; uint32_t EnergyAccumulator; uint16_t AverageSocketPower; - uint16_t MovingAverageTotalBoardPower; + uint16_t AverageTotalBoardPower; uint16_t AvgTemperature[TEMP_COUNT]; uint16_t AvgTemperatureFanIntake; @@ -1684,7 +1707,8 @@ typedef struct { uint8_t ThrottlingPercentage[THROTTLER_COUNT]; - uint8_t padding1[3]; + uint8_t VmaxThrottlingPercentage; + uint8_t padding1[2]; //metrics for D3hot entry/exit and driver ARM msgs uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; @@ -1693,7 +1717,7 @@ typedef struct { uint16_t ApuSTAPMSmartShiftLimit; uint16_t ApuSTAPMLimit; - uint16_t MovingAvgApuSocketPower; + uint16_t AvgApuSocketPower; uint16_t AverageUclkActivity_MAX; @@ -1823,6 +1847,17 @@ typedef struct { #define TABLE_TRANSFER_FAILED 0xFF #define TABLE_TRANSFER_PENDING 0xAB +#define TABLE_PPT_FAILED 0x100 +#define TABLE_TDC_FAILED 0x200 +#define TABLE_TEMP_FAILED 0x400 +#define TABLE_FAN_TARGET_TEMP_FAILED 0x800 +#define TABLE_FAN_STOP_TEMP_FAILED 0x1000 +#define TABLE_FAN_START_TEMP_FAILED 0x2000 +#define TABLE_FAN_PWM_MIN_FAILED 0x4000 +#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000 +#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000 +#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000 + // Table types #define TABLE_PPTABLE 0 #define TABLE_COMBO_PPTABLE 1 @@ -1849,5 +1884,6 @@ typedef struct { #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 #define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 +#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 0b3c2f54a343..822c6425d90e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xC +#define SMU_METRICS_TABLE_VERSION 0xD typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -227,6 +227,10 @@ typedef struct __attribute__((packed, aligned(4))) { // PCIE LINK Speed and width uint32_t PCIeLinkSpeed; uint32_t PCIeLinkWidth; + + // PER XCD ACTIVITY + uint32_t GfxBusy[8]; + uint64_t GfxBusyAcc[8]; } MetricsTableX_t; typedef struct __attribute__((packed, aligned(4))) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 46b456590a08..727d5b405435 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -28,7 +28,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E #define FEATURE_MASK(feature) (1ULL << 
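/*
 * 1ULL rather than 1 so the shift is evaluated in 64-bit arithmetic
 * and masks for feature bits 32..63 do not overflow a 32-bit int:
 */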
feature) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index c0f6b59369b7..31fe512028f4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1455,7 +1455,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return -EINVAL; } - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && (smu->smc_fw_version >= 0x360d00)) { if (size != 10) @@ -1523,14 +1522,14 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 16af1a329621..12223f507977 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2081,10 +2081,13 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 9c3c48297cba..3b7b2ec8319a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1786,10 +1786,13 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 22737b11b1bf..952ee22cbc90 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu) goto err0_out; smu_table->metrics_time = 0; - smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2)); + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); + smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3)); + smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4)); smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); if (!smu_table->gpu_metrics_table) goto err1_out; @@ -1077,7 +1079,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", @@ -1085,7 
+1087,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index cc0504b063fa..62316a6707ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -890,14 +890,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index a887ab945dfa..5dd7ceca64fe 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2555,24 +2555,36 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask = 1 << workload_type; /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { - if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && - ((smu->adev->pm.fw_version == 0x004e6601) || - (smu->adev->pm.fw_version >= 0x004e7300))) || - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && - smu->adev->pm.fw_version >= 0x00504500)) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); - if (workload_type >= 0) - workload_mask |= 1 << workload_type; - } + if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && + ((smu->adev->pm.fw_version == 0x004e6601) || + (smu->adev->pm.fw_version >= 0x004e7300))) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + smu->adev->pm.fw_version >= 0x00504500)) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); + if (workload_type >= 0) + workload_mask |= 1 << workload_type; } - return smu_cmn_send_smc_msg_with_param(smu, + smu->workload_mask |= workload_mask; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - workload_mask, + smu->workload_mask, NULL); + if (!ret) { + smu_cmn_assign_power_profile(smu); + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_FULLSCREEN3D); + smu->power_profile_mode = smu->workload_mask & (1 << workload_type) + ? 
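/*
 * The power-saving bit may have been OR-ed into the mask above purely
 * as a firmware optimization, in which case it can win the fls()
 * selection in smu_cmn_assign_power_profile(); report the 3D profile
 * when its bit is set too, and the bootup default otherwise:
 */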
PP_SMC_POWER_PROFILE_FULLSCREEN3D + : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + } + } + + return ret; } static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 9974c9f8135e..55ed6247eb61 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2107,8 +2107,12 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap, } mutex_lock(&adev->pm.mutex); r = smu_v13_0_6_request_i2c_xfer(smu, req); - if (r) - goto fail; + if (r) { + /* Retry once, in case of an i2c collision */ + r = smu_v13_0_6_request_i2c_xfer(smu, req); + if (r) + goto fail; + } for (c = i = 0; i < num_msgs; i++) { if (!(msg[i].flags & I2C_M_RD)) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 7bc95c404377..9d0b19419de0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2499,10 +2499,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); + if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 43820d7d2c54..1aa13d32ceb2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -367,54 +367,6 @@ static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) return 0; } -#ifndef atom_smc_dpm_info_table_14_0_0 -struct atom_smc_dpm_info_table_14_0_0 { - struct atom_common_table_header table_header; - BoardTable_t BoardTable; -}; -#endif - -static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *smc_pptable = table_context->driver_pptable; - struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table; - BoardTable_t *BoardTable = &smc_pptable->BoardTable; - int index, ret; - - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - smc_dpm_info); - - ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, - (uint8_t **)&smc_dpm_table); - if (ret) - return ret; - - memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); - - return 0; -} - -#if 0 -static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, - void **table, - uint32_t *size) -{ - struct smu_table_context *smu_table = &smu->smu_table; - void *combo_pptable = smu_table->combo_pptable; - int ret = 0; - - ret = smu_cmn_get_combo_pptable(smu); - if (ret) - return ret; - - *table = combo_pptable; - *size = sizeof(struct smu_14_0_powerplay_table); - - return 0; -} -#endif - static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, void **table, uint32_t *size) @@ -436,16 +388,12 @@ static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, static int smu_v14_0_2_setup_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - struct amdgpu_device *adev = smu->adev; int ret = 0; if 
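/*
 * A virtual function is expected to get its PP table from the host
 * under SR-IOV, so there is nothing to set up here:
 */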
(amdgpu_sriov_vf(smu->adev)) return 0; - if (!adev->scpm_enabled) - ret = smu_v14_0_setup_pptable(smu); - else - ret = smu_v14_0_2_get_pptable_from_pmfw(smu, + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, &smu_table->power_play_table, &smu_table->power_play_table_size); if (ret) @@ -455,16 +403,6 @@ static int smu_v14_0_2_setup_pptable(struct smu_context *smu) if (ret) return ret; - /* - * With SCPM enabled, the operation below will be handled - * by PSP. Driver involvment is unnecessary and useless. - */ - if (!adev->scpm_enabled) { - ret = smu_v14_0_2_append_powerplay_table(smu); - if (ret) - return ret; - } - ret = smu_v14_0_2_check_powerplay_table(smu); if (ret) return ret; @@ -1077,12 +1015,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu, switch (od_feature_bit) { case PP_OD_FEATURE_GFXCLK_FMIN: - od_min_setting = overdrive_lowerlimits->GfxclkFmin; - od_max_setting = overdrive_upperlimits->GfxclkFmin; - break; case PP_OD_FEATURE_GFXCLK_FMAX: - od_min_setting = overdrive_lowerlimits->GfxclkFmax; - od_max_setting = overdrive_upperlimits->GfxclkFmax; + od_min_setting = overdrive_lowerlimits->GfxclkFoffset; + od_max_setting = overdrive_upperlimits->GfxclkFoffset; break; case PP_OD_FEATURE_UCLK_FMIN: od_min_setting = overdrive_lowerlimits->UclkFmin; @@ -1269,10 +1204,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFXCLK_BIT)) break; - size += sysfs_emit_at(buf, size, "OD_SCLK:\n"); - size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", - od_table->OverDriveTable.GfxclkFmin, - od_table->OverDriveTable.GfxclkFmax); + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + + size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n", + overdrive_lowerlimits->GfxclkFoffset, + overdrive_upperlimits->GfxclkFoffset); break; case SMU_OD_MCLK: @@ -1414,7 +1355,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, PP_OD_FEATURE_GFXCLK_FMAX, NULL, &max_value); - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n", min_value, max_value); } @@ -1796,7 +1737,7 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; - + uint32_t current_profile_mode = smu->power_profile_mode; smu->power_profile_mode = input[size]; if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { @@ -1854,6 +1795,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, } } + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) + smu_v14_0_deep_sleep_control(smu, false); + else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) + smu_v14_0_deep_sleep_control(smu, true); + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, @@ -1861,10 +1807,13 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + smu->workload_mask, 
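/* send the aggregated driver | user mask, not just this profile's bit */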
NULL); + + if (!ret) + smu_cmn_assign_power_profile(smu); + + return ret; } static int smu_v14_0_2_baco_enter(struct smu_context *smu) @@ -2154,7 +2103,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; - gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage, + gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage, metrics->Vcn1ActivityPercentage); gpu_metrics->average_socket_power = metrics->AverageSocketPower; @@ -2213,8 +2162,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu, { struct amdgpu_device *adev = smu->adev; - dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, - od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset); dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, od_table->OverDriveTable.UclkFmax); } @@ -2305,10 +2253,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu) memcpy(user_od_table, boot_od_table, sizeof(OverDriveTableExternal_t)); - user_od_table->OverDriveTable.GfxclkFmin = - user_od_table_bak.OverDriveTable.GfxclkFmin; - user_od_table->OverDriveTable.GfxclkFmax = - user_od_table_bak.OverDriveTable.GfxclkFmax; + user_od_table->OverDriveTable.GfxclkFoffset = + user_od_table_bak.OverDriveTable.GfxclkFoffset; user_od_table->OverDriveTable.UclkFmin = user_od_table_bak.OverDriveTable.UclkFmin; user_od_table->OverDriveTable.UclkFmax = @@ -2437,22 +2383,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, } switch (input[i]) { - case 0: - smu_v14_0_2_get_od_setting_limits(smu, - PP_OD_FEATURE_GFXCLK_FMIN, - &minimum, - &maximum); - if (input[i + 1] < minimum || - input[i + 1] > maximum) { - dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", - input[i + 1], minimum, maximum); - return -EINVAL; - } - - od_table->OverDriveTable.GfxclkFmin = input[i + 1]; - od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; - break; - case 1: smu_v14_0_2_get_od_setting_limits(smu, PP_OD_FEATURE_GFXCLK_FMAX, @@ -2465,7 +2395,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, return -EINVAL; } - od_table->OverDriveTable.GfxclkFmax = input[i + 1]; + od_table->OverDriveTable.GfxclkFoffset = input[i + 1]; od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; break; @@ -2476,13 +2406,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, } } - if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { - dev_err(adev->dev, - "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", - (uint32_t)od_table->OverDriveTable.GfxclkFmin, - (uint32_t)od_table->OverDriveTable.GfxclkFmax); - return -EINVAL; - } break; case PP_OD_EDIT_MCLK_VDDC_TABLE: @@ -2813,7 +2736,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .setup_pptable = smu_v14_0_2_setup_pptable, .check_fw_version = smu_v14_0_check_fw_version, - .write_pptable = smu_cmn_write_pptable, .set_driver_table_location = smu_v14_0_set_driver_table_location, .system_features_control = smu_v14_0_system_features_control, .set_allowed_mask = smu_v14_0_set_allowed_mask, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 91ad434bcdae..bdfc5e617333 
100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1138,6 +1138,14 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, return ret; } +void smu_cmn_assign_power_profile(struct smu_context *smu) +{ + uint32_t index; + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; +} + bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) { struct pci_dev *p = NULL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 1de685defe85..8a801e389659 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -130,6 +130,8 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); +void smu_cmn_assign_power_profile(struct smu_context *smu); + /* * Helper function to make sysfs_emit_at() happy. Align buf to * the current page boundary and record the offset. diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 225817087b4d..3a908bb015fe 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -287,9 +287,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - regs = pcim_iomap(pdev, 1, 0); - if (!regs) - return -EIO; + regs = pcim_iomap_region(pdev, 1, "ast"); + if (IS_ERR(regs)) + return PTR_ERR(regs); if (pdev->revision >= 0x40) { /* @@ -311,9 +311,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (len < AST_IO_MM_LENGTH) return -EIO; - ioregs = pcim_iomap(pdev, 2, 0); - if (!ioregs) - return -EIO; + ioregs = pcim_iomap_region(pdev, 2, "ast"); + if (IS_ERR(ioregs)) + return PTR_ERR(ioregs); } else { /* * Anything else is best effort. diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c index 496c7120e515..c231389936bd 100644 --- a/drivers/gpu/drm/ast/ast_sil164.c +++ b/drivers/gpu/drm/ast/ast_sil164.c @@ -29,6 +29,8 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector if (ast_connector->physical_status == connector_status_connected) { count = drm_connector_helper_get_modes(connector); } else { + drm_edid_connector_update(connector, NULL); + /* * There's no EDID data without a connected monitor. Set BMC- * compatible modes in this case. The XGA default resolution diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c index 3e815da43fbd..dd389a0a8f4a 100644 --- a/drivers/gpu/drm/ast/ast_vga.c +++ b/drivers/gpu/drm/ast/ast_vga.c @@ -29,6 +29,8 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) if (ast_connector->physical_status == connector_status_connected) { count = drm_connector_helper_get_modes(connector); } else { + drm_edid_connector_update(connector, NULL); + /* * There's no EDID data without a connected monitor. Set BMC- * compatible modes in this case. 
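* Passing NULL to drm_edid_connector_update() above clears any
* EDID-derived modes and properties left over from a previously
* connected display before the BMC fallback modes are set.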
The XGA default resolution diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index b29980f95379..295e9d031e2d 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -58,9 +58,10 @@ int drm_aux_bridge_register(struct device *parent) adev->id = ret; adev->name = "aux_bridge"; adev->dev.parent = parent; - adev->dev.of_node = of_node_get(parent->of_node); adev->dev.release = drm_aux_bridge_release; + device_set_of_node_from_dev(&adev->dev, parent); + ret = auxiliary_device_init(adev); if (ret) { ida_free(&drm_aux_bridge_ida, adev->id); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index dee640ab1d3a..41f72d458487 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -47,7 +47,7 @@ #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "cdns-mhdp8546-core.h" #include "cdns-mhdp8546-hdcp.h" diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c index 5e3b8edcf794..31832ba4017f 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c @@ -9,7 +9,7 @@ #include <linux/io.h> #include <linux/iopoll.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <drm/display/drm_hdcp_helper.h> diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index e7e53a9e42af..430f8adebf9c 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -10,7 +10,7 @@ * Tomasz Figa <t.figa@samsung.com> */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/clk.h> #include <linux/delay.h> diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 6bb755e9f0a5..26b8d137bce0 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -6,7 +6,7 @@ * Andrzej Hajda <a.hajda@samsung.com> */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <drm/bridge/mhl.h> #include <drm/drm_bridge.h> diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index 67b8d17a722a..221e9a4edb40 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/vmalloc.h> #include <drm/bridge/dw_hdmi.h> #include <drm/drm_edid.h> @@ -388,15 +389,36 @@ static int dw_hdmi_close(struct snd_pcm_substream *substream) static int dw_hdmi_hw_free(struct snd_pcm_substream *substream) { - return snd_pcm_lib_free_vmalloc_buffer(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + + vfree(runtime->dma_area); + runtime->dma_area = NULL; + return 0; } static int dw_hdmi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { + struct snd_pcm_runtime *runtime = substream->runtime; + size_t size = params_buffer_bytes(params); + /* Allocate the PCM runtime buffer, which is exposed to userspace. 
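* The open-coded path below mirrors the removed
* snd_pcm_lib_alloc_vmalloc_buffer() helper: keep the existing vmalloc
* area when it is already large enough, otherwise vfree() it and
* vzalloc() a fresh one, returning 1 to tell ALSA the buffer changed.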
*/ - return snd_pcm_lib_alloc_vmalloc_buffer(substream, - params_buffer_bytes(params)); + if (runtime->dma_area) { + if (runtime->dma_bytes >= size) + return 0; /* already large enough */ + vfree(runtime->dma_area); + } + runtime->dma_area = vzalloc(size); + if (!runtime->dma_area) + return -ENOMEM; + runtime->dma_bytes = size; + return 1; +} + +static struct page *dw_hdmi_get_page(struct snd_pcm_substream *substream, + unsigned long offset) +{ + return vmalloc_to_page(substream->runtime->dma_area + offset); } static int dw_hdmi_prepare(struct snd_pcm_substream *substream) @@ -515,7 +537,7 @@ static const struct snd_pcm_ops snd_dw_hdmi_ops = { .prepare = dw_hdmi_prepare, .trigger = dw_hdmi_trigger, .pointer = dw_hdmi_pointer, - .page = snd_pcm_lib_get_vmalloc_page, + .page = dw_hdmi_get_page, }; static int snd_dw_hdmi_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 290e2532fab1..f3afdab55c11 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -2391,6 +2391,7 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc) if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 || tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) { dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n"); + of_node_put(node); return -EINVAL; } } diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index 3b7cc3be2ccd..0b4efaca6d68 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -19,7 +19,7 @@ #include <linux/regulator/consumer.h> #include <linux/slab.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic_helper.h> diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 84698a0b27a8..582cf4f73a74 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -21,7 +21,7 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <drm/display/drm_dp_aux_bus.h> #include <drm/display/drm_dp_helper.h> diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index a040d7dfced1..ac90118b9e7a 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -6083,6 +6083,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) struct drm_dp_aux *immediate_upstream_aux; struct drm_dp_mst_port *fec_port; struct drm_dp_desc desc = {}; + u8 upstream_dsc; u8 endpoint_fec; u8 endpoint_dsc; @@ -6109,8 +6110,6 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) /* DP-to-DP peer device */ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) { - u8 upstream_dsc; - if (drm_dp_dpcd_read(&port->aux, DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) return NULL; @@ -6156,6 +6155,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) { u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; + if (drm_dp_dpcd_read(immediate_upstream_aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) + return NULL; + + if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED)) + return NULL; + if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0) return NULL; diff --git 
a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c index 7854820089ec..feb7a3a75981 100644 --- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c @@ -521,8 +521,6 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector, } EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check); -#define HDMI_MAX_INFOFRAME_SIZE 29 - static int clear_device_infoframe(struct drm_connector *connector, enum hdmi_infoframe_type type) { @@ -563,7 +561,7 @@ static int write_device_infoframe(struct drm_connector *connector, { const struct drm_connector_hdmi_funcs *funcs = connector->hdmi.funcs; struct drm_device *dev = connector->dev; - u8 buffer[HDMI_MAX_INFOFRAME_SIZE]; + u8 buffer[HDMI_INFOFRAME_SIZE(MAX)]; int ret; int len; diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 7936c2023955..370dc676e3aa 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -543,7 +543,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, &state->fb_damage_clips, val, -1, - sizeof(struct drm_rect), + sizeof(struct drm_mode_rect), &replaced); return ret; } else if (property == plane->scaling_filter_property) { diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6b239a24f1df..9d3e6dd68810 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -520,8 +520,6 @@ static const struct file_operations drm_connector_fops = { .write = connector_write }; -#define HDMI_MAX_INFOFRAME_SIZE 29 - static ssize_t audio_infoframe_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { @@ -579,7 +577,7 @@ static ssize_t _f##_read_infoframe(struct file *filp, \ struct drm_connector *connector; \ union hdmi_infoframe *frame; \ struct drm_device *dev; \ - u8 buf[HDMI_MAX_INFOFRAME_SIZE]; \ + u8 buf[HDMI_INFOFRAME_SIZE(MAX)]; \ ssize_t len = 0; \ \ connector = filp->private_data; \ diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c index 2da094bdf8a4..18e366cc4993 100644 --- a/drivers/gpu/drm/drm_exec.c +++ b/drivers/gpu/drm/drm_exec.c @@ -145,8 +145,7 @@ static int drm_exec_obj_locked(struct drm_exec *exec, size_t size = exec->max_objects * sizeof(void *); void *tmp; - tmp = kvrealloc(exec->objects, size, size + PAGE_SIZE, - GFP_KERNEL); + tmp = kvrealloc(exec->objects, size + PAGE_SIZE, GFP_KERNEL); if (!tmp) return -ENOMEM; diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index b0602c4f3628..51c2d742d199 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -50,7 +50,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info) if (!fb_helper->dev) return; - fb_deferred_io_cleanup(info); + if (info->fbdefio) + fb_deferred_io_cleanup(info); drm_fb_helper_fini(fb_helper); drm_client_buffer_vunmap(fb_helper->buffer); diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 01fde94fe2a9..ad1dc638c83b 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -103,7 +103,6 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) * .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n * .poll = drm_poll, * .read = drm_read, - * .llseek = no_llseek, * .mmap = drm_gem_mmap, * }; * @@ -310,6 +309,8 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor) if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != 
DRM_SWITCH_POWER_DYNAMIC_OFF) return -EINVAL; + if (WARN_ON_ONCE(!(filp->f_op->fop_flags & FOP_UNSIGNED_OFFSET))) + return -EINVAL; drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n", current->comm, task_pid_nr(current), minor->index); @@ -327,7 +328,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor) } filp->private_data = priv; - filp->f_mode |= FMODE_UNSIGNED_OFFSET; priv->filp = filp; mutex_lock(&dev->filelist_mutex); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 0830cae9a4d0..2d84d7ea1ab7 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -403,7 +403,6 @@ static const struct dmi_system_id orientation_data[] = { }, { /* Lenovo Yoga Tab 3 X90F */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)&lcd1600x2560_rightside_up, diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index a0e94217b511..8e3d2d7060f8 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -715,16 +715,16 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, struct fd f = fdget(fd); int ret; - if (!f.file) + if (!fd_file(f)) return -EINVAL; - if (f.file->f_op != &drm_syncobj_file_fops) { + if (fd_file(f)->f_op != &drm_syncobj_file_fops) { fdput(f); return -EINVAL; } /* take a reference to put in the idr */ - syncobj = f.file->private_data; + syncobj = fd_file(f)->private_data; drm_syncobj_get(syncobj); idr_preload(GFP_KERNEL); @@ -1464,6 +1464,7 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, struct drm_syncobj *syncobj; struct eventfd_ctx *ev_fd_ctx; struct syncobj_eventfd_entry *entry; + int ret; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) return -EOPNOTSUPP; @@ -1479,13 +1480,15 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, return -ENOENT; ev_fd_ctx = eventfd_ctx_fdget(args->fd); - if (IS_ERR(ev_fd_ctx)) - return PTR_ERR(ev_fd_ctx); + if (IS_ERR(ev_fd_ctx)) { + ret = PTR_ERR(ev_fd_ctx); + goto err_fdget; + } entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { - eventfd_ctx_put(ev_fd_ctx); - return -ENOMEM; + ret = -ENOMEM; + goto err_kzalloc; } entry->syncobj = syncobj; entry->ev_fd_ctx = ev_fd_ctx; @@ -1496,6 +1499,12 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data, drm_syncobj_put(syncobj); return 0; + +err_kzalloc: + eventfd_ctx_put(ev_fd_ctx); +err_fdget: + drm_syncobj_put(syncobj); + return ret; } int diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index e2c7373f20c6..6a6761935224 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -110,7 +110,7 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, void *mapping = NULL; if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) - mapping = arm_iommu_create_mapping(&platform_bus_type, + mapping = arm_iommu_create_mapping(dev, EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE); else if (IS_ENABLED(CONFIG_IOMMU_DMA)) mapping = iommu_get_domain_for_dev(priv->dma_dev); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 8b64f61ffaf9..d67c2b3ad901 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -498,6 +498,7 @@ static const struct file_operations psb_gem_fops = { .mmap 
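/*
 * drm_open_helper() no longer forces FMODE_UNSIGNED_OFFSET on the file
 * (see the drm_file.c hunk above) and instead WARNs and rejects opens
 * whose fops lack FOP_UNSIGNED_OFFSET, hence the new flag below:
 */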
= drm_gem_mmap, .poll = drm_poll, .read = drm_read, + .fop_flags = FOP_UNSIGNED_OFFSET, }; static const struct drm_driver driver = { diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index faa253b27664..14ac351fd76d 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -123,9 +123,8 @@ config DRM_I915_USERPTR config DRM_I915_GVT_KVMGT tristate "Enable KVM host support Intel GVT-g graphics virtualization" depends on DRM_I915 - depends on X86 + depends on KVM_X86 depends on 64BIT - depends on KVM depends on VFIO select DRM_I915_GVT select KVM_EXTERNAL_WRITE_TRACKING diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 00fbe9f8c03a..b1c294236cc8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -916,7 +916,7 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port, * instead of a specific AUX_IO_<port> reference without powering up any * extra wells. */ - if (intel_encoder_can_psr(&dig_port->base)) + if (intel_psr_needs_aux_io_power(&dig_port->base, crtc_state)) return intel_display_power_aux_io_domain(i915, dig_port->aux_ch); else if (DISPLAY_VER(i915) < 14 && (intel_crtc_has_dp_encoder(crtc_state) || diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a1fcedfd404b..90fa73575feb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -531,6 +531,10 @@ static void intel_dp_set_source_rates(struct intel_dp *intel_dp) { /* The values must be in increasing order */ + static const int bmg_rates[] = { + 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, + 810000, 1000000, 1350000, + }; static const int mtl_rates[] = { 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000, 810000, 1000000, 2000000, @@ -561,8 +565,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) intel_dp->source_rates || intel_dp->num_source_rates); if (DISPLAY_VER(dev_priv) >= 14) { - source_rates = mtl_rates; - size = ARRAY_SIZE(mtl_rates); + if (IS_BATTLEMAGE(dev_priv)) { + source_rates = bmg_rates; + size = ARRAY_SIZE(bmg_rates); + } else { + source_rates = mtl_rates; + size = ARRAY_SIZE(mtl_rates); + } max_rate = mtl_max_source_rate(intel_dp); } else if (DISPLAY_VER(dev_priv) >= 11) { source_rates = icl_rates; @@ -4058,6 +4067,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector drm_dp_is_branch(intel_dp->dpcd)); intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident); + intel_dp->colorimetry_support = + intel_dp_get_colorimetry_status(intel_dp); + /* * Read the eDP display control registers. 
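An aside on the intel_dp hunk above: the source-rate logic simply selects one static, ascending rate table per display generation and lets the platform cap clamp it. A standalone userspace model of that selection (the rate values are copied from the tables above; struct platform and pick_rates() are invented for the sketch):

/*
 * Illustrative model of per-platform source-rate table selection.
 */
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int bmg_rates[] = { 162000, 216000, 243000, 270000, 324000,
				 432000, 540000, 675000, 810000, 1000000,
				 1350000 };
static const int mtl_rates[] = { 162000, 216000, 243000, 270000, 324000,
				 432000, 540000, 675000, 810000, 1000000,
				 2000000 };

struct platform {
	int display_ver;
	int is_battlemage;
};

static void pick_rates(const struct platform *p, const int **rates,
		       size_t *count)
{
	/* Battlemage gets its own table; other display-ver >= 14 parts use MTL's. */
	if (p->display_ver >= 14 && p->is_battlemage) {
		*rates = bmg_rates;
		*count = ARRAY_SIZE(bmg_rates);
	} else {
		*rates = mtl_rates;
		*count = ARRAY_SIZE(mtl_rates);
	}
}

int main(void)
{
	const struct platform bmg = { 14, 1 };
	const int *rates;
	size_t n;

	pick_rates(&bmg, &rates, &n);
	printf("top source rate: %d kHz\n", rates[n - 1]); /* 1350000 */
	return 0;
}

Because the tables must be sorted in increasing order, the highest usable rate is simply the last entry that survives the platform's max-rate clamp.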
* @@ -4171,6 +4183,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident); + intel_dp->colorimetry_support = + intel_dp_get_colorimetry_status(intel_dp); + intel_dp_update_sink_caps(intel_dp); } @@ -6922,9 +6937,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, "HDCP init failed, skipping.\n"); } - intel_dp->colorimetry_support = - intel_dp_get_colorimetry_status(intel_dp); - intel_dp->frl.is_trained = false; intel_dp->frl.trained_rate_gbps = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 15541932b809..eeaedd979354 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -89,25 +89,19 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state, static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state, const struct intel_connector *connector, - bool ssc, bool dsc, int bpp_x16) + bool ssc, int dsc_slice_count, int bpp_x16) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; unsigned long flags = DRM_DP_BW_OVERHEAD_MST; - int dsc_slice_count = 0; int overhead; flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0; flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0; flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0; - if (dsc) { + if (dsc_slice_count) flags |= DRM_DP_BW_OVERHEAD_DSC; - dsc_slice_count = intel_dp_dsc_get_slice_count(connector, - adjusted_mode->clock, - adjusted_mode->hdisplay, - crtc_state->joiner_pipes); - } overhead = drm_dp_bw_overhead(crtc_state->lane_count, adjusted_mode->hdisplay, @@ -153,6 +147,19 @@ static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead) return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000); } +static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector, + const struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int num_joined_pipes = crtc_state->joiner_pipes; + + return intel_dp_dsc_get_slice_count(connector, + adjusted_mode->clock, + adjusted_mode->hdisplay, + num_joined_pipes); +} + static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int max_bpp, @@ -172,6 +179,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpp, slots = -EINVAL; + int dsc_slice_count = 0; int max_dpt_bpp; int ret = 0; @@ -203,6 +211,15 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n", min_bpp, max_bpp); + if (dsc) { + dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state); + if (!dsc_slice_count) { + drm_dbg_kms(&i915->drm, "Can't get valid DSC slice count\n"); + + return -ENOSPC; + } + } + for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) { int local_bw_overhead; int remote_bw_overhead; @@ -216,9 +233,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, intel_dp_output_bpp(crtc_state->output_format, bpp)); local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, - false, dsc, link_bpp_x16); + false, dsc_slice_count, link_bpp_x16); remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, - true, dsc, link_bpp_x16); + true, 
dsc_slice_count, link_bpp_x16); intel_dp_mst_compute_m_n(crtc_state, connector, local_bw_overhead, @@ -449,6 +466,9 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector, if (mode_hblank_period_ns(adjusted_mode) > hblank_limit) return false; + if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state)) + return false; + return true; } diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index d8951464bd2b..f0e3be0fe420 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -32,7 +32,7 @@ #include <linux/slab.h> #include <linux/string_helpers.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 5be7bb43e2e0..35557d98d7a7 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -438,6 +438,19 @@ bool intel_fb_needs_64k_phys(u64 modifier) INTEL_PLANE_CAP_NEED64K_PHYS); } +/** + * intel_fb_is_tile4_modifier: Check if a modifier is a tile4 modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a tile4 modifier. + */ +bool intel_fb_is_tile4_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_TILING_4); +} + static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md, u8 display_ver_from, u8 display_ver_until) { diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 10de437e8ef8..827be3f7934c 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -35,6 +35,7 @@ bool intel_fb_is_ccs_modifier(u64 modifier); bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier); bool intel_fb_is_mc_ccs_modifier(u64 modifier); bool intel_fb_needs_64k_phys(u64 modifier); +bool intel_fb_is_tile4_modifier(u64 modifier); bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane); int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 6980b98792c2..377939de0ff4 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -1094,7 +1094,8 @@ static void intel_hdcp_update_value(struct intel_connector *connector, hdcp->value = value; if (update_property) { drm_connector_get(&connector->base); - queue_work(i915->unordered_wq, &hdcp->prop_work); + if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + drm_connector_put(&connector->base); } } @@ -2524,7 +2525,8 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, mutex_lock(&hdcp->mutex); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; drm_connector_get(&connector->base); - queue_work(i915->unordered_wq, &hdcp->prop_work); + if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + drm_connector_put(&connector->base); mutex_unlock(&hdcp->mutex); } @@ -2541,7 +2543,9 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, */ if (!desired_and_not_enabled && !content_protection_type_changed) { drm_connector_get(&connector->base); - queue_work(i915->unordered_wq, &hdcp->prop_work); + if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + drm_connector_put(&connector->base); + } } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c 
b/drivers/gpu/drm/i915/display/intel_psr.c index 1f83b3b67ea6..136a0d6ca970 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -203,6 +203,25 @@ bool intel_encoder_can_psr(struct intel_encoder *encoder) return false; } +bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + /* + * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever + * the output is enabled. For non-eDP outputs the main link is always + * on, hence it doesn't require the HW initiated AUX wake-up signaling used + * for eDP. + * + * TODO: + * - Consider leaving AUX IO disabled for eDP / PR as well, in case + * the ALPM with main-link off mode is not enabled. + * - Leave AUX IO enabled for DP / PR, once support for ALPM with + * main-link off mode is added for it and this mode gets enabled. + */ + return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && + intel_encoder_can_psr(encoder); +} + static bool psr_global_enabled(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); @@ -2784,13 +2803,6 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) EDP_PSR_STATUS_STATE_MASK, 50); } -static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp) -{ - return intel_dp_is_edp(intel_dp) ? - _psr2_ready_for_pipe_update_locked(intel_dp) : - _psr1_ready_for_pipe_update_locked(intel_dp); -} - /** * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update * @new_crtc_state: new CRTC state @@ -2813,12 +2825,10 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat lockdep_assert_held(&intel_dp->psr.lock); - if (!intel_dp->psr.enabled) + if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled) continue; - if (intel_dp->psr.panel_replay_enabled) - ret = _panel_replay_ready_for_pipe_update_locked(intel_dp); - else if (intel_dp->psr.sel_update_enabled) + if (intel_dp->psr.sel_update_enabled) ret = _psr2_ready_for_pipe_update_locked(intel_dp); else ret = _psr1_ready_for_pipe_update_locked(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 4e09c10908e4..6eb5f15f674f 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -25,6 +25,8 @@ struct intel_plane_state; (intel_dp)->psr.source_panel_replay_support) bool intel_encoder_can_psr(struct intel_encoder *encoder); +bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void intel_psr_init_dpcd(struct intel_dp *intel_dp); void intel_psr_enable_sink(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 17d4c880ecc4..c8720d31d101 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1591,6 +1591,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, return -EINVAL; } + /* + * Display20 onward tile4 hflip is not supported + */ + if (rotation & DRM_MODE_REFLECT_X && + intel_fb_is_tile4_modifier(fb->modifier) && + DISPLAY_VER(dev_priv) >= 20) { + drm_dbg_kms(&dev_priv->drm, + "horizontal flip is not supported with tile4 surface formats\n"); + return -EINVAL; + } + if (drm_rotation_90_or_270(rotation)) { if 
(!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) { drm_dbg_kms(&dev_priv->drm, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index c5e1c718a6d2..fe69f2c8527d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -424,7 +424,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj, struct address_space *mapping = obj->base.filp->f_mapping; const struct address_space_operations *aops = mapping->a_ops; char __user *user_data = u64_to_user_ptr(arg->data_ptr); - u64 remain, offset; + u64 remain; + loff_t pos; unsigned int pg; /* Caller already validated user args */ @@ -457,12 +458,12 @@ shmem_pwrite(struct drm_i915_gem_object *obj, */ remain = arg->size; - offset = arg->offset; - pg = offset_in_page(offset); + pos = arg->offset; + pg = offset_in_page(pos); do { unsigned int len, unwritten; - struct page *page; + struct folio *folio; void *data, *vaddr; int err; char __maybe_unused c; @@ -480,21 +481,19 @@ shmem_pwrite(struct drm_i915_gem_object *obj, if (err) return err; - err = aops->write_begin(obj->base.filp, mapping, offset, len, - &page, &data); + err = aops->write_begin(obj->base.filp, mapping, pos, len, + &folio, &data); if (err < 0) return err; - vaddr = kmap_local_page(page); + vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos)); pagefault_disable(); - unwritten = __copy_from_user_inatomic(vaddr + pg, - user_data, - len); + unwritten = __copy_from_user_inatomic(vaddr, user_data, len); pagefault_enable(); kunmap_local(vaddr); - err = aops->write_end(obj->base.filp, mapping, offset, len, - len - unwritten, page, data); + err = aops->write_end(obj->base.filp, mapping, pos, len, + len - unwritten, folio, data); if (err < 0) return err; @@ -504,7 +503,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj, remain -= len; user_data += len; - offset += len; + pos += len; pg = 0; } while (remain); @@ -660,7 +659,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, struct drm_i915_gem_object *obj; struct file *file; const struct address_space_operations *aops; - resource_size_t offset; + loff_t pos; int err; GEM_WARN_ON(IS_DGFX(i915)); @@ -672,29 +671,27 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, file = obj->base.filp; aops = file->f_mapping->a_ops; - offset = 0; + pos = 0; do { unsigned int len = min_t(typeof(size), size, PAGE_SIZE); - struct page *page; - void *pgdata, *vaddr; + struct folio *folio; + void *fsdata; - err = aops->write_begin(file, file->f_mapping, offset, len, - &page, &pgdata); + err = aops->write_begin(file, file->f_mapping, pos, len, + &folio, &fsdata); if (err < 0) goto fail; - vaddr = kmap(page); - memcpy(vaddr, data, len); - kunmap(page); + memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len); - err = aops->write_end(file, file->f_mapping, offset, len, len, - page, pgdata); + err = aops->write_end(file, file->f_mapping, pos, len, len, + folio, fsdata); if (err < 0) goto fail; size -= len; data += len; - offset += len; + pos += len; } while (size); return obj; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 5c72462d1f57..b22e2019768f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -1131,7 +1131,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource)); } - if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) + if (wakeref && 
CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0) intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index d4b918fb11ce..1f55e62044a4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -266,7 +266,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) args->timeout_ns = 0; - /* Asked to wait beyond the jiffie/scheduler precision? */ + /* Asked to wait beyond the jiffy/scheduler precision? */ if (ret == -ETIME && args->timeout_ns) ret = -EAGAIN; } diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index 4202df5b8c12..222ca7c44951 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -93,7 +93,7 @@ static int wait_for_reset(struct intel_engine_cs *engine, return -EINVAL; } - /* Give the request a jiffie to complete after flushing the worker */ + /* Give the request a jiffy to complete after flushing the worker */ if (i915_request_wait(rq, 0, max(0l, (long)(timeout - jiffies)) + 1) < 0) { pr_err("%s: hanging request %llx:%lld did not complete\n", @@ -3426,7 +3426,7 @@ static int live_preempt_timeout(void *arg) cpu_relax(); saved_timeout = engine->props.preempt_timeout_ms; - engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ + engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */ i915_request_get(rq); i915_request_add(rq); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 8322c913bc3c..ed979847187f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2843,9 +2843,9 @@ static void prepare_context_registration_info_v70(struct intel_context *ce, ce->parallel.guc.wqi_tail = 0; ce->parallel.guc.wqi_head = 0; - wq_desc_offset = i915_ggtt_offset(ce->state) + + wq_desc_offset = (u64)i915_ggtt_offset(ce->state) + __get_parent_scratch_offset(ce); - wq_base_offset = i915_ggtt_offset(ce->state) + + wq_base_offset = (u64)i915_ggtt_offset(ce->state) + __get_wq_offset(ce); info->wq_desc_lo = lower_32_bits(wq_desc_offset); info->wq_desc_hi = upper_32_bits(wq_desc_offset); diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index fe905d65ddf7..a40f05b993da 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -1675,6 +1675,7 @@ static const struct file_operations i915_driver_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = drm_show_fdinfo, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; static int diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 025a79fe5920..2406cda75b7b 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3751,7 +3751,6 @@ static int i915_perf_release(struct inode *inode, struct file *file) static const struct file_operations fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .release = i915_perf_release, .poll = i915_perf_poll, .read = i915_perf_read, diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index b34a2d3d331d..2576f8f6c0f6 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -69,7 +69,7 @@ 
void set_timer_ms(struct timer_list *t, unsigned long timeout) * Paranoia to make sure the compiler computes the timeout before * loading 'jiffies' as jiffies is volatile and may be updated in * the background by a timer tick. All to reduce the complexity - * of the addition and reduce the risk of losing a jiffie. + * of the addition and reduce the risk of losing a jiffy. */ barrier(); diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c index eded5e955cc0..4cb3494c0bb2 100644 --- a/drivers/gpu/drm/imagination/pvr_context.c +++ b/drivers/gpu/drm/imagination/pvr_context.c @@ -17,10 +17,14 @@ #include <drm/drm_auth.h> #include <drm/drm_managed.h> + +#include <linux/bug.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/list.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/xarray.h> @@ -354,6 +358,10 @@ int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_co return err; } + spin_lock(&pvr_dev->ctx_list_lock); + list_add_tail(&ctx->file_link, &pvr_file->contexts); + spin_unlock(&pvr_dev->ctx_list_lock); + return 0; err_destroy_fw_obj: @@ -380,6 +388,11 @@ pvr_context_release(struct kref *ref_count) container_of(ref_count, struct pvr_context, ref_count); struct pvr_device *pvr_dev = ctx->pvr_dev; + WARN_ON(in_interrupt()); + spin_lock(&pvr_dev->ctx_list_lock); + list_del(&ctx->file_link); + spin_unlock(&pvr_dev->ctx_list_lock); + xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id); pvr_context_destroy_queues(ctx); pvr_fw_object_destroy(ctx->fw_obj); @@ -437,11 +450,30 @@ pvr_context_destroy(struct pvr_file *pvr_file, u32 handle) */ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file) { + struct pvr_device *pvr_dev = pvr_file->pvr_dev; struct pvr_context *ctx; unsigned long handle; xa_for_each(&pvr_file->ctx_handles, handle, ctx) pvr_context_destroy(pvr_file, handle); + + spin_lock(&pvr_dev->ctx_list_lock); + ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link); + + while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) { + list_del_init(&ctx->file_link); + + if (pvr_context_get_if_referenced(ctx)) { + spin_unlock(&pvr_dev->ctx_list_lock); + + pvr_vm_unmap_all(ctx->vm_ctx); + + pvr_context_put(ctx); + spin_lock(&pvr_dev->ctx_list_lock); + } + ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link); + } + spin_unlock(&pvr_dev->ctx_list_lock); } /** @@ -451,6 +483,7 @@ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file) void pvr_context_device_init(struct pvr_device *pvr_dev) { xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1); + spin_lock_init(&pvr_dev->ctx_list_lock); } /** diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h index 0c7b97dfa6ba..07afa179cdf4 100644 --- a/drivers/gpu/drm/imagination/pvr_context.h +++ b/drivers/gpu/drm/imagination/pvr_context.h @@ -85,6 +85,9 @@ struct pvr_context { /** @compute: Transfer queue. */ struct pvr_queue *transfer; } queues; + + /** @file_link: pvr_file PVR context list link. */ + struct list_head file_link; }; static __always_inline struct pvr_queue * @@ -124,6 +127,24 @@ pvr_context_get(struct pvr_context *ctx) } /** + * pvr_context_get_if_referenced() - Take an additional reference on a still + * referenced context. + * @ctx: Context pointer. + * + * Call pvr_context_put() to release. 
+ * + * Returns: + * * %true on success, or + * * %false if no context pointer passed, or the context wasn't still + * referenced. + */ +static __always_inline bool +pvr_context_get_if_referenced(struct pvr_context *ctx) +{ + return ctx != NULL && kref_get_unless_zero(&ctx->ref_count) != 0; +} + +/** * pvr_context_lookup() - Lookup context pointer from handle and file. * @pvr_file: Pointer to pvr_file structure. * @handle: Context handle. diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index b574e23d484b..6d0dfacb677b 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/math.h> #include <linux/mutex.h> +#include <linux/spinlock_types.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/wait.h> @@ -293,6 +294,12 @@ struct pvr_device { /** @sched_wq: Workqueue for schedulers. */ struct workqueue_struct *sched_wq; + + /** + * @ctx_list_lock: Lock to be held when accessing the context list in + * struct pvr_file. + */ + spinlock_t ctx_list_lock; }; /** @@ -344,6 +351,9 @@ struct pvr_file { * This array is used to allocate handles returned to userspace. */ struct xarray vm_ctx_handles; + + /** @contexts: PVR context list. */ + struct list_head contexts; }; /** diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c index 1a0cb7aa9cea..fb17196e05f4 100644 --- a/drivers/gpu/drm/imagination/pvr_drv.c +++ b/drivers/gpu/drm/imagination/pvr_drv.c @@ -28,6 +28,7 @@ #include <linux/export.h> #include <linux/fs.h> #include <linux/kernel.h> +#include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -1326,6 +1327,8 @@ pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file) */ pvr_file->pvr_dev = pvr_dev; + INIT_LIST_HEAD(&pvr_file->contexts); + xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1); xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1); xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1); diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 97c0f772ed65..7bd6ba4c6e8a 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -14,6 +14,7 @@ #include <drm/drm_gem.h> #include <drm/drm_gpuvm.h> +#include <linux/bug.h> #include <linux/container_of.h> #include <linux/err.h> #include <linux/errno.h> @@ -597,12 +598,26 @@ err_free: } /** - * pvr_vm_context_release() - Teardown a VM context. - * @ref_count: Pointer to reference counter of the VM context. + * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. + * @vm_ctx: Target VM context. * * This function ensures that no mappings are left dangling by unmapping them * all in order of ascending device-virtual address. */ +void +pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) +{ + WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, + vm_ctx->gpuvm_mgr.mm_range)); +} + +/** + * pvr_vm_context_release() - Teardown a VM context. + * @ref_count: Pointer to reference counter of the VM context. + * + * This function also ensures that no mappings are left dangling by calling + * pvr_vm_unmap_all().
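The teardown loop introduced above leans on the kref_get_unless_zero() idiom: an entry is only operated on outside the lock if a reference could still be taken, so a context already in its release path is skipped rather than resurrected. A minimal userspace model of that idiom (struct ctx and the names are invented for the sketch, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	atomic_int refcount; /* 0 => already on its way to destruction */
};

static bool get_if_referenced(struct ctx *c)
{
	int old = atomic_load(&c->refcount);

	/* Only pin the object if somebody else still holds a reference. */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&c->refcount, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct ctx live = { 1 };
	struct ctx dying = { 0 };

	printf("live: %d, dying: %d\n",
	       get_if_referenced(&live), get_if_referenced(&dying));
	return 0;
}

Dropping ctx_list_lock before calling pvr_vm_unmap_all() is what makes the conditional get necessary in the first place: the release path can run concurrently once the lock is no longer held.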
+ */ static void pvr_vm_context_release(struct kref *ref_count) { @@ -612,8 +627,7 @@ pvr_vm_context_release(struct kref *ref_count) if (vm_ctx->fw_mem_ctx_obj) pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj); - WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, - vm_ctx->gpuvm_mgr.mm_range)); + pvr_vm_unmap_all(vm_ctx); pvr_mmu_context_destroy(vm_ctx->mmu_ctx); drm_gem_private_object_fini(&vm_ctx->dummy_gem); diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h index f2a6463f2b05..79406243617c 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.h +++ b/drivers/gpu/drm/imagination/pvr_vm.h @@ -39,6 +39,7 @@ int pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, u64 device_addr, u64 size); int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size); +void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx); dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx); struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx); diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 175b00e5a253..eb0e1233ad04 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -127,9 +127,8 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc) mtk_mutex_put(mtk_crtc->mutex); #if IS_REACHABLE(CONFIG_MTK_CMDQ) - cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); - if (mtk_crtc->cmdq_client.chan) { + cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); mbox_free_channel(mtk_crtc->cmdq_client.chan); mtk_crtc->cmdq_client.chan = NULL; } @@ -913,6 +912,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, BIT(pipe), mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes), mtk_ddp_comp_supported_rotations(comp), + mtk_ddp_comp_get_blend_modes(comp), mtk_ddp_comp_get_formats(comp), mtk_ddp_comp_get_num_formats(comp), i); if (ret) diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index be66d94be361..edc6417639e6 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -363,6 +363,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .layer_config = mtk_ovl_layer_config, .bgclr_in_on = mtk_ovl_bgclr_in_on, .bgclr_in_off = mtk_ovl_bgclr_in_off, + .get_blend_modes = mtk_ovl_get_blend_modes, .get_formats = mtk_ovl_get_formats, .get_num_formats = mtk_ovl_get_num_formats, }; @@ -416,6 +417,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = { .disconnect = mtk_ovl_adaptor_disconnect, .add = mtk_ovl_adaptor_add_comp, .remove = mtk_ovl_adaptor_remove_comp, + .get_blend_modes = mtk_ovl_adaptor_get_blend_modes, .get_formats = mtk_ovl_adaptor_get_formats, .get_num_formats = mtk_ovl_adaptor_get_num_formats, .mode_valid = mtk_ovl_adaptor_mode_valid, diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index ecf6dc283cd7..39720b27f4e9 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -80,6 +80,7 @@ struct mtk_ddp_comp_funcs { void (*ctm_set)(struct device *dev, struct drm_crtc_state *state); struct device * (*dma_dev_get)(struct device *dev); + u32 (*get_blend_modes)(struct device *dev); const u32 *(*get_formats)(struct device *dev); size_t (*get_num_formats)(struct device *dev); void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); @@ -267,6 +268,15 @@ static inline struct device 
*mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp) } static inline +u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->get_blend_modes) + return comp->funcs->get_blend_modes(comp->dev); + + return 0; +} + +static inline const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp) { if (comp->funcs && comp->funcs->get_formats) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 082ac18fe04a..04154db9085c 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -103,6 +103,7 @@ void mtk_ovl_register_vblank_cb(struct device *dev, void mtk_ovl_unregister_vblank_cb(struct device *dev); void mtk_ovl_enable_vblank(struct device *dev); void mtk_ovl_disable_vblank(struct device *dev); +u32 mtk_ovl_get_blend_modes(struct device *dev); const u32 *mtk_ovl_get_formats(struct device *dev); size_t mtk_ovl_get_num_formats(struct device *dev); @@ -131,6 +132,7 @@ void mtk_ovl_adaptor_start(struct device *dev); void mtk_ovl_adaptor_stop(struct device *dev); unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev); struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev); +u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev); const u32 *mtk_ovl_adaptor_get_formats(struct device *dev); size_t mtk_ovl_adaptor_get_num_formats(struct device *dev); enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 89b439dcf3a6..e0c0bb01f65a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -65,8 +65,8 @@ #define OVL_CON_CLRFMT_RGB (1 << 12) #define OVL_CON_CLRFMT_ARGB8888 (2 << 12) #define OVL_CON_CLRFMT_RGBA8888 (3 << 12) -#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) -#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) +#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) #define OVL_CON_CLRFMT_UYVY (4 << 12) #define OVL_CON_CLRFMT_YUYV (5 << 12) #define OVL_CON_MTX_YUV_TO_RGB (6 << 16) @@ -146,6 +146,7 @@ struct mtk_disp_ovl_data { bool fmt_rgb565_is_0; bool smi_id_en; bool supports_afbc; + const u32 blend_modes; const u32 *formats; size_t num_formats; bool supports_clrfmt_ext; @@ -214,6 +215,13 @@ void mtk_ovl_disable_vblank(struct device *dev) writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN); } +u32 mtk_ovl_get_blend_modes(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->blend_modes; +} + const u32 *mtk_ovl_get_formats(struct device *dev) { struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); @@ -386,14 +394,27 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx, DISP_REG_OVL_RDMA_CTRL(idx)); } -static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt, - unsigned int blend_mode) +static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl, + struct mtk_plane_state *state) { - /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" - * is defined in mediatek HW data sheet. - * The alphabet order in XXX is no relation to data - * arrangement in memory. 
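mtk_ddp_comp_get_blend_modes() above follows the component layer's usual optional-callback convention: a component that doesn't implement the hook degrades to a neutral default (0, advertising no blend modes). A compact userspace model of that convention (all names and bit values here are placeholders, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct comp_funcs {
	uint32_t (*get_blend_modes)(void *dev); /* optional hook */
};

struct comp {
	const struct comp_funcs *funcs;
	void *dev;
};

static uint32_t comp_get_blend_modes(const struct comp *c)
{
	/* Missing hook degrades to 0: no blend modes advertised. */
	if (c->funcs && c->funcs->get_blend_modes)
		return c->funcs->get_blend_modes(c->dev);
	return 0;
}

static uint32_t ovl_get_blend_modes(void *dev)
{
	(void)dev;
	return (1u << 0) | (1u << 1); /* stand-ins for two blend-mode bits */
}

int main(void)
{
	static const struct comp_funcs ovl_funcs = { ovl_get_blend_modes };
	const struct comp with_hook = { &ovl_funcs, NULL };
	const struct comp without_hook = { NULL, NULL };

	printf("%#x %#x\n", comp_get_blend_modes(&with_hook),
	       comp_get_blend_modes(&without_hook));
	return 0;
}

A zero result then lets mtk_plane_init() skip creating the blend-mode property altogether, as the mtk_plane.c hunk further below does.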
+ unsigned int fmt = state->pending.format; + unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE; + + /* + * On platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet, + * premultiplied color formats such as OVL_CON_CLRFMT_PARGB8888 are supported. + * + * Check blend_modes in the driver data to see if premultiplied mode is supported. + * If not, use coverage mode instead so that a supported color format is set. + * + * The current DRM assumption is that alpha is premultiplied by default, so the bitmask of + * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init() + * will get an error return from drm_plane_create_blend_mode_property() and + * state->base.pixel_blend_mode should not be used. */ + if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI)) + blend_mode = state->base.pixel_blend_mode; + switch (fmt) { default: case DRM_FORMAT_RGB565: @@ -471,20 +492,26 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx, return; } - con = ovl_fmt_convert(ovl, fmt, blend_mode); + con = mtk_ovl_fmt_convert(ovl, state); if (state->base.fb) { - con |= OVL_CON_AEN; con |= state->base.alpha & OVL_CON_ALPHA; - } - /* CONST_BLD must be enabled for XRGB formats although the alpha channel * can be ignored, or OVL will still read the value from memory. * For RGB888 related formats, whether CONST_BLD is enabled or not won't * affect the result. Therefore we use !has_alpha as the condition. */ - if ((state->base.fb && !state->base.fb->format->has_alpha) || - blend_mode == DRM_MODE_BLEND_PIXEL_NONE) - ignore_pixel_alpha = OVL_CONST_BLEND; + /* + * On SoCs that support blend_modes, always enable alpha blending. + * On SoCs that do not, enable alpha blending only when has_alpha is set. + */ + if (blend_mode || state->base.fb->format->has_alpha) + con |= OVL_CON_AEN; + + /* + * Although the alpha channel can be ignored, CONST_BLD must be enabled + * for XRGB format, otherwise OVL will still read the value from memory. + * For RGB888 related formats, whether CONST_BLD is enabled or not won't + * affect the result. Therefore we use !has_alpha as the condition.
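The two comments here amount to a small decision table over the blend mode and the format's alpha bit. A hedged model of that table, assuming a framebuffer is bound and using the standard DRM blend-mode values (the register bit positions are placeholders):

#include <stdbool.h>
#include <stdio.h>

#define BLEND_PREMULTI   0 /* mirrors DRM_MODE_BLEND_PREMULTI */
#define BLEND_COVERAGE   1
#define BLEND_PIXEL_NONE 2

#define CON_AEN     (1u << 8) /* placeholder bit positions */
#define CONST_BLEND (1u << 9)

static unsigned int layer_blend_flags(int blend_mode, bool has_alpha)
{
	unsigned int con = 0;

	/* Non-premultiplied modes always blend; premultiplied blends only
	 * when the format actually carries alpha. */
	if (blend_mode != BLEND_PREMULTI || has_alpha)
		con |= CON_AEN;

	/* XRGB-style formats and PIXEL_NONE must ignore the stored alpha. */
	if (blend_mode == BLEND_PIXEL_NONE || !has_alpha)
		con |= CONST_BLEND;

	return con;
}

int main(void)
{
	printf("XRGB coverage: %#x\n", layer_blend_flags(BLEND_COVERAGE, false));
	printf("ARGB premulti: %#x\n", layer_blend_flags(BLEND_PREMULTI, true));
	return 0;
}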
+ */ + if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha) + ignore_pixel_alpha = OVL_CONST_BLEND; + } if (pending->rotation & DRM_MODE_REFLECT_Y) { con |= OVL_CON_VIRT_FLIP; @@ -663,6 +690,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = { .layer_nr = 4, .fmt_rgb565_is_0 = true, .smi_id_en = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8173_formats, .num_formats = ARRAY_SIZE(mt8173_formats), }; @@ -673,6 +703,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = { .layer_nr = 2, .fmt_rgb565_is_0 = true, .smi_id_en = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8173_formats, .num_formats = ARRAY_SIZE(mt8173_formats), }; @@ -684,6 +717,9 @@ static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = { .fmt_rgb565_is_0 = true, .smi_id_en = true, .supports_afbc = true, + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE), .formats = mt8195_formats, .num_formats = ARRAY_SIZE(mt8195_formats), .supports_clrfmt_ext = true, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index c6768210b08b..bf2546c4681a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -400,6 +400,13 @@ void mtk_ovl_adaptor_disable_vblank(struct device *dev) mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); } +u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); +} + const u32 *mtk_ovl_adaptor_get_formats(struct device *dev) { struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index d8796a904eca..f2bee617f063 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -145,6 +145,89 @@ struct mtk_dp_data { u16 audio_m_div2_bit; }; +static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = { + [MTK_DP_CAL_GLB_BIAS_TRIM] = { + .idx = 0, + .shift = 10, + .mask = 0x1f, + .min_val = 1, + .max_val = 0x1e, + .default_val = 0xf, + }, + [MTK_DP_CAL_CLKTX_IMPSE] = { + .idx = 0, + .shift = 15, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = { + .idx = 1, + .shift = 0, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = { + .idx = 1, + .shift = 8, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = { + .idx = 1, + .shift = 16, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = { + .idx = 1, + .shift = 24, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = { + .idx = 1, + .shift = 4, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = { + .idx = 1, + .shift = 12, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = { + .idx = 1, + .shift = 20, + .mask = 0xf, + 
.min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = { + .idx = 1, + .shift = 28, + .mask = 0xf, + .min_val = 1, + .max_val = 0xe, + .default_val = 0x8, + }, +}; + static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = { [MTK_DP_CAL_GLB_BIAS_TRIM] = { .idx = 3, @@ -2771,7 +2854,7 @@ static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume); static const struct mtk_dp_data mt8188_dp_data = { .bridge_type = DRM_MODE_CONNECTOR_DisplayPort, .smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE, - .efuse_fmt = mt8195_dp_efuse_fmt, + .efuse_fmt = mt8188_dp_efuse_fmt, .audio_supported = true, .audio_pkt_in_hblank_area = true, .audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 77b50c56c124..3e807195a0d0 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -559,11 +559,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) * Configure the DMA segment size to make sure we get contiguous IOVA * when importing PRIME buffers. */ - ret = dma_set_max_seg_size(dma_dev, UINT_MAX); - if (ret) { - dev_err(dma_dev, "Failed to set DMA segment size\n"); - goto err_component_unbind; - } + dma_set_max_seg_size(dma_dev, UINT_MAX); ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c index d1d9cf8b10e1..0f22e7d337cb 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.c +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c @@ -145,6 +145,13 @@ static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } +u32 mtk_ethdr_get_blend_modes(struct device *dev) +{ + return BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE) | + BIT(DRM_MODE_BLEND_PIXEL_NONE); +} + void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, struct mtk_plane_state *state, struct cmdq_pkt *cmdq_pkt) diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.h b/drivers/gpu/drm/mediatek/mtk_ethdr.h index 81af9edea3f7..a72aeee46829 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.h +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.h @@ -13,6 +13,7 @@ void mtk_ethdr_clk_disable(struct device *dev); void mtk_ethdr_config(struct device *dev, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc, struct cmdq_pkt *cmdq_pkt); +u32 mtk_ethdr_get_blend_modes(struct device *dev); void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, struct mtk_plane_state *state, struct cmdq_pkt *cmdq_pkt); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 7d2cb4e0fafa..8a48b3b0a956 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -320,8 +320,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int supported_rotations, const u32 *formats, - size_t num_formats, unsigned int plane_idx) + unsigned int supported_rotations, const u32 blend_modes, + const u32 *formats, size_t num_formats, unsigned int plane_idx) { int err; @@ -366,12 +366,11 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, if (err) DRM_ERROR("failed to create property: alpha\n"); - err = drm_plane_create_blend_mode_property(plane, - BIT(DRM_MODE_BLEND_PREMULTI) | - BIT(DRM_MODE_BLEND_COVERAGE) | - 
BIT(DRM_MODE_BLEND_PIXEL_NONE)); - if (err) - DRM_ERROR("failed to create property: blend_mode\n"); + if (blend_modes) { + err = drm_plane_create_blend_mode_property(plane, blend_modes); + if (err) + DRM_ERROR("failed to create property: blend_mode\n"); + } drm_plane_helper_add(plane, &mtk_plane_helper_funcs); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h index 5b177eac67b7..3b13b89989c7 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -48,6 +48,6 @@ to_mtk_plane_state(struct drm_plane_state *state) int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int supported_rotations, const u32 *formats, - size_t num_formats, unsigned int plane_idx); + unsigned int supported_rotations, const u32 blend_modes, + const u32 *formats, size_t num_formats, unsigned int plane_idx); #endif diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 6623ee4e3277..9f5925693686 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -18,7 +18,6 @@ #include <drm/drm_managed.h> #include <drm/drm_module.h> #include <drm/drm_pciids.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -85,34 +84,6 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size) return offset - 65536; } -static irqreturn_t mgag200_irq_handler(int irq, void *arg) -{ - struct drm_device *dev = arg; - struct mga_device *mdev = to_mga_device(dev); - struct drm_crtc *crtc; - u32 status, ien; - - status = RREG32(MGAREG_STATUS); - - if (status & MGAREG_STATUS_VLINEPEN) { - ien = RREG32(MGAREG_IEN); - if (!(ien & MGAREG_IEN_VLINEIEN)) - goto out; - - crtc = drm_crtc_from_index(dev, 0); - if (WARN_ON_ONCE(!crtc)) - goto out; - drm_crtc_handle_vblank(crtc); - - WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); - - return IRQ_HANDLED; - } - -out: - return IRQ_NONE; -} - /* * DRM driver */ @@ -196,7 +167,6 @@ int mgag200_device_init(struct mga_device *mdev, const struct mgag200_device_funcs *funcs) { struct drm_device *dev = &mdev->base; - struct pci_dev *pdev = to_pci_dev(dev->dev); u8 crtcext3, misc; int ret; @@ -223,14 +193,6 @@ int mgag200_device_init(struct mga_device *mdev, mutex_unlock(&mdev->rmmio_lock); WREG32(MGAREG_IEN, 0); - WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); - - ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED, - dev->driver->name, dev); - if (ret) { - drm_err(dev, "Failed to acquire interrupt, error %d\n", ret); - return ret; - } return 0; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 4760ba92871b..988967eafbf2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -391,24 +391,17 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state); void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state); void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state); -bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, - int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode); #define MGAG200_CRTC_HELPER_FUNCS \ .mode_valid = mgag200_crtc_helper_mode_valid, \ 
.atomic_check = mgag200_crtc_helper_atomic_check, \ .atomic_flush = mgag200_crtc_helper_atomic_flush, \ .atomic_enable = mgag200_crtc_helper_atomic_enable, \ - .atomic_disable = mgag200_crtc_helper_atomic_disable, \ - .get_scanout_position = mgag200_crtc_helper_get_scanout_position + .atomic_disable = mgag200_crtc_helper_atomic_disable void mgag200_crtc_reset(struct drm_crtc *crtc); struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc); void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state); -int mgag200_crtc_enable_vblank(struct drm_crtc *crtc); -void mgag200_crtc_disable_vblank(struct drm_crtc *crtc); #define MGAG200_CRTC_FUNCS \ .reset = mgag200_crtc_reset, \ @@ -416,10 +409,7 @@ void mgag200_crtc_disable_vblank(struct drm_crtc *crtc); .set_config = drm_atomic_helper_set_config, \ .page_flip = drm_atomic_helper_page_flip, \ .atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \ - .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \ - .enable_vblank = mgag200_crtc_enable_vblank, \ - .disable_vblank = mgag200_crtc_disable_vblank, \ - .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp + .atomic_destroy_state = mgag200_crtc_atomic_destroy_state void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode, bool set_vidrst); diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c index 77ce8d36cef0..f874e2949840 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -404,9 +403,5 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c index 09ced65c1d2f..e2305f8e00f8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -276,9 +275,5 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c index 5daa469137bd..11ae76eb081d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c @@ -7,7 +7,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -181,9 +180,5 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev, drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c index 09cfffafe130..c20ed0ab50ec 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200er.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c @@ -8,7 +8,6 @@ #include 
<drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -206,8 +205,6 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = { @@ -215,8 +212,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200er_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = { @@ -312,9 +308,5 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c index 3d48baa91d8b..78be964eb97c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -207,8 +206,6 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = { @@ -216,8 +213,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = { @@ -317,9 +313,5 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c index dabc778e64e8..31624c9ab7b7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c @@ -7,7 +7,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -199,9 +198,5 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev, drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c index 9dcbe8304271..7a32d3b1d226 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200se.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> 
#include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -338,8 +337,6 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = { @@ -347,8 +344,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = { .atomic_check = mgag200_crtc_helper_atomic_check, .atomic_flush = mgag200_crtc_helper_atomic_flush, .atomic_enable = mgag200_g200se_crtc_helper_atomic_enable, - .atomic_disable = mgag200_crtc_helper_atomic_disable, - .get_scanout_position = mgag200_crtc_helper_get_scanout_position, + .atomic_disable = mgag200_crtc_helper_atomic_disable }; static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = { @@ -517,9 +513,5 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c index 83a24aedbf2f..a0e7b9ad46cd 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c @@ -8,7 +8,6 @@ #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vblank.h> #include "mgag200_drv.h" @@ -323,9 +322,5 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); - ret = drm_vblank_init(dev, 1); - if (ret) - return ERR_PTR(ret); - return mdev; } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 7159909aca1e..fb71658c3117 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -22,7 +22,6 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_panic.h> #include <drm/drm_print.h> -#include <drm/drm_vblank.h> #include "mgag200_ddc.h" #include "mgag200_drv.h" @@ -227,14 +226,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod vblkstr = mode->crtc_vblank_start; vblkend = vtotal + 1; - /* - * There's no VBLANK interrupt on Matrox chipsets, so we use - * the VLINE interrupt instead. It triggers when the current - * <linecomp> has been reached. For VBLANK, this is the first - * non-visible line at the bottom of the screen. Therefore, - * keep <linecomp> in sync with <vblkstr>. 
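For context on the mgag200_mode.c hunk below: all of these values derive from the adjusted display mode, and with the VLINE-based vblank emulation removed, <linecomp> can track the end of the visible area instead of <vblkstr>. A simplified model (the N-1 encoding of vdispend is assumed, the struct stands in for drm_display_mode, timings are 1024x768@60):

#include <stdio.h>

struct display_mode {
	int crtc_vdisplay;
	int crtc_vblank_start;
	int crtc_vtotal;
};

int main(void)
{
	const struct display_mode mode = { 768, 768, 806 };
	int vdispend = mode.crtc_vdisplay - 1; /* assumed N-1 register encoding */
	int vblkstr = mode.crtc_vblank_start;

	/* Old: linecomp tracked vblkstr so the VLINE interrupt could stand in
	 * for vblank. New: no emulation, so it tracks the visible area. */
	int linecomp_old = vblkstr;
	int linecomp_new = vdispend;

	printf("linecomp: %d -> %d\n", linecomp_old, linecomp_new);
	return 0;
}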
- */ - linecomp = vblkstr; + linecomp = vdispend; misc = RREG8(MGA_MISC_IN); @@ -645,8 +637,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); struct drm_device *dev = crtc->dev; struct mga_device *mdev = to_mga_device(dev); - struct drm_pending_vblank_event *event; - unsigned long flags; if (crtc_state->enable && crtc_state->color_mgmt_changed) { const struct drm_format_info *format = mgag200_crtc_state->format; @@ -656,18 +646,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s else mgag200_crtc_set_gamma_linear(mdev, format); } - - event = crtc->state->event; - if (event) { - crtc->state->event = NULL; - - spin_lock_irqsave(&dev->event_lock, flags); - if (drm_crtc_vblank_get(crtc) != 0) - drm_crtc_send_vblank_event(crtc, event); - else - drm_crtc_arm_vblank_event(crtc, event); - spin_unlock_irqrestore(&dev->event_lock, flags); - } } void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state) @@ -692,44 +670,15 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_ mgag200_crtc_set_gamma_linear(mdev, format); mgag200_enable_display(mdev); - - drm_crtc_vblank_on(crtc); } void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state) { struct mga_device *mdev = to_mga_device(crtc->dev); - drm_crtc_vblank_off(crtc); - mgag200_disable_display(mdev); } -bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq, - int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 vcount; - - if (stime) - *stime = ktime_get(); - - if (vpos) { - vcount = RREG32(MGAREG_VCOUNT); - *vpos = vcount & GENMASK(11, 0); - } - - if (hpos) - *hpos = mode->htotal >> 1; // near middle of scanline on average - - if (etime) - *etime = ktime_get(); - - return true; -} - void mgag200_crtc_reset(struct drm_crtc *crtc) { struct mgag200_crtc_state *mgag200_crtc_state; @@ -774,30 +723,6 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st kfree(mgag200_crtc_state); } -int mgag200_crtc_enable_vblank(struct drm_crtc *crtc) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 ien; - - WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR); - - ien = RREG32(MGAREG_IEN); - ien |= MGAREG_IEN_VLINEIEN; - WREG32(MGAREG_IEN, ien); - - return 0; -} - -void mgag200_crtc_disable_vblank(struct drm_crtc *crtc) -{ - struct mga_device *mdev = to_mga_device(crtc->dev); - u32 ien; - - ien = RREG32(MGAREG_IEN); - ien &= ~(MGAREG_IEN_VLINEIEN); - WREG32(MGAREG_IEN, ien); -} - /* * Mode config */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 06cab2c6fd66..702b8d4b3497 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -101,9 +101,10 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter, } static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, - struct msm_ringbuffer *ring, struct msm_file_private *ctx) + struct msm_ringbuffer *ring, struct msm_gem_submit *submit) { bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; + struct msm_file_private *ctx = submit->queue->ctx; struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; phys_addr_t ttbr; u32 asid; @@ -115,6 +116,15 @@ static void 
a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) return; + if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) { + /* Wait for previous submit to complete before continuing: */ + OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4); + OUT_RING(ring, 0); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, submit->seqno - 1); + } + if (!sysprof) { if (!adreno_is_a7xx(adreno_gpu)) { /* Turn off protected mode to write to special registers */ @@ -193,7 +203,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; - a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + a6xx_set_pagetable(a6xx_gpu, ring, submit); get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); @@ -283,7 +293,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_PKT7(ring, CP_THREAD_CONTROL, 1); OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR); - a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + a6xx_set_pagetable(a6xx_gpu, ring, submit); get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0), rbmemptr_stats(ring, index, cpcycles_start)); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4c1be2f0555f..db6c57900781 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -711,12 +711,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc) _dpu_crtc_complete_flip(crtc); } -static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, +static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); struct drm_display_mode *adj_mode = &state->adjusted_mode; u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers; + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); int i; for (i = 0; i < cstate->num_mixers; i++) { @@ -727,7 +728,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, r->y2 = adj_mode->vdisplay; trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r); + + if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width) + return -E2BIG; } + + return 0; } static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, @@ -803,7 +809,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); - _dpu_crtc_setup_lm_bounds(crtc, crtc->state); + _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state); /* encoder will trigger pending mask now */ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) @@ -1091,9 +1097,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, dpu_core_perf_crtc_update(crtc, 0); - memset(cstate->mixers, 0, sizeof(cstate->mixers)); - cstate->num_mixers = 0; - /* disable clk & bw control until clk & bw properties are set */ cstate->bw_control = false; cstate->bw_split_vote = false; @@ -1192,8 +1195,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, if (crtc_state->active_changed) crtc_state->mode_changed = true; - if (cstate->num_mixers) - _dpu_crtc_setup_lm_bounds(crtc, crtc_state); + if (cstate->num_mixers) { + rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state); + if (rc) + return rc; + } /* FIXME: move this to dpu_plane_atomic_check? 
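The new -E2BIG return in _dpu_crtc_check_and_setup_lm_bounds() is easiest to see with numbers: the adjusted mode is split evenly across the assigned layer mixers, and any span wider than the catalog limit is rejected. A standalone sketch of the rule; the 2560-pixel limit is an illustrative value, not taken from a real DPU catalog.

#include <stdio.h>

static int check_lm_bounds(int hdisplay, int num_mixers, int max_mixer_width)
{
        int split_width = hdisplay / num_mixers;

        return (split_width > max_mixer_width) ? -1 /* -E2BIG */ : 0;
}

int main(void)
{
        printf("%d\n", check_lm_bounds(5120, 2, 2560)); /* 0: each LM spans 2560 */
        printf("%d\n", check_lm_bounds(5120, 1, 2560)); /* -1: one LM cannot span 5120 */
        return 0;
}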
*/ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index b26d5fe40c72..febc3e764a63 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -231,7 +231,7 @@ struct dpu_crtc_state { container_of(x, struct dpu_crtc_state, base) /** - * dpu_crtc_frame_pending - retun the number of pending frames + * dpu_crtc_frame_pending - return the number of pending frames * @crtc: Pointer to drm crtc object */ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 3b171bf227d1..bd3698bf0cf7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -624,6 +624,40 @@ static struct msm_display_topology dpu_encoder_get_topology( return topology; } +static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms, + struct drm_encoder *drm_enc, + struct dpu_global_state *global_state, + struct drm_crtc_state *crtc_state) +{ + struct dpu_crtc_state *cstate; + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC]; + int num_lm, num_ctl, num_dspp, i; + + cstate = to_dpu_crtc_state(crtc_state); + + memset(cstate->mixers, 0, sizeof(cstate->mixers)); + + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); + num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, + ARRAY_SIZE(hw_dspp)); + + for (i = 0; i < num_lm; i++) { + int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); + + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); + cstate->mixers[i].hw_dspp = i < num_dspp ? 
to_dpu_hw_dspp(hw_dspp[i]) : NULL; + } + + cstate->num_mixers = num_lm; +} + static int dpu_encoder_virt_atomic_check( struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, @@ -692,6 +726,9 @@ static int dpu_encoder_virt_atomic_check( if (!crtc_state->active_changed || crtc_state->enable) ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc, crtc_state, topology); + if (!ret) + dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc, + global_state, crtc_state); } trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags); @@ -1093,14 +1130,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, struct dpu_encoder_virt *dpu_enc; struct msm_drm_private *priv; struct dpu_kms *dpu_kms; - struct dpu_crtc_state *cstate; struct dpu_global_state *global_state; struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; - struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL }; struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; - int num_lm, num_ctl, num_pp, num_dsc; + int num_ctl, num_pp, num_dsc; unsigned int dsc_mask = 0; int i; @@ -1129,11 +1163,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, ARRAY_SIZE(hw_pp)); num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); - num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); - dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, - drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, - ARRAY_SIZE(hw_dspp)); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i]) @@ -1159,36 +1188,23 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL; } - cstate = to_dpu_crtc_state(crtc_state); - - for (i = 0; i < num_lm; i++) { - int ctl_idx = (i < num_ctl) ? i : (num_ctl-1); - - cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); - cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); - cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); - } - - cstate->num_mixers = num_lm; - for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; - if (!dpu_enc->hw_pp[i]) { + phys->hw_pp = dpu_enc->hw_pp[i]; + if (!phys->hw_pp) { DPU_ERROR_ENC(dpu_enc, "no pp block assigned at idx: %d\n", i); return; } - if (!hw_ctl[i]) { + phys->hw_ctl = i < num_ctl ? 
to_dpu_hw_ctl(hw_ctl[i]) : NULL; + if (!phys->hw_ctl) { DPU_ERROR_ENC(dpu_enc, "no ctl block assigned at idx: %d\n", i); return; } - phys->hw_pp = dpu_enc->hw_pp[i]; - phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); - phys->cached_mode = crtc_state->adjusted_mode; if (phys->ops.atomic_mode_set) phys->ops.atomic_mode_set(phys, crtc_state, conn_state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index ba8878d21cf0..d8a2edebfe8c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -302,7 +302,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( intf_cfg.stream_sel = 0; /* Don't care value for video mode */ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc); - if (phys_enc->hw_pp->merge_3d) + if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d) intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); @@ -440,10 +440,12 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) struct dpu_hw_ctl *ctl; const struct msm_format *fmt; u32 fmt_fourcc; + u32 mode_3d; ctl = phys_enc->hw_ctl; fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc); fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0); + mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); DPU_DEBUG_VIDENC(phys_enc, "\n"); @@ -466,7 +468,8 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) goto skip_flush; ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx); - if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d) + if (mode_3d && ctl->ops.update_pending_flush_merge_3d && + phys_enc->hw_pp->merge_3d) ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx); if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c index 882c717859ce..07035ab77b79 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -275,6 +275,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) struct dpu_hw_pingpong *hw_pp; struct dpu_hw_cdm *hw_cdm; u32 pending_flush = 0; + u32 mode_3d; if (!phys_enc) return; @@ -283,6 +284,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) hw_pp = phys_enc->hw_pp; hw_ctl = phys_enc->hw_ctl; hw_cdm = phys_enc->hw_cdm; + mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); @@ -294,7 +296,8 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) if (hw_ctl->ops.update_pending_flush_wb) hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx); - if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d) + if (mode_3d && hw_ctl->ops.update_pending_flush_merge_3d && + hw_pp && hw_pp->merge_3d) hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl, hw_pp->merge_3d->idx); diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index add72bbc28b1..4d55e3cf570f 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -26,7 +26,7 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b end_addr 
= base_addr + aligned_len; if (!(*reg)) - *reg = kzalloc(len_padded, GFP_KERNEL); + *reg = kvzalloc(len_padded, GFP_KERNEL); if (*reg) dump_addr = *reg; @@ -48,20 +48,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b } } -static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr, - struct drm_printer *p) +static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len, + void __iomem *base_addr, struct drm_printer *p) { int i; - u32 *dump_addr = NULL; void __iomem *addr; u32 num_rows; + if (!dump_addr) { + drm_printf(p, "Registers not stored\n"); + return; + } + addr = base_addr; num_rows = len / REG_DUMP_ALIGN; - if (*reg) - dump_addr = *reg; - for (i = 0; i < num_rows; i++) { drm_printf(p, "0x%lx : %08x %08x %08x %08x\n", (unsigned long)(addr - base_addr), @@ -89,7 +90,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p) list_for_each_entry_safe(block, tmp, &state->blocks, node) { drm_printf(p, "====================%s================\n", block->name); - msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p); + msm_disp_state_print_regs(block->state, block->size, block->base_addr, p); } drm_printf(p, "===================dpu drm state================\n"); @@ -161,7 +162,7 @@ void msm_disp_state_free(void *data) list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) { list_del(&block->node); - kfree(block->state); + kvfree(block->state); kfree(block); } diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 185d7de0bf37..a98d24b7cb00 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -542,7 +542,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay; - return new_htotal * mode->vtotal * drm_mode_vrefresh(mode); + return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal); } static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, @@ -550,7 +550,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, { unsigned long pclk_rate; - pclk_rate = mode->clock * 1000; + pclk_rate = mode->clock * 1000u; if (dsc) pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c index 0e3a2b16a2ce..e6ffaf92d26d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c @@ -153,15 +153,6 @@ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk) return dividend - 1; } -static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk) -{ - u64 fdata = ((u64)pll_cmp) * ref_clk * 10; - - do_div(fdata, HDMI_PLL_CMP_CNT); - - return fdata; -} - #define HDMI_REF_CLOCK_HZ ((u64)19200000) #define HDMI_MHZ_TO_HZ ((u64)1000000) static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk) diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 4494f6d1c7cb..7ab607252d18 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -357,12 +357,10 @@ void msm_debugfs_init(struct drm_minor *minor) if (priv->kms && priv->kms->funcs->debugfs_init) priv->kms->funcs->debugfs_init(priv->kms, minor); -#ifdef CONFIG_FAULT_INJECTION fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root, &fail_gem_alloc); fault_create_debugfs_attr("fail_gem_iova", 
minor->debugfs_root, &fail_gem_iova); -#endif } #endif diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9c33f4e3f822..8c13b08708d2 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -7,6 +7,7 @@ #include <linux/dma-mapping.h> #include <linux/fault-inject.h> +#include <linux/debugfs.h> #include <linux/of_address.h> #include <linux/uaccess.h> @@ -58,10 +59,8 @@ static bool modeset = true; MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)"); module_param(modeset, bool, 0600); -#ifdef CONFIG_FAULT_INJECTION DECLARE_FAULT_ATTR(fail_gem_alloc); DECLARE_FAULT_ATTR(fail_gem_iova); -#endif static int msm_drm_uninit(struct device *dev) { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 0d3adf398bc1..2e28a1344636 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -33,12 +33,8 @@ #include <drm/msm_drm.h> #include <drm/drm_gem.h> -#ifdef CONFIG_FAULT_INJECTION extern struct fault_attr fail_gem_alloc; extern struct fault_attr fail_gem_iova; -#else -# define should_fail(attr, size) 0 -#endif struct msm_kms; struct msm_gpu; diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c index 3d3da79fec2a..d3c7889aaf26 100644 --- a/drivers/gpu/drm/msm/msm_perf.c +++ b/drivers/gpu/drm/msm/msm_perf.c @@ -192,7 +192,6 @@ static const struct file_operations perf_debugfs_fops = { .owner = THIS_MODULE, .open = perf_open, .read = perf_read, - .llseek = no_llseek, .release = perf_release, }; diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index ca44fd291c5b..39138e190cb9 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -227,7 +227,6 @@ static const struct file_operations rd_debugfs_fops = { .owner = THIS_MODULE, .open = rd_open, .read = rd_read, - .llseek = no_llseek, .release = rd_release, }; diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h index a2eaf3929ac3..4a1123b81fee 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/os.h +++ b/drivers/gpu/drm/nouveau/include/nvif/os.h @@ -30,7 +30,7 @@ #include <linux/iommu.h> #include <linux/of_device.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <soc/tegra/fuse.h> #include <soc/tegra/pmc.h> diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 9e6f39912368..a2055f2a014a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -210,7 +210,7 @@ struct nvkm_gsp { } *rm; struct { - struct mutex mutex;; + struct mutex mutex; struct idr idr; } client_id; diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 1f2d649f4b96..1a072568cef6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) if (!spage || !(src & MIGRATE_PFN_MIGRATE)) goto done; - dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address); + dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address); if (!dpage) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 4a9a9b9c3935..34985771b2a2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -331,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm 
*drm) return; } - ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan); + ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan); if (ret) NV_ERROR(drm, "failed to create ce channel, %d\n", ret); } @@ -1292,6 +1292,7 @@ nouveau_driver_fops = { .compat_ioctl = nouveau_compat_ioctl, #endif .llseek = noop_llseek, + .fop_flags = FOP_UNSIGNED_OFFSET, }; static struct drm_driver diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h index 50f0c1914f58..4c3f74396579 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h @@ -46,6 +46,8 @@ u32 gm107_ram_probe_fbp(const struct nvkm_ram_func *, u32 gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32, struct nvkm_device *, int, int *); +int gp100_ram_init(struct nvkm_ram *); + /* RAM type-specific MR calculation routines */ int nvkm_sddr2_calc(struct nvkm_ram *); int nvkm_sddr3_calc(struct nvkm_ram *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c index 378f6fb70990..8987a21e81d1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c @@ -27,7 +27,7 @@ #include <subdev/bios/init.h> #include <subdev/bios/rammap.h> -static int +int gp100_ram_init(struct nvkm_ram *ram) { struct nvkm_subdev *subdev = &ram->fb->subdev; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c index 8550f5e47347..b6b6ee59019d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c @@ -5,6 +5,7 @@ static const struct nvkm_ram_func gp102_ram = { + .init = gp100_ram_init, }; int diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c index 6e4b7e4644ce..8b48bba18131 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx83102.c +++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c @@ -298,7 +298,7 @@ static int ivo_t109nw41_init(struct hx83102 *ctx) msleep(60); hx83102_enable_extended_cmds(&dsi_ctx, true); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x0f, 0xcf, 0x42, + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x27, 0xe7, 0x52, 0xf5, 0x39, 0x36, 0x36, 0x36, 0x36, 0x32, 0x8b, 0x11, 0x65, 0x00, 0x88, 0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0xd6, 0x33); mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x12, @@ -343,11 +343,11 @@ static int ivo_t109nw41_init(struct hx83102 *ctx) 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, - 0x12, 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, - 0x7a, 0x41, 0x50, 0x68, 0x73, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, 0x12, - 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, 0x7a, - 0x41, 0x50, 0x68, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, + 0x48, 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, + 0x9c, 0x4d, 0x56, 0x5d, 0x73, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, 0x48, + 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, 0x9c, + 0x4d, 0x56, 0x5d, 0x73); 
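Two recurring changes in this series are worth unpacking. First, the dsi_host.c hunk earlier replaces new_htotal * mode->vtotal * drm_mode_vrefresh(mode) with mult_frac(mode->clock * 1000u, new_htotal, mode->htotal): the old expression rebuilt the pixel clock from a rounded integer refresh rate, while the new one scales the true clock by the compressed/uncompressed htotal ratio, and mult_frac() divides before and after the multiply so the intermediate product stays small. A simplified userspace rendition follows (the kernel macro additionally guards against double evaluation); the timing values are illustrative.

#include <stdio.h>
#include <stdint.h>

#define mult_frac(x, n, d) ((x) / (d) * (n) + ((x) % (d)) * (n) / (d))

int main(void)
{
        uint64_t clock_hz = 148500000;             /* 1080p60-ish source clock */
        uint64_t htotal = 2200, new_htotal = 1650; /* DSC-compressed timing */

        /* 148500000 / 2200 * 1650 = 111375000 Hz, no oversized intermediate */
        printf("%llu\n", (unsigned long long)mult_frac(clock_hz, new_htotal, htotal));
        return 0;
}

Second, several drivers in this series (nouveau, panthor, radeon, tegra, vmwgfx) add FOP_UNSIGNED_OFFSET to their file_operations so that DRM's large fake mmap offsets are treated as unsigned file positions rather than rejected as negative. A sketch of the shape, with a fictitious driver name:

static const struct file_operations foo_drm_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap           = drm_gem_mmap,
        .poll           = drm_poll,
        .read           = drm_read,
        .llseek         = noop_llseek,
        .fop_flags      = FOP_UNSIGNED_OFFSET,
};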
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x1a, 0x26, 0x9e, 0x00, 0x4f, 0xa0, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x0a, 0x02, 0x02, 0x00, 0x33, 0x02, 0x04, 0x18, 0x01); diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index 4082c8f2951d..6fbff516c1c1 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -390,11 +390,15 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct * { u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + switch (offset) { case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET: if (vma->vm_end - vma->vm_start != PAGE_SIZE || (vma->vm_flags & (VM_WRITE | VM_EXEC))) return -EINVAL; + vm_flags_clear(vma, VM_MAYWRITE); break; diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 34182f67136c..c520f156e2d7 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -1383,6 +1383,7 @@ static const struct file_operations panthor_drm_driver_fops = { .read = drm_read, .llseek = noop_llseek, .mmap = panthor_mmap, + .fop_flags = FOP_UNSIGNED_OFFSET, }; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c index ef232c0c2049..4e2d3a02ea06 100644 --- a/drivers/gpu/drm/panthor/panthor_fw.c +++ b/drivers/gpu/drm/panthor/panthor_fw.c @@ -487,6 +487,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, struct panthor_fw_binary_iter *iter, u32 ehdr) { + ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm); struct panthor_fw_binary_section_entry_hdr hdr; struct panthor_fw_section *section; u32 section_size; @@ -515,8 +516,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev, return -EINVAL; } - if ((hdr.va.start & ~PAGE_MASK) != 0 || - (hdr.va.end & ~PAGE_MASK) != 0) { + if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) { drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n", hdr.va.start, hdr.va.end); return -EINVAL; diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index 38f560864879..be97d56bc011 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo) to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm))) goto out_free_bo; - ret = panthor_vm_unmap_range(vm, bo->va_node.start, - panthor_kernel_bo_size(bo)); + ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size); if (ret) goto out_free_bo; @@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm, } bo = to_panthor_bo(&obj->base); - size = obj->base.size; kbo->obj = &obj->base; bo->flags = bo_flags; + /* The system and GPU MMU page size might differ, which becomes a + * problem for FW sections that need to be mapped at explicit address + * since our PAGE_SIZE alignment might cover a VA range that's + * expected to be used for another section. + * Make sure we never map more than we need. 
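The panthor_device.c hunk above tightens what userspace may do with the FLUSH_ID I/O page: the mapping must be shared, exactly one page, neither writable nor executable, and VM_MAYWRITE is cleared so a later mprotect(PROT_WRITE) cannot upgrade it. Condensed here into a single sketch function; the real code spreads these checks across the offset switch.

static int sketch_mmap_flush_id_page(struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
            (vma->vm_flags & (VM_WRITE | VM_EXEC)))
                return -EINVAL;

        /* Forbid upgrading the mapping to writable after the fact. */
        vm_flags_clear(vma, VM_MAYWRITE);
        return 0;
}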
+ */ + size = ALIGN(size, panthor_vm_page_size(vm)); ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node); if (ret) goto err_put_obj; diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index bbc12728437f..7db2edb3374c 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm) mutex_unlock(&ptdev->mmu->as.slots_lock); } +u32 panthor_vm_page_size(struct panthor_vm *vm) +{ + const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops); + u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1; + + return 1u << pg_shift; +} + static void panthor_vm_stop(struct panthor_vm *vm) { drm_sched_stop(&vm->sched, NULL); @@ -1025,12 +1033,13 @@ int panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, struct drm_mm_node *va_node) { + ssize_t vm_pgsz = panthor_vm_page_size(vm); int ret; - if (!size || (size & ~PAGE_MASK)) + if (!size || !IS_ALIGNED(size, vm_pgsz)) return -EINVAL; - if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK)) + if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz)) return -EINVAL; mutex_lock(&vm->mm_lock); @@ -1251,9 +1260,17 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, goto err_cleanup; } + /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our + * pre-allocated BO if the <BO,VM> association exists. Given we + * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will + * be called immediately, and we have to hold the VM resv lock when + * calling this function. + */ + dma_resv_lock(panthor_vm_resv(vm), NULL); mutex_lock(&bo->gpuva_list_lock); op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo); mutex_unlock(&bo->gpuva_list_lock); + dma_resv_unlock(panthor_vm_resv(vm)); /* If the a vm_bo for this <VM,BO> combination exists, it already * retains a pin ref, and we can release the one we took earlier. @@ -1563,7 +1580,9 @@ panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle) { struct panthor_vm *vm; + xa_lock(&pool->xa); vm = panthor_vm_get(xa_load(&pool->xa, handle)); + xa_unlock(&pool->xa); return vm; } @@ -2358,11 +2377,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file, const struct drm_panthor_vm_bind_op *op, struct panthor_vm_op_ctx *op_ctx) { + ssize_t vm_pgsz = panthor_vm_page_size(vm); struct drm_gem_object *gem; int ret; /* Aligned on page size. */ - if ((op->va | op->size) & ~PAGE_MASK) + if (!IS_ALIGNED(op->va | op->size, vm_pgsz)) return -EINVAL; switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) { diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h index 6788771071e3..8d21e83d8aba 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.h +++ b/drivers/gpu/drm/panthor/panthor_mmu.h @@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset); int panthor_vm_active(struct panthor_vm *vm); void panthor_vm_idle(struct panthor_vm *vm); +u32 panthor_vm_page_size(struct panthor_vm *vm); int panthor_vm_as(struct panthor_vm *vm); int panthor_vm_flush_all(struct panthor_vm *vm); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 91a31b70c037..9929e22f4d8d 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -589,10 +589,11 @@ struct panthor_group { * @timedout: True when a timeout occurred on any of the queues owned by * this group. 
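panthor_vm_page_size() above derives the GPU MMU granule from the io-pgtable configuration by taking the lowest set bit of pgsize_bitmap, which is what all the new IS_ALIGNED() checks key off instead of the CPU's PAGE_MASK. A standalone worked example; the bitmap value is illustrative (4 KiB, 2 MiB and 1 GiB granules), not read from real hardware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t pgsize_bitmap = (1ULL << 12) | (1ULL << 21) | (1ULL << 30);
        unsigned int pg_shift = __builtin_ffsll(pgsize_bitmap) - 1;

        printf("min granule: %u bytes\n", 1u << pg_shift); /* 4096 */
        return 0;
}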
* - * Timeouts can be reported by drm_sched or by the FW. In any case, any - * timeout situation is unrecoverable, and the group becomes useless. - * We simply wait for all references to be dropped so we can release the - * group object. + * Timeouts can be reported by drm_sched or by the FW. If a reset is required, + * and the group can't be suspended, this also leads to a timeout. In any case, + * any timeout situation is unrecoverable, and the group becomes useless. We + * simply wait for all references to be dropped so we can release the group + * object. */ bool timedout; @@ -1103,7 +1104,13 @@ cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs list_move_tail(&group->wait_node, &group->ptdev->scheduler->groups.waiting); } - group->blocked_queues |= BIT(cs_id); + + /* The queue is only blocked if there's no deferred operation + * pending, which can be checked through the scoreboard status. + */ + if (!cs_iface->output->status_scoreboards) + group->blocked_queues |= BIT(cs_id); + queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr; queue->syncwait.ref = cs_iface->output->status_wait_sync_value; status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK; @@ -2046,6 +2053,7 @@ static void tick_ctx_cleanup(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx) { + struct panthor_device *ptdev = sched->ptdev; struct panthor_group *group, *tmp; u32 i; @@ -2054,7 +2062,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched, /* If everything went fine, we should only have groups * to be terminated in the old_groups lists. */ - drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask && + drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask && group_can_run(group)); if (!group_can_run(group)) { @@ -2077,7 +2085,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched, /* If everything went fine, the groups to schedule lists should * be empty. */ - drm_WARN_ON(&group->ptdev->base, + drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i])); list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) { @@ -2633,6 +2641,12 @@ void panthor_sched_suspend(struct panthor_device *ptdev) csgs_upd_ctx_init(&upd_ctx); while (slot_mask) { u32 csg_id = ffs(slot_mask) - 1; + struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; + + /* We consider group suspension failures as fatal and flag the + * group as unusable by setting timedout=true. 
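The scoreboard test above narrows when a queue is flagged as blocked: a sync-wait only counts if the CS scoreboards report no deferred operation still in flight. The bookkeeping itself is one bit per queue; a tiny worked example with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t blocked_queues = 0;
        uint32_t cs_id = 3;
        uint32_t status_scoreboards = 0; /* nothing pending on the CS */

        if (!status_scoreboards)
                blocked_queues |= 1u << cs_id; /* BIT(cs_id) */

        printf("blocked mask: 0x%x\n", blocked_queues); /* 0x8 */
        return 0;
}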
+ */ + csg_slot->group->timedout = true; csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, CSG_STATE_TERMINATE, @@ -3242,6 +3256,18 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle) return 0; } +static struct panthor_group *group_from_handle(struct panthor_group_pool *pool, + u32 group_handle) +{ + struct panthor_group *group; + + xa_lock(&pool->xa); + group = group_get(xa_load(&pool->xa, group_handle)); + xa_unlock(&pool->xa); + + return group; +} + int panthor_group_get_state(struct panthor_file *pfile, struct drm_panthor_group_get_state *get_state) { @@ -3253,7 +3279,7 @@ int panthor_group_get_state(struct panthor_file *pfile, if (get_state->pad) return -EINVAL; - group = group_get(xa_load(&gpool->xa, get_state->group_handle)); + group = group_from_handle(gpool, get_state->group_handle); if (!group) return -EINVAL; @@ -3384,12 +3410,17 @@ panthor_job_create(struct panthor_file *pfile, job->call_info.latest_flush = qsubmit->latest_flush; INIT_LIST_HEAD(&job->node); - job->group = group_get(xa_load(&gpool->xa, group_handle)); + job->group = group_from_handle(gpool, group_handle); if (!job->group) { ret = -EINVAL; goto err_put_job; } + if (!group_can_run(job->group)) { + ret = -EINVAL; + goto err_put_job; + } + if (job->queue_idx >= job->group->queue_count || !job->group->queues[job->queue_idx]) { ret = -EINVAL; @@ -3424,13 +3455,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched { struct panthor_job *job = container_of(sched_job, struct panthor_job, base); - /* Still not sure why we want USAGE_WRITE for external objects, since I - * was assuming this would be handled through explicit syncs being imported - * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers - * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason. 
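group_from_handle() above and panthor_vm_pool_get_vm() earlier in the patch fix the same lookup race: xa_load() and the refcount bump must happen under the XArray lock, otherwise the object can be unregistered and freed between the two steps. The generic shape, with a placeholder type and getter:

struct foo;                             /* placeholder object type */
struct foo *foo_get(struct foo *obj);   /* refcount get; tolerates NULL */

static struct foo *foo_from_handle(struct xarray *xa, unsigned long handle)
{
        struct foo *obj;

        xa_lock(xa);
        obj = foo_get(xa_load(xa, handle));
        xa_unlock(xa);

        return obj;
}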
- */ panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished, - DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP); } void panthor_sched_unplug(struct panthor_device *ptdev) diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 5bc3e6b41c34..b31125eb9a65 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -27,7 +27,7 @@ #include <linux/slab.h> #include <linux/string_helpers.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <drm/drm_device.h> #include <drm/drm_util.h> diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index fca8b08535a5..6328627b7c34 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -228,10 +228,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) { struct drm_device *dev = radeon_connector->base.dev; struct radeon_device *rdev = dev->dev_private; - int ret; radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd; - radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev; if (ASIC_IS_DCE5(rdev)) { if (radeon_auxch) @@ -242,11 +240,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; } - ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); - if (!ret) - radeon_connector->ddc_bus->has_aux = true; - - WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret); + drm_dp_aux_init(&radeon_connector->ddc_bus->aux); + radeon_connector->ddc_bus->has_aux = true; } /***** general DP utility functions *****/ diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 528a8f3677c2..f9c73c55f04f 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1786,6 +1786,20 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector return MODE_OK; } +static int +radeon_connector_late_register(struct drm_connector *connector) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + int r = 0; + + if (radeon_connector->ddc_bus->has_aux) { + radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; + r = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); + } + + return r; +} + static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { .get_modes = radeon_dp_get_modes, .mode_valid = radeon_dp_mode_valid, @@ -1800,6 +1814,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; static const struct drm_connector_funcs radeon_edp_connector_funcs = { @@ -1810,6 +1825,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = radeon_connector_late_register, }; static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { @@ -1820,6 +1836,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { .early_unregister = radeon_connector_unregister, .destroy = radeon_connector_destroy, .force = radeon_dvi_force, + .late_register = 
radeon_connector_late_register, }; void diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f36aa71c57c7..e5a6f3e7c75b 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -530,6 +530,7 @@ static const struct file_operations radeon_driver_kms_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = radeon_kms_compat_ioctl, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; static const struct drm_ioctl_desc radeon_ioctls_kms[] = { diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 0f723292409e..fafed331e0a0 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *clone_encoder; - uint32_t index_mask = 0; + uint32_t index_mask = drm_encoder_mask(encoder); int count; /* DIG routing gets problematic */ diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 9735f4968b86..bf2d4b16dc2a 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -44,8 +44,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); int radeon_gem_prime_pin(struct drm_gem_object *obj); void radeon_gem_prime_unpin(struct drm_gem_object *obj); -const struct drm_gem_object_funcs radeon_gem_object_funcs; - static vm_fault_t radeon_gem_fault(struct vm_fault *vmf) { struct ttm_buffer_object *bo = vmf->vma->vm_private_data; @@ -132,7 +130,6 @@ retry: return r; } *obj = &robj->tbo.base; - (*obj)->funcs = &radeon_gem_object_funcs; robj->pid = task_pid_nr(current); mutex_lock(&rdev->gem.mutex); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d0e4b43d155c..7672404fdb29 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -151,6 +151,7 @@ int radeon_bo_create(struct radeon_device *rdev, if (bo == NULL) return -ENOMEM; drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size); + bo->tbo.base.funcs = &radeon_gem_object_funcs; bo->rdev = rdev; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 44d769d9234d..11e5d10de4d7 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -103,13 +103,17 @@ static int rockchip_drm_init_iommu(struct drm_device *drm_dev) struct rockchip_drm_private *private = drm_dev->dev_private; struct iommu_domain_geometry *geometry; u64 start, end; + int ret; if (IS_ERR_OR_NULL(private->iommu_dev)) return 0; - private->domain = iommu_domain_alloc(private->iommu_dev->bus); - if (!private->domain) - return -ENOMEM; + private->domain = iommu_paging_domain_alloc(private->iommu_dev); + if (IS_ERR(private->domain)) { + ret = PTR_ERR(private->domain); + private->domain = NULL; + return ret; + } geometry = &private->domain->geometry; start = geometry->aperture_start; diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 58c8161289fe..a75eede8bf8d 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -133,8 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, { 
WARN_ON(!num_sched_list || !sched_list); + spin_lock(&entity->rq_lock); entity->sched_list = sched_list; entity->num_sched_list = num_sched_list; + spin_unlock(&entity->rq_lock); } EXPORT_SYMBOL(drm_sched_entity_modify_sched); @@ -380,7 +382,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, container_of(cb, struct drm_sched_entity, cb); drm_sched_entity_clear_dep(f, cb); - drm_sched_wakeup(entity->rq->sched, entity); + drm_sched_wakeup(entity->rq->sched); } /** @@ -597,6 +599,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) /* first job wakes up scheduler */ if (first) { + struct drm_gpu_scheduler *sched; + struct drm_sched_rq *rq; + /* Add the entity to the run queue */ spin_lock(&entity->rq_lock); if (entity->stopped) { @@ -606,13 +611,16 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) return; } - drm_sched_rq_add_entity(entity->rq, entity); + rq = entity->rq; + sched = rq->sched; + + drm_sched_rq_add_entity(rq, entity); spin_unlock(&entity->rq_lock); if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) drm_sched_rq_update_fifo(entity, submit_ts); - drm_sched_wakeup(entity->rq->sched, entity); + drm_sched_wakeup(sched); } } EXPORT_SYMBOL(drm_sched_entity_push_job); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index ab53ab486fe6..e97c6c60bc96 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -87,6 +87,12 @@ #define CREATE_TRACE_POINTS #include "gpu_scheduler_trace.h" +#ifdef CONFIG_LOCKDEP +static struct lockdep_map drm_sched_lockdep_map = { + .name = "drm_sched_lockdep_map" +}; +#endif + #define to_drm_sched_job(sched_job) \ container_of((sched_job), struct drm_sched_job, queue_node) @@ -1013,15 +1019,12 @@ EXPORT_SYMBOL(drm_sched_job_cleanup); /** * drm_sched_wakeup - Wake up the scheduler if it is ready to queue * @sched: scheduler instance - * @entity: the scheduler entity * * Wake up the scheduler if we can queue jobs. 
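The sched_entity changes close a race between drm_sched_entity_push_job() and drm_sched_entity_modify_sched(): once rq_lock is dropped, entity->rq may already point at a different run queue, so the wakeup must use pointers captured under the lock. The same logic as the hunk above, condensed and annotated:

struct drm_sched_rq *rq;
struct drm_gpu_scheduler *sched;

spin_lock(&entity->rq_lock);
/* entity->rq is only stable while rq_lock is held. */
rq = entity->rq;
sched = rq->sched;
drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);

/* Wake via the cached pointer; entity->rq may have been re-targeted. */
drm_sched_wakeup(sched);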
*/ -void drm_sched_wakeup(struct drm_gpu_scheduler *sched, - struct drm_sched_entity *entity) +void drm_sched_wakeup(struct drm_gpu_scheduler *sched) { - if (drm_sched_can_queue(sched, entity)) - drm_sched_run_job_queue(sched); + drm_sched_run_job_queue(sched); } /** @@ -1272,7 +1275,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, sched->submit_wq = submit_wq; sched->own_submit_wq = false; } else { - sched->submit_wq = alloc_ordered_workqueue(name, 0); +#ifdef CONFIG_LOCKDEP + sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, + WQ_MEM_RECLAIM, + &drm_sched_lockdep_map); +#else + sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); +#endif if (!sched->submit_wq) return -ENOMEM; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index d9f0728c3afd..34d22ba210b0 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -801,6 +801,7 @@ static const struct file_operations tegra_drm_fops = { .read = drm_read, .compat_ioctl = drm_compat_ioctl, .llseek = noop_llseek, + .fop_flags = FOP_UNSIGNED_OFFSET, }; static int tegra_drm_context_cleanup(int id, void *p, void *data) @@ -1135,6 +1136,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) static int host1x_drm_probe(struct host1x_device *dev) { + struct device *dma_dev = dev->dev.parent; struct tegra_drm *tegra; struct drm_device *drm; int err; @@ -1149,10 +1151,10 @@ static int host1x_drm_probe(struct host1x_device *dev) goto put; } - if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) { - tegra->domain = iommu_domain_alloc(&platform_bus_type); - if (!tegra->domain) { - err = -ENOMEM; + if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) { + tegra->domain = iommu_paging_domain_alloc(dma_dev); + if (IS_ERR(tegra->domain)) { + err = PTR_ERR(tegra->domain); goto free; } diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 4de1ea0fc7c0..00c8564520e7 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -46,7 +46,6 @@ struct gr3d { unsigned int nclocks; struct reset_control_bulk_data resets[RST_GR3D_MAX]; unsigned int nresets; - struct dev_pm_domain_list *pd_list; DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS); }; @@ -370,12 +369,18 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name, return 0; } +static void gr3d_del_link(void *link) +{ + device_link_del(link); +} + static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) { - struct dev_pm_domain_attach_data pd_data = { - .pd_names = (const char *[]) { "3d0", "3d1" }, - .num_pd_names = 2, - }; + static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL }; + const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; + struct device **opp_virt_devs, *pd_dev; + struct device_link *link; + unsigned int i; int err; err = of_count_phandle_with_args(dev->of_node, "power-domains", @@ -409,10 +414,29 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) if (dev->pm_domain) return 0; - err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list); - if (err < 0) + err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs); + if (err) return err; + for (i = 0; opp_genpd_names[i]; i++) { + pd_dev = opp_virt_devs[i]; + if (!pd_dev) { + dev_err(dev, "failed to get %s power domain\n", + opp_genpd_names[i]); + return -EINVAL; + } + + link = device_link_add(dev, pd_dev, link_flags); + if (!link) { + dev_err(dev, "failed to link to %s\n", dev_name(pd_dev)); + return -EINVAL; + } + 
+ err = devm_add_action_or_reset(dev, gr3d_del_link, link); + if (err) + return err; + } + return 0; } @@ -503,13 +527,13 @@ static int gr3d_probe(struct platform_device *pdev) err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); if (err) - goto err; + return err; err = host1x_client_register(&gr3d->client.base); if (err < 0) { dev_err(&pdev->dev, "failed to register host1x client: %d\n", err); - goto err; + return err; } /* initialize address register map */ @@ -517,9 +541,6 @@ static int gr3d_probe(struct platform_device *pdev) set_bit(gr3d_addr_regs[i], gr3d->addr_regs); return 0; -err: - dev_pm_domain_detach_list(gr3d->pd_list); - return err; } static void gr3d_remove(struct platform_device *pdev) @@ -528,7 +549,6 @@ static void gr3d_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); host1x_client_unregister(&gr3d->client.base); - dev_pm_domain_detach_list(gr3d->pd_list); } static int __maybe_unused gr3d_runtime_suspend(struct device *dev) diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c index 15e36a8db685..6bba97d0be88 100644 --- a/drivers/gpu/drm/tests/drm_connector_test.c +++ b/drivers/gpu/drm/tests/drm_connector_test.c @@ -996,7 +996,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1017,7 +1017,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1038,7 +1038,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1(struct kunit *t unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); rate = drm_hdmi_compute_mode_clock(mode, 10, HDMI_COLORSPACE_RGB); @@ -1056,7 +1056,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1077,7 +1077,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1(struct kunit *t unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); rate = drm_hdmi_compute_mode_clock(mode, 12, HDMI_COLORSPACE_RGB); @@ -1095,7 +1095,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_double(struct kunit *test) unsigned long long rate; struct drm_device *drm = &priv->drm; - mode = drm_display_mode_from_cea_vic(drm, 6); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 6); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_TRUE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1118,7 +1118,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_valid(struct kunit unsigned 
long long rate; unsigned int vic = *(unsigned int *)test->param_value; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1155,7 +1155,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc(struct kuni drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0]; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1180,7 +1180,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc(struct kuni drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0]; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, vic); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1203,7 +1203,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc(struct kunit struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1225,7 +1225,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc(struct kuni struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); @@ -1247,7 +1247,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc(struct kuni struct drm_device *drm = &priv->drm; unsigned long long rate; - mode = drm_display_mode_from_cea_vic(drm, 16); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); KUNIT_ASSERT_NOT_NULL(test, mode); KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c index 34ee95d41f29..294773342e71 100644 --- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c +++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c @@ -441,7 +441,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -555,7 +555,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -671,7 +671,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); drm = &priv->drm; @@ -1263,7 +1263,7 @@ static void 
drm_test_check_output_bpc_format_vic_1(struct kunit *test) ctx = drm_kunit_helper_acquire_ctx_alloc(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); - mode = drm_display_mode_from_cea_vic(drm, 1); + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); KUNIT_ASSERT_NOT_NULL(test, mode); /* diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index aa62719dab0e..04a6b8cc62ac 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -3,6 +3,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_kunit_helpers.h> #include <drm/drm_managed.h> @@ -311,6 +312,47 @@ drm_kunit_helper_create_crtc(struct kunit *test, } EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc); +static void kunit_action_drm_mode_destroy(void *ptr) +{ + struct drm_display_mode *mode = ptr; + + drm_mode_destroy(NULL, mode); +} + +/** + * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC + for a KUnit test + * @test: The test context object + * @dev: DRM device + * @video_code: CEA VIC of the mode + * + * Creates a new mode matching the specified CEA VIC for a KUnit test. + * + * Resources will be cleaned up automatically. + * + * Returns: A new drm_display_mode on success or NULL on failure + */ +struct drm_display_mode * +drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev, + u8 video_code) +{ + struct drm_display_mode *mode; + int ret; + + mode = drm_display_mode_from_cea_vic(dev, video_code); + if (!mode) + return NULL; + + ret = kunit_add_action_or_reset(test, + kunit_action_drm_mode_destroy, + mode); + if (ret) + return NULL; + + return mode; +} +EXPORT_SYMBOL_GPL(drm_kunit_display_mode_from_cea_vic); + MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); MODULE_DESCRIPTION("KUnit test suite helper functions"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 5ff1037a3453..62224992988f 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -7,7 +7,7 @@ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "udl_drv.h" #include "udl_proto.h" diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index ecb80fd75b1a..ebe52bef4ffb 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -291,7 +291,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, else args->timeout_ns = 0; - /* Asked to wait beyond the jiffie/scheduler precision? */ + /* Asked to wait beyond the jiffy/scheduler precision? 
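Usage sketch for the KUnit helper added above: tests now pass the test context, and the mode is released by a deferred kunit action instead of requiring the explicit cleanup the old drm_display_mode_from_cea_vic() callers never performed. The priv type here is a placeholder.

struct sketch_test_priv { struct drm_device drm; }; /* placeholder */

static void sketch_vic_test(struct kunit *test)
{
        struct sketch_test_priv *priv = test->priv;
        struct drm_display_mode *mode;

        mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
        KUNIT_ASSERT_NOT_NULL(test, mode);

        /* Use the mode freely; the registered action destroys it on exit. */
}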
*/ if (ret == -ETIME && args->timeout_ns) ret = -EAGAIN; diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index cd7f1eedf17f..00cd081d7873 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) static int v3d_perfmon_idr_del(int id, void *elem, void *data) { struct v3d_perfmon *perfmon = elem; + struct v3d_dev *v3d = (struct v3d_dev *)data; + + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == v3d->active_perfmon) + v3d_perfmon_stop(v3d, perfmon, false); v3d_perfmon_put(perfmon); @@ -314,8 +319,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data) void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) { + struct v3d_dev *v3d = v3d_priv->v3d; + mutex_lock(&v3d_priv->perfmon.lock); - idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); + idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d); idr_destroy(&v3d_priv->perfmon.idr); mutex_unlock(&v3d_priv->perfmon.lock); mutex_destroy(&v3d_priv->perfmon.lock); diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c index 8c041d7ce4f1..87dccaecc3e5 100644 --- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c +++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c @@ -139,7 +139,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, flags |= VBOX_MOUSE_POINTER_VISIBLE; } - p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA, + /* + * The 4 extra bytes come from switching struct vbva_mouse_pointer_shape + * from having a 4 bytes fixed array at the end to using a proper VLA + * at the end. These 4 extra bytes were not subtracted from sizeof(*p) + * before the switch to the VLA, so this way the behavior is unchanged. + * Chances are these 4 extra bytes are not necessary but they are kept + * to avoid regressions. + */ + p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA, VBVA_MOUSE_POINTER_SHAPE); if (!p) return -ENOMEM; diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index d4ade9325401..7f686a0190e6 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -114,6 +114,10 @@ int vbox_hw_init(struct vbox_private *vbox) DRM_INFO("VRAM %08x\n", vbox->full_vram_size); + ret = pcim_request_region(pdev, 0, "vboxvideo"); + if (ret) + return ret; + /* Map guest-heap at end of vram */ vbox->guest_heap = pcim_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE); diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h index f60d82504da0..79ec8481de0e 100644 --- a/drivers/gpu/drm/vboxvideo/vboxvideo.h +++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h @@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape { * Bytes in the gap between the AND and the XOR mask are undefined. * XOR mask scanlines have no gap between them and size of XOR mask is: * xor_len = width * 4 * height. - * - * Preallocate 4 bytes for accessing actual data as p->data. 
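The "4 extra bytes" comment in hgsmi_update_pointer_shape() is plain sizeof() arithmetic: converting the trailing data[4] into a flexible array shrinks sizeof(*p) by four, so adding four back keeps the allocation size bit-for-bit identical. A standalone demonstration with stand-in structs (the real vbva_mouse_pointer_shape has more fields):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct shape_fixed { uint32_t flags; uint8_t data[4]; } __attribute__((packed));
struct shape_vla   { uint32_t flags; uint8_t data[];  } __attribute__((packed));

int main(void)
{
        size_t pixel_len = 100;
        size_t old_size = sizeof(struct shape_fixed) + pixel_len;   /* 108 */
        size_t new_size = sizeof(struct shape_vla) + pixel_len + 4; /* 108 */

        printf("%zu %zu\n", old_size, new_size);
        return 0;
}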
*/ - u8 data[4]; + u8 data[]; } __packed; /* pointer is visible */ diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index c4ac2c946238..c00a5cc2316d 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file) static int vc4_perfmon_idr_del(int id, void *elem, void *data) { struct vc4_perfmon *perfmon = elem; + struct vc4_dev *vc4 = (struct vc4_dev *)data; + + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == vc4->active_perfmon) + vc4_perfmon_stop(vc4, perfmon, false); vc4_perfmon_put(perfmon); @@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file) return; mutex_lock(&vc4file->perfmon.lock); - idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL); + idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4); idr_destroy(&vc4file->perfmon.idr); mutex_unlock(&vc4file->perfmon.lock); mutex_destroy(&vc4file->perfmon.lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 890a66a2361f..64bd7d74854e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -635,10 +635,8 @@ out: kunmap_atomic(d.src_addr); if (d.dst_addr) kunmap_atomic(d.dst_addr); - if (src_pages) - kvfree(src_pages); - if (dst_pages) - kvfree(dst_pages); + kvfree(src_pages); + kvfree(dst_pages); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 50ad3105c16e..2825dd3149ed 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1609,6 +1609,7 @@ static const struct file_operations vmwgfx_driver_fops = { .compat_ioctl = vmw_compat_ioctl, #endif .llseek = noop_llseek, + .fop_flags = FOP_UNSIGNED_OFFSET, }; static const struct drm_driver driver = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 3f4719b3c268..4e2807f5f94c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -62,7 +62,7 @@ #define VMWGFX_DRIVER_MINOR 20 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) -#define VMWGFX_MAX_DISPLAYS 16 +#define VMWGFX_NUM_DISPLAY_UNITS 8 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 #define VMWGFX_MIN_INITIAL_WIDTH 1280 @@ -82,7 +82,7 @@ #define VMWGFX_NUM_GB_CONTEXT 256 #define VMWGFX_NUM_GB_SHADER 20000 #define VMWGFX_NUM_GB_SURFACE 32768 -#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS +#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS #define VMWGFX_NUM_DXCONTEXT 256 #define VMWGFX_NUM_DXQUERY 512 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 288ed0bb75cb..63b8d7591253 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1283,7 +1283,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, { struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_surface *vfbs; - enum SVGA3dSurfaceFormat format; struct vmw_surface *surface; int ret; @@ -1320,34 +1319,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, return -EINVAL; } - switch (mode_cmd->pixel_format) { - case DRM_FORMAT_ARGB8888: - format = SVGA3D_A8R8G8B8; - break; - case DRM_FORMAT_XRGB8888: - format = SVGA3D_X8R8G8B8; - break; - case DRM_FORMAT_RGB565: - format = SVGA3D_R5G6B5; - break; - 
case DRM_FORMAT_XRGB1555: - format = SVGA3D_A1R5G5B5; - break; - default: - DRM_ERROR("Invalid pixel format: %p4cc\n", - &mode_cmd->pixel_format); - return -EINVAL; - } - - /* - * For DX, surface format validation is done when surface->scanout - * is set. - */ - if (!has_sm4_context(dev_priv) && format != surface->metadata.format) { - DRM_ERROR("Invalid surface format for requested mode.\n"); - return -EINVAL; - } - vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); if (!vfbs) { ret = -ENOMEM; @@ -1539,6 +1510,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, DRM_ERROR("Surface size cannot exceed %dx%d\n", dev_priv->texture_max_width, dev_priv->texture_max_height); + ret = -EINVAL; goto err_out; } @@ -2225,7 +2197,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_mode_config *mode_config = &dev->mode_config; struct drm_vmw_update_layout_arg *arg = (struct drm_vmw_update_layout_arg *)data; - void __user *user_rects; + const void __user *user_rects; struct drm_vmw_rect *rects; struct drm_rect *drm_rects; unsigned rects_size; @@ -2237,6 +2209,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, VMWGFX_MIN_INITIAL_HEIGHT}; vmw_du_update_layout(dev_priv, 1, &def_rect); return 0; + } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) { + return -E2BIG; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 6141fadf81ef..2a6c6d6581e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -199,9 +199,6 @@ struct vmw_kms_dirty { s32 unit_y2; }; -#define VMWGFX_NUM_DISPLAY_UNITS 8 - - #define vmw_framebuffer_to_vfb(x) \ container_of(x, struct vmw_framebuffer, base) #define vmw_framebuffer_to_vfbs(x) \ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index fab155a68054..82d18b88f4a7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -886,6 +886,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn, struct drm_crtc_state *new_crtc_state; conn_state = drm_atomic_get_connector_state(state, conn); + + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); + du = vmw_connector_to_stdu(conn); if (!conn_state->crtc) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 1625b30d9970..5721c74da3e0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -2276,9 +2276,12 @@ int vmw_dumb_create(struct drm_file *file_priv, const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format); SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE | SVGA3D_SURFACE_HINT_RENDERTARGET | - SVGA3D_SURFACE_SCREENTARGET | - SVGA3D_SURFACE_BIND_SHADER_RESOURCE | - SVGA3D_SURFACE_BIND_RENDER_TARGET; + SVGA3D_SURFACE_SCREENTARGET; + + if (vmw_surface_is_dx_screen_target_format(format)) { + flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE | + SVGA3D_SURFACE_BIND_RENDER_TARGET; + } /* * Without mob support we're just going to use raw memory buffer diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 75736faf2a80..c6e0c8d77a70 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -309,18 +309,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe) } /* TODO: System and runtime suspend/resume sequences will be 
sanitized as a follow-up. */ -void xe_display_pm_runtime_suspend(struct xe_device *xe) -{ - if (!xe->info.probe_display) - return; - - if (xe->d3cold.allowed) - xe_display_pm_suspend(xe, true); - - intel_hpd_poll_enable(xe); -} - -void xe_display_pm_suspend(struct xe_device *xe, bool runtime) +static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) { struct intel_display *display = &xe->display; bool s2idle = suspend_to_idle(); @@ -353,28 +342,38 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime) intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold); intel_dmc_suspend(xe); + + if (runtime && has_display(xe)) + intel_hpd_poll_enable(xe); } -void xe_display_pm_suspend_late(struct xe_device *xe) +void xe_display_pm_suspend(struct xe_device *xe) +{ + __xe_display_pm_suspend(xe, false); +} + +void xe_display_pm_runtime_suspend(struct xe_device *xe) { - bool s2idle = suspend_to_idle(); if (!xe->info.probe_display) return; - intel_power_domains_suspend(xe, s2idle); + if (xe->d3cold.allowed) { + __xe_display_pm_suspend(xe, true); + return; + } - intel_display_power_suspend_late(xe); + intel_hpd_poll_enable(xe); } -void xe_display_pm_runtime_resume(struct xe_device *xe) +void xe_display_pm_suspend_late(struct xe_device *xe) { + bool s2idle = suspend_to_idle(); if (!xe->info.probe_display) return; - intel_hpd_poll_disable(xe); + intel_power_domains_suspend(xe, s2idle); - if (xe->d3cold.allowed) - xe_display_pm_resume(xe, true); + intel_display_power_suspend_late(xe); } void xe_display_pm_resume_early(struct xe_device *xe) @@ -387,7 +386,7 @@ void xe_display_pm_resume_early(struct xe_device *xe) intel_power_domains_resume(xe); } -void xe_display_pm_resume(struct xe_device *xe, bool runtime) +static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) { struct intel_display *display = &xe->display; @@ -411,9 +410,11 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_display_driver_resume(xe); drm_kms_helper_poll_enable(&xe->drm); intel_display_driver_enable_user_access(xe); - intel_hpd_poll_disable(xe); } + if (has_display(xe)) + intel_hpd_poll_disable(xe); + intel_opregion_resume(display); intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false); @@ -421,6 +422,26 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime) intel_power_domains_enable(xe); } +void xe_display_pm_resume(struct xe_device *xe) +{ + __xe_display_pm_resume(xe, false); +} + +void xe_display_pm_runtime_resume(struct xe_device *xe) +{ + if (!xe->info.probe_display) + return; + + if (xe->d3cold.allowed) { + __xe_display_pm_resume(xe, true); + return; + } + + intel_hpd_init(xe); + intel_hpd_poll_disable(xe); +} + + static void display_device_remove(struct drm_device *dev, void *arg) { struct xe_device *xe = arg; diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h index 53d727fd792b..bed55fd26f30 100644 --- a/drivers/gpu/drm/xe/display/xe_display.h +++ b/drivers/gpu/drm/xe/display/xe_display.h @@ -34,10 +34,10 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir); void xe_display_irq_reset(struct xe_device *xe); void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); -void xe_display_pm_suspend(struct xe_device *xe, bool runtime); +void xe_display_pm_suspend(struct xe_device *xe); void xe_display_pm_suspend_late(struct xe_device *xe); void xe_display_pm_resume_early(struct xe_device *xe); -void xe_display_pm_resume(struct xe_device *xe, bool runtime); +void 
xe_display_pm_resume(struct xe_device *xe); void xe_display_pm_runtime_suspend(struct xe_device *xe); void xe_display_pm_runtime_resume(struct xe_device *xe); @@ -65,10 +65,10 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) static inline void xe_display_irq_reset(struct xe_device *xe) {} static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} -static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {} +static inline void xe_display_pm_suspend(struct xe_device *xe) {} static inline void xe_display_pm_suspend_late(struct xe_device *xe) {} static inline void xe_display_pm_resume_early(struct xe_device *xe) {} -static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {} +static inline void xe_display_pm_resume(struct xe_device *xe) {} static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {} static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {} diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 660ff42e45a6..bd604b9f08e4 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -169,6 +169,8 @@ #define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED) #define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14) +#define XE2LPM_CCCHKNREG1 XE_REG(0x82a8) + #define VF_PREEMPTION XE_REG(0x83a4, XE_REG_OPTION_MASKED) #define PREEMPTION_VERTEX_COUNT REG_GENMASK(15, 0) @@ -378,6 +380,9 @@ #define L3SQCREG3 XE_REG_MCR(0xb108) #define COMPPWOVERFETCHEN REG_BIT(28) +#define SCRATCH3_LBCF XE_REG_MCR(0xb154) +#define RWFLUSHALLEN REG_BIT(17) + #define XEHP_L3SQCREG5 XE_REG_MCR(0xb158) #define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) @@ -388,8 +393,11 @@ #define XE2_GLOBAL_INVAL XE_REG(0xb404) -#define SCRATCH1LPFC XE_REG(0xb474) -#define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0) +#define XE2LPM_L3SQCREG2 XE_REG_MCR(0xb604) + +#define XE2LPM_L3SQCREG3 XE_REG_MCR(0xb608) + +#define XE2LPM_SCRATCH3_LBCF XE_REG_MCR(0xb654) #define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658) @@ -509,7 +517,7 @@ * [4-6] RSVD * [7] Disabled */ -#define CCS_MODE XE_REG(0x14804) +#define CCS_MODE XE_REG(0x14804, XE_REG_OPTION_MASKED) #define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */ #define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */ #define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1) diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h index 1189f5a540a8..a9b0091cb7ee 100644 --- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h @@ -52,6 +52,7 @@ #define OAG_OABUFFER_MEMORY_SELECT REG_BIT(0) /* 0: PPGTT, 1: GGTT */ #define OAG_OACONTROL XE_REG(0xdaf4) +#define OAG_OACONTROL_OA_PES_DISAG_EN REG_GENMASK(27, 22) #define OAG_OACONTROL_OA_CCS_SELECT_MASK REG_GENMASK(18, 16) #define OAG_OACONTROL_OA_COUNTER_SEL_MASK REG_GENMASK(4, 2) #define OAG_OACONTROL_OA_COUNTER_ENABLE REG_BIT(0) diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c index a13e0b3a169e..ef777dbdf4ec 100644 --- a/drivers/gpu/drm/xe/xe_bb.c +++ b/drivers/gpu/drm/xe/xe_bb.c @@ -65,7 +65,8 @@ __xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr) { u32 size = drm_suballoc_size(bb->bo); - bb->cs[bb->len++] = MI_BATCH_BUFFER_END; + if (bb->len == 0 || bb->cs[bb->len - 1] != MI_BATCH_BUFFER_END) + bb->cs[bb->len++] = MI_BATCH_BUFFER_END; xe_gt_assert(q->gt, bb->len * 4 + bb_prefetch(q->gt) <= size); diff --git 
a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 06911e9a3bf5..e5f51fd23c65 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -680,8 +680,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, tt_has_data = ttm && (ttm_tt_is_populated(ttm) || (ttm->page_flags & TTM_TT_FLAG_SWAPPED)); - move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared) : - (!mem_type_is_vram(old_mem_type) && !tt_has_data); + move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) : + (!mem_type_is_vram(old_mem_type) && !tt_has_data)); needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) || (!ttm && ttm_bo->type == ttm_bo_type_device); @@ -2320,6 +2320,20 @@ void xe_bo_put_commit(struct llist_head *deferred) drm_gem_object_free(&bo->ttm.base.refcount); } +void xe_bo_put(struct xe_bo *bo) +{ + might_sleep(); + if (bo) { +#ifdef CONFIG_PROC_FS + if (bo->client) + might_lock(&bo->client->bos_lock); +#endif + if (bo->ggtt_node && bo->ggtt_node->ggtt) + might_lock(&bo->ggtt_node->ggtt->lock); + drm_gem_object_put(&bo->ttm.base); + } +} + /** * xe_bo_dumb_create - Create a dumb bo as backing for a fb * @file_priv: ... diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index dbfb3209615d..6e4be52306df 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -126,11 +126,7 @@ static inline struct xe_bo *xe_bo_get(struct xe_bo *bo) return bo; } -static inline void xe_bo_put(struct xe_bo *bo) -{ - if (bo) - drm_gem_object_put(&bo->ttm.base); -} +void xe_bo_put(struct xe_bo *bo); static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo) { diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index a64bae36e0e3..fe4319eb13fd 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -6,6 +6,7 @@ #include "xe_debugfs.h" #include <linux/debugfs.h> +#include <linux/fault-inject.h> #include <linux/string_helpers.h> #include <drm/drm_debugfs.h> @@ -26,10 +27,7 @@ #include "xe_vm.h" #endif -#ifdef CONFIG_FAULT_INJECTION -#include <linux/fault-inject.h> /* XXX: fault-inject.h is broken */ DECLARE_FAULT_ATTR(gt_reset_failure); -#endif static struct xe_device *node_to_xe(struct drm_info_node *node) { @@ -189,7 +187,7 @@ void xe_debugfs_register(struct xe_device *xe) debugfs_create_file("forcewake_all", 0400, root, xe, &forcewake_all_fops); - debugfs_create_file("wedged_mode", 0400, root, xe, + debugfs_create_file("wedged_mode", 0600, root, xe, &wedged_mode_fops); for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) { @@ -213,8 +211,5 @@ void xe_debugfs_register(struct xe_device *xe) for_each_gt(gt, xe, id) xe_gt_debugfs_register(gt); -#ifdef CONFIG_FAULT_INJECTION fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure); -#endif - } diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 1a0d7fdd094b..a1987b554a8d 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -87,10 +87,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) mutex_init(&xef->exec_queue.lock); xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); - spin_lock(&xe->clients.lock); - xe->clients.count++; - spin_unlock(&xe->clients.lock); - file->driver_priv = xef; kref_init(&xef->refcount); @@ -107,17 +103,12 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) static void xe_file_destroy(struct kref *ref) { struct xe_file *xef = container_of(ref, struct xe_file, refcount); - 
struct xe_device *xe = xef->xe; xa_destroy(&xef->exec_queue.xa); mutex_destroy(&xef->exec_queue.lock); xa_destroy(&xef->vm.xa); mutex_destroy(&xef->vm.lock); - spin_lock(&xe->clients.lock); - xe->clients.count--; - spin_unlock(&xe->clients.lock); - xe_drm_client_put(xef->client); kfree(xef->process_name); kfree(xef); @@ -171,10 +162,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file) xe_exec_queue_kill(q); xe_exec_queue_put(q); } - mutex_lock(&xef->vm.lock); xa_for_each(&xef->vm.xa, idx, vm) xe_vm_close_and_put(vm); - mutex_unlock(&xef->vm.lock); xe_file_put(xef); @@ -253,6 +242,7 @@ static const struct file_operations xe_driver_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = drm_show_fdinfo, #endif + .fop_flags = FOP_UNSIGNED_OFFSET, }; static struct drm_driver driver = { @@ -297,6 +287,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy) if (xe->unordered_wq) destroy_workqueue(xe->unordered_wq); + if (xe->destroy_wq) + destroy_workqueue(xe->destroy_wq); + ttm_device_fini(&xe->ttm); } @@ -331,13 +324,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe->info.force_execlist = xe_modparam.force_execlist; spin_lock_init(&xe->irq.lock); - spin_lock_init(&xe->clients.lock); init_waitqueue_head(&xe->ufence_wq); - err = drmm_mutex_init(&xe->drm, &xe->usm.lock); - if (err) - goto err; + init_rwsem(&xe->usm.lock); xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC); @@ -362,8 +352,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0); xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); + xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0); if (!xe->ordered_wq || !xe->unordered_wq || - !xe->preempt_fence_wq) { + !xe->preempt_fence_wq || !xe->destroy_wq) { /* * Cleanup done in xe_device_destroy via * drmm_add_action_or_reset register above @@ -889,7 +880,7 @@ void xe_device_l2_flush(struct xe_device *xe) spin_lock(&gt->global_invl_lock); xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1); - if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true)) + if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) xe_gt_err_once(gt, "Global invalidation timeout\n"); spin_unlock(&gt->global_invl_lock); @@ -979,13 +970,13 @@ void xe_device_declare_wedged(struct xe_device *xe) return; } + xe_pm_runtime_get_noresume(xe); + if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n"); return; } - xe_pm_runtime_get_noresume(xe); - if (!atomic_xchg(&xe->wedged.flag, 1)) { xe->needs_flr_on_fini = true; drm_err(&xe->drm, diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 894f04770454..34620ef855c0 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -178,4 +178,18 @@ void xe_device_declare_wedged(struct xe_device *xe); struct xe_file *xe_file_get(struct xe_file *xef); void xe_file_put(struct xe_file *xef); +/* + * Occasionally the G2H worker is seen to start running more than a second after + * being queued and activated by the Linux workqueue subsystem, which leads to G2H + * timeout errors. The root cause lies in the scheduling latency of the Lunarlake + * hybrid CPU; the issue disappears if the Lunarlake atom cores are disabled in the + * BIOS, which is beyond the xe KMD's control. 
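+ * The LNL_FLUSH_WORKQUEUE()/LNL_FLUSH_WORK() macros below expand to plain
+ * flush_workqueue()/flush_work(); they exist only to tag the call sites added
+ * for this workaround so they can be found and dropped later.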
+ * + * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU. + */ +#define LNL_FLUSH_WORKQUEUE(wq__) \ + flush_workqueue(wq__) +#define LNL_FLUSH_WORK(wrk__) \ + flush_work(wrk__) + #endif diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index ec7eb7811126..687f3a9039bb 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -353,15 +353,6 @@ struct xe_device { struct workqueue_struct *wq; } sriov; - /** @clients: drm clients info */ - struct { - /** @clients.lock: Protects drm clients info */ - spinlock_t lock; - - /** @clients.count: number of drm clients */ - u64 count; - } clients; - /** @usm: unified memory state */ struct { /** @usm.asid: convert a ASID to VM */ @@ -369,7 +360,7 @@ struct xe_device { /** @usm.next_asid: next ASID, used to cyclical alloc asids */ u32 next_asid; /** @usm.lock: protects UM state */ - struct mutex lock; + struct rw_semaphore lock; } usm; /** @pinned: pinned BO state */ @@ -396,6 +387,9 @@ struct xe_device { /** @unordered_wq: used to serialize unordered work, mostly display */ struct workqueue_struct *unordered_wq; + /** @destroy_wq: used to serialize user destroy work, like queue */ + struct workqueue_struct *destroy_wq; + /** @tiles: device tiles */ struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE]; @@ -567,15 +561,23 @@ struct xe_file { struct { /** @vm.xe: xarray to store VMs */ struct xarray xa; - /** @vm.lock: protects file VM state */ + /** + * @vm.lock: Protects VM lookup + reference and removal from + * file xarray. Not intended to be an outer lock which does + * things while being held. + */ struct mutex lock; } vm; /** @exec_queue: Submission exec queue state for file */ struct { - /** @exec_queue.xe: xarray to store engines */ + /** @exec_queue.xa: xarray to store exec queues */ struct xarray xa; - /** @exec_queue.lock: protects file engine state */ + /** + * @exec_queue.lock: Protects exec queue lookup + reference and + * removal from file xarray. Not intended to be an outer + * lock which does things while being held. 
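+ * The xarray has internal locking of its own, so plain lookups and
+ * stores remain safe without holding this mutex.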
+ */ struct mutex lock; } exec_queue; diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c index e64f4b645e2e..fb52a23e28f8 100644 --- a/drivers/gpu/drm/xe/xe_drm_client.c +++ b/drivers/gpu/drm/xe/xe_drm_client.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/types.h> +#include "xe_assert.h" #include "xe_bo.h" #include "xe_bo_types.h" #include "xe_device_types.h" @@ -151,10 +152,13 @@ void xe_drm_client_add_bo(struct xe_drm_client *client, */ void xe_drm_client_remove_bo(struct xe_bo *bo) { + struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); struct xe_drm_client *client = bo->client; + xe_assert(xe, !kref_read(&bo->ttm.base.refcount)); + spin_lock(&client->bos_lock); - list_del(&bo->client_link); + list_del_init(&bo->client_link); spin_unlock(&client->bos_lock); xe_drm_client_put(client); @@ -164,12 +168,9 @@ static void bo_meminfo(struct xe_bo *bo, struct drm_memory_stats stats[TTM_NUM_MEM_TYPES]) { u64 sz = bo->size; - u32 mem_type; + u32 mem_type = bo->ttm.resource->mem_type; - if (bo->placement.placement) - mem_type = bo->placement.placement->mem_type; - else - mem_type = XE_PL_TT; + xe_bo_assert_held(bo); if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base)) stats[mem_type].shared += sz; @@ -196,6 +197,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file) struct xe_drm_client *client; struct drm_gem_object *obj; struct xe_bo *bo; + LLIST_HEAD(deferred); unsigned int id; u32 mem_type; @@ -206,7 +208,20 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file) idr_for_each_entry(&file->object_idr, obj, id) { struct xe_bo *bo = gem_to_xe_bo(obj); - bo_meminfo(bo, stats); + if (dma_resv_trylock(bo->ttm.base.resv)) { + bo_meminfo(bo, stats); + xe_bo_unlock(bo); + } else { + xe_bo_get(bo); + spin_unlock(&file->table_lock); + + xe_bo_lock(bo, false); + bo_meminfo(bo, stats); + xe_bo_unlock(bo); + + xe_bo_put(bo); + spin_lock(&file->table_lock); + } } spin_unlock(&file->table_lock); @@ -215,11 +230,28 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file) list_for_each_entry(bo, &client->bos_list, client_link) { if (!kref_get_unless_zero(&bo->ttm.base.refcount)) continue; - bo_meminfo(bo, stats); - xe_bo_put(bo); + + if (dma_resv_trylock(bo->ttm.base.resv)) { + bo_meminfo(bo, stats); + xe_bo_unlock(bo); + } else { + spin_unlock(&client->bos_lock); + + xe_bo_lock(bo, false); + bo_meminfo(bo, stats); + xe_bo_unlock(bo); + + spin_lock(&client->bos_lock); + /* The bo ref will prevent this bo from being removed from the list */ + xe_assert(xef->xe, !list_empty(&bo->client_link)); + } + + xe_bo_put_deferred(bo, &deferred); } spin_unlock(&client->bos_lock); + xe_bo_put_commit(&deferred); + for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) { if (!xe_mem_type_to_name[mem_type]) continue; @@ -251,8 +283,15 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file) /* Accumulate all the exec queues from this client */ mutex_lock(&xef->exec_queue.lock); - xa_for_each(&xef->exec_queue.xa, i, q) + xa_for_each(&xef->exec_queue.xa, i, q) { + xe_exec_queue_get(q); + mutex_unlock(&xef->exec_queue.lock); + xe_exec_queue_update_run_ticks(q); + + mutex_lock(&xef->exec_queue.lock); + xe_exec_queue_put(q); + } mutex_unlock(&xef->exec_queue.lock); /* Get the total GPU cycles */ diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index 7b38485817dc..756b492f13b0 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -41,11 
+41,6 @@ * user knows an exec writes to a BO and reads from the BO in the next exec, it * is the user's responsibility to pass in / out fence between the two execs). * - * Implicit dependencies for external BOs are handled by using the dma-buf - * implicit dependency uAPI (TODO: add link). To make this works each exec must - * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external - * BO mapped in the VM. - * * We do not allow a user to trigger a bind at exec time rather we have a VM * bind IOCTL which uses the same in / out fence interface as exec. In that * sense, a VM bind is basically the same operation as an exec from the user @@ -59,8 +54,8 @@ * behind any pending kernel operations on any external BOs in VM or any BOs * private to the VM. This is accomplished by the rebinds waiting on BOs * DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs - * slots (inflight execs are in the DMA_RESV_USAGE_BOOKING for private BOs and - * in DMA_RESV_USAGE_WRITE for external BOs). + * slots (inflight execs are in DMA_RESV_USAGE_BOOKKEEP for both private and + * external BOs). * * Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute * mode VMs we use preempt fences and a rebind worker (TODO: add link). @@ -137,12 +132,16 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (XE_IOCTL_DBG(xe, !q)) return -ENOENT; - if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) - return -EINVAL; + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) { + err = -EINVAL; + goto err_exec_queue; + } if (XE_IOCTL_DBG(xe, args->num_batch_buffer && - q->width != args->num_batch_buffer)) - return -EINVAL; + q->width != args->num_batch_buffer)) { + err = -EINVAL; + goto err_exec_queue; + } if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) { err = -ECANCELED; @@ -225,6 +224,7 @@ retry: fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm); if (IS_ERR(fence)) { err = PTR_ERR(fence); + xe_vm_unlock(vm); goto err_unlock_list; } for (i = 0; i < num_syncs; i++) @@ -304,7 +304,8 @@ retry: xe_sched_job_arm(job); if (!xe_vm_in_lr_mode(vm)) drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished, - DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE); + DMA_RESV_USAGE_BOOKKEEP, + DMA_RESV_USAGE_BOOKKEEP); for (i = 0; i < num_syncs; i++) { xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished); diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index 7f28b7fc68d5..fd0f3b3c9101 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -260,8 +260,14 @@ void xe_exec_queue_fini(struct xe_exec_queue *q) { int i; + /* + * Before releasing our ref to lrc and xef, accumulate our run ticks + */ + xe_exec_queue_update_run_ticks(q); + for (i = 0; i < q->width; ++i) xe_lrc_put(q->lrc[i]); + __xe_exec_queue_free(q); } @@ -635,14 +641,14 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, } } - mutex_lock(&xef->exec_queue.lock); + q->xef = xe_file_get(xef); + + /* user id alloc must always be last in ioctl to prevent UAF */ err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); - mutex_unlock(&xef->exec_queue.lock); if (err) goto kill_exec_queue; args->exec_queue_id = id; - q->xef = xe_file_get(xef); return 0; diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index b263fff15273..7d9fc489dcb8 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -115,9 +115,15 
@@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC, &value, true); if (ret) - xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n", - domain->id, str_wake_sleep(wake), ERR_PTR(ret), - domain->reg_ack.addr, value); + xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n", + domain->id, str_wake_sleep(wake), ERR_PTR(ret), + domain->reg_ack.addr, value); + if (value == ~0) { + xe_gt_err(gt, + "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n", + domain->id, str_wake_sleep(wake)); + ret = -EIO; + } return ret; } diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 2895f154654c..ff19eca5d358 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -397,6 +397,16 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt) static void xe_ggtt_invalidate(struct xe_ggtt *ggtt) { + struct xe_device *xe = tile_to_xe(ggtt->tile); + + /* + * XXX: Barrier for GGTT pages. Unsure exactly why this is required, but + * without it LNL has issues with the GuC reading the scratch page + * instead of the correct GGTT page. Not a particularly hot code path, so + * blindly do an MMIO read here, which results in the GuC reading the + * correct GGTT page. + */ + xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG); + /* Each GT in a tile has its own TLB to cache GGTT lookups */ ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt); ggtt_invalidate_gt_tlb(ggtt->tile->media_gt); diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c index c518d1d16d82..50361b4638f9 100644 --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c @@ -90,6 +90,11 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched) cancel_work_sync(&sched->work_process_msg); } +void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched) +{ + drm_sched_resume_timeout(&sched->base, sched->base.timeout); +} + void xe_sched_add_msg(struct xe_gpu_scheduler *sched, struct xe_sched_msg *msg) { diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h index cee9c6809fc0..64b2ae6839db 100644 --- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h +++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h @@ -22,6 +22,8 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched); void xe_sched_submission_start(struct xe_gpu_scheduler *sched); void xe_sched_submission_stop(struct xe_gpu_scheduler *sched); +void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched); + void xe_sched_add_msg(struct xe_gpu_scheduler *sched, struct xe_sched_msg *msg); void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched, @@ -61,7 +63,9 @@ xe_sched_invalidate_job(struct xe_sched_job *job, int threshold) static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched, struct xe_sched_job *job) { + spin_lock(&sched->base.job_list_lock); list_add(&job->drm.list, &sched->base.pending_list); + spin_unlock(&sched->base.job_list_lock); } static inline diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index dd96dec95b19..d5fd6a089b7c 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -12,8 +12,6 @@ #include <generated/xe_wa_oob.h> -#include <generated/xe_wa_oob.h> - #include "instructions/xe_gfxpipe_commands.h" #include "instructions/xe_mi_commands.h" #include "regs/xe_gt_regs.h" @@ -110,7 +108,6 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) return; if 
(!xe_gt_is_media_type(gt)) { - xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH); reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); reg |= CG_DIS_CNTLBUS; xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); @@ -876,7 +873,9 @@ int xe_gt_sanitize_freq(struct xe_gt *gt) int ret = 0; if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) || - xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) && XE_WA(gt, 22019338487)) + xe_uc_fw_is_loaded(&gt->uc.gsc.fw) || + xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) && + XE_WA(gt, 22019338487)) ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc); return ret; } diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index 8b1a5027dcf2..ee138e9768a2 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -6,6 +6,8 @@ #ifndef _XE_GT_H_ #define _XE_GT_H_ +#include <linux/fault-inject.h> + #include <drm/drm_util.h> #include "xe_device.h" @@ -19,19 +21,11 @@ #define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0) -#ifdef CONFIG_FAULT_INJECTION -#include <linux/fault-inject.h> /* XXX: fault-inject.h is broken */ extern struct fault_attr gt_reset_failure; static inline bool xe_fault_inject_gt_reset(void) { return should_fail(&gt_reset_failure, 1); } -#else -static inline bool xe_fault_inject_gt_reset(void) -{ - return false; -} -#endif struct xe_gt *xe_gt_alloc(struct xe_tile *tile); int xe_gt_init_hwconfig(struct xe_gt *gt); diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c index d2e4dc3aaf61..ffcbd05671fc 100644 --- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c +++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c @@ -68,6 +68,12 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) } } + /* + * Mask bits need to be set for the register. Although only Xe2+ + * platforms require the mask bits to be set, doing so does no harm on + * older platforms, where these bits are unused. 
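+ *
+ * CCS_MODE is now defined with XE_REG_OPTION_MASKED, so the upper 16 bits
+ * of a write act as per-bit write enables; OR-ing the cslice mask into
+ * bits 31:16 below arms all the cslice fields for this write.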
+ */ + mode |= CCS_MODE_CSLICE_0_3_MASK << 16; xe_mmio_write32(gt, CCS_MODE, mode); xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n", @@ -133,9 +139,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, } /* CCS mode can only be updated when there are no drm clients */ - spin_lock(&xe->clients.lock); - if (xe->clients.count) { - spin_unlock(&xe->clients.lock); + mutex_lock(&xe->drm.filelist_mutex); + if (!list_empty(&xe->drm.filelist)) { + mutex_unlock(&xe->drm.filelist_mutex); + xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n"); return -EBUSY; } @@ -146,7 +153,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, xe_gt_reset_async(gt); } - spin_unlock(&xe->clients.lock); + mutex_unlock(&xe->drm.filelist_mutex); return count; } diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c index 68a5778b4319..ab76973f3e1e 100644 --- a/drivers/gpu/drm/xe/xe_gt_freq.c +++ b/drivers/gpu/drm/xe/xe_gt_freq.c @@ -237,11 +237,11 @@ int xe_gt_freq_init(struct xe_gt *gt) if (!gt->freq) return -ENOMEM; - err = devm_add_action(xe->drm.dev, freq_fini, gt->freq); + err = sysfs_create_files(gt->freq, freq_attrs); if (err) return err; - err = sysfs_create_files(gt->freq, freq_attrs); + err = devm_add_action_or_reset(xe->drm.dev, freq_fini, gt->freq); if (err) return err; diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index 7d7bd0be6233..c834f64b0178 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -439,7 +439,7 @@ void xe_gt_mcr_init(struct xe_gt *gt) if (gt->info.type == XE_GT_TYPE_MEDIA) { drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13); - if (MEDIA_VER(xe) >= 20) { + if (MEDIA_VERx100(xe) >= 1301) { gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table; gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table; } else { diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index 730eec07795e..79c426dc2505 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -185,6 +185,21 @@ unlock_dma_resv: return err; } +static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid) +{ + struct xe_vm *vm; + + down_read(&xe->usm.lock); + vm = xa_load(&xe->usm.asid_to_vm, asid); + if (vm && xe_vm_in_fault_mode(vm)) + xe_vm_get(vm); + else + vm = ERR_PTR(-EINVAL); + up_read(&xe->usm.lock); + + return vm; +} + static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf) { struct xe_device *xe = gt_to_xe(gt); @@ -197,21 +212,20 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf) if (pf->trva_fault) return -EFAULT; - /* ASID to VM */ - mutex_lock(&xe->usm.lock); - vm = xa_load(&xe->usm.asid_to_vm, pf->asid); - if (vm && xe_vm_in_fault_mode(vm)) - xe_vm_get(vm); - else - vm = NULL; - mutex_unlock(&xe->usm.lock); - if (!vm) - return -EINVAL; + vm = asid_to_vm(xe, pf->asid); + if (IS_ERR(vm)) + return PTR_ERR(vm); /* * TODO: Change to read lock? Using write lock for simplicity. 
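* A read lock would allow faults on the same VM to be serviced concurrently;
* the write lock simply serializes them.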
*/ down_write(&vm->lock); + + if (xe_vm_is_closed(vm)) { + err = -ENOENT; + goto unlock_vm; + } + vma = lookup_vma(vm, pf->page_addr); if (!vma) { err = -EINVAL; @@ -542,14 +556,9 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc) if (acc->access_type != ACC_TRIGGER) return -EINVAL; - /* ASID to VM */ - mutex_lock(&xe->usm.lock); - vm = xa_load(&xe->usm.asid_to_vm, acc->asid); - if (vm) - xe_vm_get(vm); - mutex_unlock(&xe->usm.lock); - if (!vm || !xe_vm_in_fault_mode(vm)) - return -EINVAL; + vm = asid_to_vm(xe, acc->asid); + if (IS_ERR(vm)) + return PTR_ERR(vm); down_read(&vm->lock); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 8250ef71e685..afdb477ecf83 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -387,6 +387,8 @@ static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node) * the xe_ggtt_clear() called by below xe_ggtt_remove_node(). */ xe_ggtt_node_remove(node, false); + } else { + xe_ggtt_node_fini(node); } } @@ -442,7 +444,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) config->ggtt_region = node; return 0; err: - xe_ggtt_node_fini(node); + pf_release_ggtt(tile, node); return err; } diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.c b/drivers/gpu/drm/xe/xe_gt_sysfs.c index a05c3699e8b9..ec2b8246204b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sysfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sysfs.c @@ -51,5 +51,5 @@ int xe_gt_sysfs_init(struct xe_gt *gt) gt->sysfs = &kg->base; - return devm_add_action(xe->drm.dev, gt_sysfs_fini, gt); + return devm_add_action_or_reset(xe->drm.dev, gt_sysfs_fini, gt); } diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index cca9cf536f76..9d82ea30f4df 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -37,6 +37,15 @@ static long tlb_timeout_jiffies(struct xe_gt *gt) return hw_tlb_timeout + 2 * delay; } +static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence) +{ + if (WARN_ON_ONCE(!fence->gt)) + return; + + xe_pm_runtime_put(gt_to_xe(fence->gt)); + fence->gt = NULL; /* fini() should be called once */ +} + static void __invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence) { @@ -63,6 +72,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work) struct xe_device *xe = gt_to_xe(gt); struct xe_gt_tlb_invalidation_fence *fence, *next; + LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker); + spin_lock_irq(&gt->tlb_invalidation.pending_lock); list_for_each_entry_safe(fence, next, &gt->tlb_invalidation.pending_fences, link) { @@ -204,7 +215,7 @@ static int send_tlb_invalidation(struct xe_guc *guc, tlb_timeout_jiffies(gt)); } spin_unlock_irq(&gt->tlb_invalidation.pending_lock); - } else if (ret < 0) { + } else { __invalidation_fence_signal(xe, fence); } if (!ret) { @@ -267,10 +278,8 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) xe_gt_tlb_invalidation_fence_init(gt, &fence, true); ret = xe_gt_tlb_invalidation_guc(gt, &fence); - if (ret < 0) { - xe_gt_tlb_invalidation_fence_fini(&fence); + if (ret) return ret; - } xe_gt_tlb_invalidation_fence_wait(&fence); } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) { @@ -496,7 +505,8 @@ static const struct dma_fence_ops invalidation_fence_ops = { * @stack: fence is stack variable * * Initialize TLB invalidation fence for use. 
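* The fence takes a runtime PM reference at init; the fini helper above drops
* it once the fence signals.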
xe_gt_tlb_invalidation_fence_fini - * must be called if fence is not signaled. + * will be automatically called when fence is signalled (all fences must signal), + * even on error. */ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, @@ -516,14 +526,3 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, dma_fence_get(&fence->base); fence->gt = gt; } - -/** - * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence - * @fence: TLB invalidation fence to finalize - * - * Drop PM ref which fence took durinig init. - */ -void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence) -{ - xe_pm_runtime_put(gt_to_xe(fence->gt)); -} diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index a84065fa324c..f430d5797af7 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -28,7 +28,6 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len); void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, bool stack); -void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence); static inline void xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence) diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index c3e6b51f7a09..42116b167c98 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -18,8 +18,10 @@ */ #define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat)) #define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch) -#define GUC_SUBMIT_VER(guc) MAKE_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY]) -#define GUC_FIRMWARE_VER(guc) MAKE_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_RELEASE]) +#define GUC_SUBMIT_VER(guc) \ + MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY]) +#define GUC_FIRMWARE_VER(guc) \ + MAKE_GUC_VER_STRUCT((guc)->fw.versions.found[XE_UC_FW_VER_RELEASE]) struct drm_printer; diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index f24dd5223926..9c505d3517cd 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -667,16 +667,12 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, num_g2h = 1; if (g2h_fence_needs_alloc(g2h_fence)) { - void *ptr; - g2h_fence->seqno = next_ct_seqno(ct, true); - ptr = xa_store(&ct->fence_lookup, - g2h_fence->seqno, - g2h_fence, GFP_ATOMIC); - if (IS_ERR(ptr)) { - ret = PTR_ERR(ptr); + ret = xa_err(xa_store(&ct->fence_lookup, + g2h_fence->seqno, g2h_fence, + GFP_ATOMIC)); + if (ret) goto out; - } } seqno = g2h_fence->seqno; @@ -879,14 +875,11 @@ retry: retry_same_fence: ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence); if (unlikely(ret == -ENOMEM)) { - void *ptr; - /* Retry allocation /w GFP_KERNEL */ - ptr = xa_store(&ct->fence_lookup, - g2h_fence.seqno, - &g2h_fence, GFP_KERNEL); - if (IS_ERR(ptr)) - return PTR_ERR(ptr); + ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno, + &g2h_fence, GFP_KERNEL)); + if (ret) + return ret; goto retry_same_fence; } else if (unlikely(ret)) { @@ -903,16 +896,35 @@ retry_same_fence: } ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ); + if (!ret) { - xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x", - g2h_fence.seqno, action[0]); + LNL_FLUSH_WORK(&ct->g2h_worker); + if 
(g2h_fence.done) { + xe_gt_warn(gt, "G2H fence %u, action %04x, done\n", + g2h_fence.seqno, action[0]); + ret = 1; + } + } + + /* + * Ensure we serialize with completion side to prevent UAF with fence going out of scope on + * the stack, since we have no clue if it will fire after the timeout before we can erase + * from the xa. Also we have some dependent loads and stores below for which we need the + * correct ordering, and we lack the needed barriers. + */ + mutex_lock(&ct->lock); + if (!ret) { + xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s", + g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done)); xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno); + mutex_unlock(&ct->lock); return -ETIME; } if (g2h_fence.retry) { xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n", action[0], g2h_fence.reason); + mutex_unlock(&ct->lock); goto retry; } if (g2h_fence.fail) { @@ -921,7 +933,12 @@ retry_same_fence: ret = -EIO; } - return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret; + if (ret > 0) + ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data; + + mutex_unlock(&ct->lock); + + return ret; } /** diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index fbbe6a487bbb..4f5d00aea716 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -224,64 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) EXEC_QUEUE_STATE_BANNED)); } -#ifdef CONFIG_PROVE_LOCKING -static int alloc_submit_wq(struct xe_guc *guc) -{ - int i; - - for (i = 0; i < NUM_SUBMIT_WQ; ++i) { - guc->submission_state.submit_wq_pool[i] = - alloc_ordered_workqueue("submit_wq", 0); - if (!guc->submission_state.submit_wq_pool[i]) - goto err_free; - } - - return 0; - -err_free: - while (i) - destroy_workqueue(guc->submission_state.submit_wq_pool[--i]); - - return -ENOMEM; -} - -static void free_submit_wq(struct xe_guc *guc) -{ - int i; - - for (i = 0; i < NUM_SUBMIT_WQ; ++i) - destroy_workqueue(guc->submission_state.submit_wq_pool[i]); -} - -static struct workqueue_struct *get_submit_wq(struct xe_guc *guc) -{ - int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ; - - return guc->submission_state.submit_wq_pool[idx]; -} -#else -static int alloc_submit_wq(struct xe_guc *guc) -{ - return 0; -} - -static void free_submit_wq(struct xe_guc *guc) -{ - -} - -static struct workqueue_struct *get_submit_wq(struct xe_guc *guc) -{ - return NULL; -} -#endif - static void guc_submit_fini(struct drm_device *drm, void *arg) { struct xe_guc *guc = arg; xa_destroy(&guc->submission_state.exec_queue_lookup); - free_submit_wq(guc); } static void guc_submit_wedged_fini(void *arg) @@ -290,9 +237,15 @@ static void guc_submit_wedged_fini(void *arg) struct xe_exec_queue *q; unsigned long index; - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) - if (exec_queue_wedged(q)) + mutex_lock(&guc->submission_state.lock); + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + if (exec_queue_wedged(q)) { + mutex_unlock(&guc->submission_state.lock); xe_exec_queue_put(q); + mutex_lock(&guc->submission_state.lock); + } + } + mutex_unlock(&guc->submission_state.lock); } static const struct xe_exec_queue_ops guc_exec_queue_ops; @@ -337,14 +290,12 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids) if (err) return err; - err = alloc_submit_wq(guc); - if (err) - return err; - gt->exec_queue_ops = &guc_exec_queue_ops; 
xa_init(&guc->submission_state.exec_queue_lookup); + init_waitqueue_head(&guc->submission_state.fini_wq); + primelockdep(guc); return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc); @@ -361,12 +312,14 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa xe_guc_id_mgr_release_locked(&guc->submission_state.idm, q->guc->id, q->width); + + if (xa_empty(&guc->submission_state.exec_queue_lookup)) + wake_up(&guc->submission_state.fini_wq); } static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) { int ret; - void *ptr; int i; /* @@ -386,12 +339,10 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) q->guc->id = ret; for (i = 0; i < q->width; ++i) { - ptr = xa_store(&guc->submission_state.exec_queue_lookup, - q->guc->id + i, q, GFP_NOWAIT); - if (IS_ERR(ptr)) { - ret = PTR_ERR(ptr); + ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup, + q->guc->id + i, q, GFP_NOWAIT)); + if (ret) goto err_release; - } } return 0; @@ -794,8 +745,6 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) { struct xe_sched_job *job = to_xe_sched_job(drm_job); - xe_exec_queue_update_run_ticks(job->q); - trace_xe_sched_job_free(job); xe_sched_job_put(job); } @@ -965,12 +914,22 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) { struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q)); - u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); - u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); + u32 ctx_timestamp, ctx_job_timestamp; u32 timeout_ms = q->sched_props.job_timeout_ms; u32 diff; u64 running_time_ms; + if (!xe_sched_job_started(job)) { + xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started", + xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job), + q->guc->id); + + return xe_sched_invalidate_job(job, 2); + } + + ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); + ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); + /* * Counter wraps at ~223s at the usual 19.2MHz; be paranoid and catch * possible overflows with a high timeout. @@ -1079,10 +1038,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) /* * TDR has fired before free job worker. Common if exec queue - * immediately closed after last fence signaled. + * immediately closed after last fence signaled. Add back to the pending + * list so the job can be freed, and kick the scheduler to ensure the + * free job is not lost. */ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) { - guc_exec_queue_free_job(drm_job); + xe_sched_add_pending_job(sched, job); + xe_sched_submission_start(sched); return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -1095,10 +1057,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) exec_queue_killed_or_banned_or_wedged(q) || exec_queue_destroyed(q); - /* Job hasn't started, can't be timed out */ - if (!skip_timeout_check && !xe_sched_job_started(job)) - goto rearm; - /* * XXX: Sampling timeout doesn't work in wedged mode as we have to * modify scheduling state to read timestamp. We could read the 
We could read the @@ -1268,13 +1226,16 @@ static void __guc_exec_queue_fini_async(struct work_struct *w) static void guc_exec_queue_fini_async(struct xe_exec_queue *q) { + struct xe_guc *guc = exec_queue_to_guc(q); + struct xe_device *xe = guc_to_xe(guc); + INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async); /* We must block on kernel engines so slabs are empty on driver unload */ if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q)) __guc_exec_queue_fini_async(&q->guc->fini_async); else - queue_work(system_wq, &q->guc->fini_async); + queue_work(xe->destroy_wq, &q->guc->fini_async); } static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) @@ -1452,8 +1413,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : msecs_to_jiffies(q->sched_props.job_timeout_ms); err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, - get_submit_wq(guc), - q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, + NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, timeout, guc_to_gt(guc)->ordered_wq, NULL, q->name, gt_to_xe(q->gt)->drm.dev); if (err) @@ -1770,8 +1730,13 @@ void xe_guc_submit_stop(struct xe_guc *guc) mutex_lock(&guc->submission_state.lock); - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to stop parallel queues */ + if (q->guc->id != index) + continue; + guc_exec_queue_stop(guc, q); + } mutex_unlock(&guc->submission_state.lock); @@ -1796,6 +1761,7 @@ static void guc_exec_queue_start(struct xe_exec_queue *q) } xe_sched_submission_start(sched); + xe_sched_submission_resume_tdr(sched); } int xe_guc_submit_start(struct xe_guc *guc) @@ -1808,8 +1774,13 @@ int xe_guc_submit_start(struct xe_guc *guc) mutex_lock(&guc->submission_state.lock); atomic_dec(&guc->submission_state.stopped); - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { + /* Prevent redundant attempts to start parallel queues */ + if (q->guc->id != index) + continue; + guc_exec_queue_start(q); + } mutex_unlock(&guc->submission_state.lock); wake_up_all(&guc->ct.wq); diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index 546ac6350a31..ed150fc09ad0 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -72,15 +72,10 @@ struct xe_guc { atomic_t stopped; /** @submission_state.lock: protects submission state */ struct mutex lock; -#ifdef CONFIG_PROVE_LOCKING -#define NUM_SUBMIT_WQ 256 - /** @submission_state.submit_wq_pool: submission ordered workqueues pool */ - struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ]; - /** @submission_state.submit_wq_idx: submission ordered workqueue index */ - int submit_wq_idx; -#endif /** @submission_state.enabled: submission is enabled */ bool enabled; + /** @submission_state.fini_wq: submit fini wait queue */ + wait_queue_head_t fini_wq; } submission_state; /** @hwconfig: Hardware config state */ struct { diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 63286ed8457f..2804f14f8f29 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -440,6 +440,10 @@ static void xe_oa_enable(struct xe_oa_stream *stream) val = __format_to_oactrl(format, regs->oa_ctrl_counter_select_mask) | __oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE; + if (GRAPHICS_VER(stream->oa->xe) >= 20 && + 
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG) + val |= OAG_OACONTROL_OA_PES_DISAG_EN; + xe_mmio_write32(stream->gt, regs->oa_ctrl, val); } @@ -705,8 +709,7 @@ static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable) { RING_CONTEXT_CONTROL(stream->hwe->mmio_base), regs_offset + CTX_CONTEXT_CONTROL, - _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE, - enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) + _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE), }, }; struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol }; @@ -738,10 +741,8 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable) { RING_CONTEXT_CONTROL(stream->hwe->mmio_base), regs_offset + CTX_CONTEXT_CONTROL, - _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE, - enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) | - _MASKED_FIELD(CTX_CTRL_RUN_ALONE, - enable ? CTX_CTRL_RUN_ALONE : 0), + _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) | + _MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0), }, }; struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol }; @@ -1259,7 +1260,6 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma) static const struct file_operations xe_oa_fops = { .owner = THIS_MODULE, - .llseek = no_llseek, .release = xe_oa_release, .poll = xe_oa_poll, .read = xe_oa_read, diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 937c3e064f0d..5e962e72c97e 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -924,6 +924,8 @@ static int xe_pci_resume(struct device *dev) if (err) return err; + pci_restore_state(pdev); + err = pci_enable_device(pdev); if (err) return err; diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 7cf2160fe040..33eb039053e4 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -123,7 +123,7 @@ int xe_pm_suspend(struct xe_device *xe) for_each_gt(gt, xe, id) xe_gt_suspend_prepare(gt); - xe_display_pm_suspend(xe, false); + xe_display_pm_suspend(xe); /* FIXME: Super racey... 
*/ err = xe_bo_evict_all(xe); @@ -133,7 +133,7 @@ int xe_pm_suspend(struct xe_device *xe) for_each_gt(gt, xe, id) { err = xe_gt_suspend(gt); if (err) { - xe_display_pm_resume(xe, false); + xe_display_pm_resume(xe); goto err; } } @@ -187,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe) for_each_gt(gt, xe, id) xe_gt_resume(gt); - xe_display_pm_resume(xe, false); + xe_display_pm_resume(xe); err = xe_bo_restore_user(xe); if (err) diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index d6353e8969f0..f27f579f4d85 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -2188,5 +2188,5 @@ void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops) pt_op->num_entries); } - xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred); + xe_pt_update_ops_fini(tile, vops); } diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index 28d9bb3b825d..848da8e68c7a 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -161,7 +161,11 @@ query_engine_cycles(struct xe_device *xe, cpu_clock); xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); - resp.width = 36; + + if (GRAPHICS_VER(xe) >= 20) + resp.width = 64; + else + resp.width = 36; /* Only write to the output fields of user query */ if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp)) diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c index bb3c2a830362..2e72c06fd40d 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -54,11 +54,12 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr, { struct xe_user_fence *ufence; u64 __user *ptr = u64_to_user_ptr(addr); + u64 __maybe_unused prefetch_val; - if (!access_ok(ptr, sizeof(*ptr))) + if (get_user(prefetch_val, ptr)) return ERR_PTR(-EFAULT); - ufence = kmalloc(sizeof(*ufence), GFP_KERNEL); + ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); if (!ufence) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index faa1bf42e50e..0d5e04158917 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -42,20 +42,48 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX), SET(CCCHKNREG1, L3CMPCTRL)) }, + { XE_RTP_NAME("Tuning: Compression Overfetch - media"), + XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX), + SET(XE2LPM_CCCHKNREG1, L3CMPCTRL)) + }, { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN)) }, + { XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"), + XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN)) + }, { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(SET(L3SQCREG2, COMPMEMRD256BOVRFETCHEN)) }, + { XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"), + XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2, + COMPMEMRD256BOVRFETCHEN)) + }, { XE_RTP_NAME("Tuning: Stateless compression control"), XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)), XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT, REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0))) }, + { XE_RTP_NAME("Tuning: 
Stateless compression control - media"), + XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)), + XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT, + REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0))) + }, + { XE_RTP_NAME("Tuning: L3 RW flush all Cache"), + XE_RTP_RULES(GRAPHICS_VERSION(2004)), + XE_RTP_ACTIONS(SET(SCRATCH3_LBCF, RWFLUSHALLEN)) + }, + { XE_RTP_NAME("Tuning: L3 RW flush all cache - media"), + XE_RTP_RULES(MEDIA_VERSION(2000)), + XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN)) + }, + {} }; diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 7acd5fc9d032..c99380271de6 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -1613,7 +1613,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) up_write(&vm->lock); - mutex_lock(&xe->usm.lock); + down_write(&xe->usm.lock); if (vm->usm.asid) { void *lookup; @@ -1623,7 +1623,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); xe_assert(xe, lookup == vm); } - mutex_unlock(&xe->usm.lock); + up_write(&xe->usm.lock); for_each_tile(tile, xe, id) xe_range_fence_tree_fini(&vm->rftree[id]); @@ -1765,25 +1765,18 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, if (IS_ERR(vm)) return PTR_ERR(vm); - mutex_lock(&xef->vm.lock); - err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); - mutex_unlock(&xef->vm.lock); - if (err) - goto err_close_and_put; - if (xe->info.has_asid) { - mutex_lock(&xe->usm.lock); + down_write(&xe->usm.lock); err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, XA_LIMIT(1, XE_MAX_ASID - 1), &xe->usm.next_asid, GFP_KERNEL); - mutex_unlock(&xe->usm.lock); + up_write(&xe->usm.lock); if (err < 0) - goto err_free_id; + goto err_close_and_put; vm->usm.asid = asid; } - args->vm_id = id; vm->xef = xe_file_get(xef); /* Record BO memory for VM pagetable created against client */ @@ -1796,12 +1789,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); #endif + /* user id alloc must always be last in ioctl to prevent UAF */ + err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); + if (err) + goto err_close_and_put; + + args->vm_id = id; + return 0; -err_free_id: - mutex_lock(&xef->vm.lock); - xa_erase(&xef->vm.xa, id); - mutex_unlock(&xef->vm.lock); err_close_and_put: xe_vm_close_and_put(vm); @@ -3203,10 +3199,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) ret = xe_gt_tlb_invalidation_vma(tile->primary_gt, &fence[fence_id], vma); - if (ret < 0) { - xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]); + if (ret) goto wait; - } ++fence_id; if (!tile->media_gt) @@ -3218,10 +3212,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) ret = xe_gt_tlb_invalidation_vma(tile->media_gt, &fence[fence_id], vma); - if (ret < 0) { - xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]); + if (ret) goto wait; - } ++fence_id; } } diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c index 5bcd59190353..80ba2fc78837 100644 --- a/drivers/gpu/drm/xe/xe_vram.c +++ b/drivers/gpu/drm/xe/xe_vram.c @@ -182,6 +182,7 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size) offset = offset_hi << 32; /* HW view bits 39:32 */ offset |= offset_lo << 6; /* HW view bits 31:6 */ offset *= num_enabled; /* convert to SW view */ + offset = round_up(offset, SZ_128K); /* SW must round up to nearest 128K */ /* We don't expect any holes */ xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size), 
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index d424992514a4..353936a0f877 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -710,6 +710,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = { DIS_PARTIAL_AUTOSTRIP | DIS_AUTOSTRIP)) }, + { XE_RTP_NAME("15016589081"), + XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)), + XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX)) + }, /* Xe2_HPG */ { XE_RTP_NAME("15010599737"), diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c index d46fa8374980..5b4264ea38bd 100644 --- a/drivers/gpu/drm/xe/xe_wait_user_fence.c +++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c @@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, } if (!timeout) { + LNL_FLUSH_WORKQUEUE(xe->ordered_wq); + err = do_compare(addr, args->value, args->mask, + args->op); + if (err <= 0) { + drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n"); + break; + } err = -ETIME; break; } @@ -169,9 +176,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, args->timeout = 0; } - if (!timeout && !(err < 0)) - err = -ETIME; - if (q) xe_exec_queue_put(q); diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c index 955c971c528d..a6f6779662a3 100644 --- a/drivers/gpu/host1x/context.c +++ b/drivers/gpu/host1x/context.c @@ -58,6 +58,7 @@ int host1x_memory_context_list_init(struct host1x *host1x) ctx->dev.parent = host1x->dev; ctx->dev.release = host1x_memory_context_release; + ctx->dev.dma_parms = &ctx->dma_parms; dma_set_max_seg_size(&ctx->dev, UINT_MAX); err = device_add(&ctx->dev); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index b62e4f0e8130..e98528777faa 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -625,12 +625,6 @@ static int host1x_probe(struct platform_device *pdev) goto free_contexts; } - err = host1x_intr_init(host); - if (err) { - dev_err(&pdev->dev, "failed to initialize interrupts\n"); - goto deinit_syncpt; - } - pm_runtime_enable(&pdev->dev); err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); @@ -642,6 +636,12 @@ static int host1x_probe(struct platform_device *pdev) if (err) goto pm_disable; + err = host1x_intr_init(host); + if (err) { + dev_err(&pdev->dev, "failed to initialize interrupts\n"); + goto pm_put; + } + host1x_debug_init(host); err = host1x_register(host); @@ -658,13 +658,11 @@ unregister: host1x_unregister(host); deinit_debugfs: host1x_debug_deinit(host); - + host1x_intr_deinit(host); +pm_put: pm_runtime_put_sync_suspend(&pdev->dev); pm_disable: pm_runtime_disable(&pdev->dev); - - host1x_intr_deinit(host); -deinit_syncpt: host1x_syncpt_deinit(host); free_contexts: host1x_memory_context_list_free(&host->context_list);
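In the xe_wait_user_fence.c hunk above, the point is to flush the ordered workqueue once before declaring -ETIME, since a still-queued work item may be about to signal the fence being polled. A hedged, kernel-style sketch of that flush-then-recheck pattern follows; demo_wait_cond and its parameters are hypothetical, and the driver's LNL_FLUSH_WORKQUEUE() macro wraps such a flush for the affected platform:

static int demo_wait_cond(struct workqueue_struct *wq,
			  bool (*cond)(void *), void *arg,
			  unsigned long timeout_jiffies)
{
	unsigned long end = jiffies + timeout_jiffies;

	while (!cond(arg)) {
		if (time_after(jiffies, end)) {
			/*
			 * Last chance before failing: a queued work item
			 * may still publish the value we are polling for.
			 */
			flush_workqueue(wq);
			return cond(arg) ? 0 : -ETIME;
		}
		usleep_range(100, 200);	/* back off between polls */
	}
	return 0;
}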