author    | Xiang Liu <xiang.liu@amd.com>             | 2025-02-11 19:45:52 +0800
committer | Alex Deucher <alexander.deucher@amd.com>  | 2025-02-17 14:09:30 -0500
commit    | f9d35b945c599e8dbed17f484e82b4ad3d21721a (patch)
tree      | e082c3ccb913618f60a420c07a0ff79b8b06c359 /drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
parent    | 4058e7cbfd0fb0cae7cbb8035bb43c593cc7c964 (diff)
download  | linux-stable-f9d35b945c599e8dbed17f484e82b4ad3d21721a.tar.gz
          | linux-stable-f9d35b945c599e8dbed17f484e82b4ad3d21721a.tar.bz2
          | linux-stable-f9d35b945c599e8dbed17f484e82b4ad3d21721a.zip
drm/amdgpu: Generate bad page threshold cper records
Generate a CPER record when the bad page threshold is exceeded and
commit it to the CPER ring.
v2: return -ENOMEM instead of false
v2: check the return value of the section fill function
Signed-off-by: Xiang Liu <xiang.liu@amd.com>
Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c | 24 |
1 file changed, 23 insertions, 1 deletion
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 26e0655e7ed4..8805381e19b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -207,7 +207,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
                                             NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
 
         amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
-                                            CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
+                                            CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
                                             NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
 
         section->hdr.valid_bits.err_info_cnt = 1;
@@ -308,6 +308,28 @@ int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
         return 0;
 }
 
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
+{
+        struct cper_hdr *bp_threshold = NULL;
+        struct amdgpu_ring *ring = &adev->cper.ring_buf;
+        int ret;
+
+        bp_threshold = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_BP_THRESHOLD, 1);
+        if (!bp_threshold) {
+                dev_err(adev->dev, "fail to alloc cper entry for bad page threshold record\n");
+                return -ENOMEM;
+        }
+
+        amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
+        ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
+        if (ret)
+                return ret;
+
+        amdgpu_cper_ring_write(ring, bp_threshold, bp_threshold->record_length);
+
+        return 0;
+}
+
 static enum cper_error_severity amdgpu_aca_err_type_to_cper_sev(struct amdgpu_device *adev,
                                                                  enum aca_error_type aca_err_type)
 {
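The hunks above only add the new entry point; no caller is wired up in this file. As a rough illustration, a RAS bad-page accounting path could invoke it along the lines of the sketch below once the retired-page count crosses the configured threshold. The helper name, its parameters, and the surrounding logic are assumptions for illustration only; the only symbol taken from this patch is amdgpu_cper_generate_bp_threshold_record().

/*
 * Illustrative sketch only, not part of this patch.  The helper and its
 * parameters are hypothetical; they stand in for whatever code tracks the
 * number of retired (bad) pages against the configured threshold.
 */
static void example_ras_bad_page_accounting(struct amdgpu_device *adev,
                                            unsigned int bad_page_cnt,
                                            unsigned int bad_page_threshold)
{
        int ret;

        if (bad_page_cnt < bad_page_threshold)
                return;

        /* Threshold reached: commit a bad-page-threshold record to the CPER ring. */
        ret = amdgpu_cper_generate_bp_threshold_record(adev);
        if (ret)
                dev_warn(adev->dev,
                         "failed to generate bad page threshold CPER record (%d)\n", ret);
}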