| author | Alex Deucher <alexander.deucher@amd.com> | 2012-10-25 17:02:17 -0400 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2013-06-27 10:49:22 -0400 |
| commit | 7d99e5177477866fce3df146d4fe378248032230 (patch) | |
| tree | 080fc3d6bf0ee750503decdc0cc917a636dd6f6b | |
| parent | 3a4d8f7b61378d0811ac892a77d4434b01f17d1c (diff) | |
| download | linux-stable-7d99e5177477866fce3df146d4fe378248032230.tar.gz, linux-stable-7d99e5177477866fce3df146d4fe378248032230.tar.bz2, linux-stable-7d99e5177477866fce3df146d4fe378248032230.zip | |
drm/radeon/kms: fix up 6xx/7xx display watermark calc for dpm
Calculate the low and high watermarks based on the low and high
clocks for the current power state. The dynamic pm hw will select
the appropriate watermark based on the internal dpm state.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/radeon/rv515.c | 224 |
1 file changed, 132 insertions, 92 deletions
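The core of the change is in the first two hunks of the diff below: when dynamic power management is active on rv6xx/rv7xx parts, the engine clock used for the watermark math is taken from radeon_dpm_get_sclk() for either the low or the high power state instead of rdev->pm.current_sclk, and it is converted to MHz in 20.12 fixed point before use. As a rough illustration of that conversion (a standalone sketch, not driver code: the fx_* helpers below are stand-ins for the kernel's dfixed_* macros, and the divide by 100 assumes the driver keeps clocks in 10 kHz units, which is what the patch's "sclk in Mhz" comment implies):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's 20.12 fixed-point helpers (dfixed_const,
 * dfixed_div, dfixed_trunc); illustrative only, truncating rather than
 * rounding. */
static uint32_t fx_const(uint32_t v)           { return v << 12; }
static uint32_t fx_div(uint32_t a, uint32_t b) { return (uint32_t)(((uint64_t)a << 12) / b); }
static uint32_t fx_trunc(uint32_t v)           { return v >> 12; }

int main(void)
{
	/* Hypothetical dpm-selected engine clock in 10 kHz units: 60000 -> 600 MHz. */
	uint32_t selected_sclk = 60000;

	/* Mirrors the patched hunk: sclk (MHz) = selected_sclk / 100, in fixed point. */
	uint32_t sclk = fx_div(fx_const(selected_sclk), fx_const(100));

	printf("sclk = %u MHz\n", (unsigned)fx_trunc(sclk));
	return 0;
}
```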
```diff
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 21c7d7b26e55..8ea1573ae820 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -937,13 +937,16 @@ struct rv515_watermark {
 };
 
 static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
-					 struct radeon_crtc *crtc,
-					 struct rv515_watermark *wm)
+					 struct radeon_crtc *crtc,
+					 struct rv515_watermark *wm,
+					 bool low)
 {
 	struct drm_display_mode *mode = &crtc->base.mode;
 	fixed20_12 a, b, c;
 	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
 	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+	fixed20_12 sclk;
+	u32 selected_sclk;
 
 	if (!crtc->base.enabled) {
 		/* FIXME: wouldn't it better to set priority mark to maximum */
@@ -951,6 +954,18 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 		return;
 	}
 
+	/* rv6xx, rv7xx */
+	if ((rdev->family >= CHIP_RV610) &&
+	    (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		selected_sclk = radeon_dpm_get_sclk(rdev, low);
+	else
+		selected_sclk = rdev->pm.current_sclk;
+
+	/* sclk in Mhz */
+	a.full = dfixed_const(100);
+	sclk.full = dfixed_const(selected_sclk);
+	sclk.full = dfixed_div(sclk, a);
+
 	if (crtc->vsc.full > dfixed_const(2))
 		wm->num_line_pair.full = dfixed_const(2);
 	else
@@ -1016,7 +1031,7 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	 * sclk = system clock(Mhz)
 	 */
 	a.full = dfixed_const(600 * 1000);
-	chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+	chunk_time.full = dfixed_div(a, sclk);
 	read_delay_latency.full = dfixed_const(1000);
 
 	/* Determine the worst case latency
@@ -1077,152 +1092,177 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	}
 }
 
-void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+static void rv515_compute_mode_priority(struct radeon_device *rdev,
+					struct rv515_watermark *wm0,
+					struct rv515_watermark *wm1,
+					struct drm_display_mode *mode0,
+					struct drm_display_mode *mode1,
+					u32 *d1mode_priority_a_cnt,
+					u32 *d2mode_priority_a_cnt)
 {
-	struct drm_display_mode *mode0 = NULL;
-	struct drm_display_mode *mode1 = NULL;
-	struct rv515_watermark wm0;
-	struct rv515_watermark wm1;
-	u32 tmp;
-	u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
-	u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
 	fixed20_12 priority_mark02, priority_mark12, fill_rate;
 	fixed20_12 a, b;
 
-	if (rdev->mode_info.crtcs[0]->base.enabled)
-		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
-	if (rdev->mode_info.crtcs[1]->base.enabled)
-		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
-	rs690_line_buffer_adjust(rdev, mode0, mode1);
-
-	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
-	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
-
-	tmp = wm0.lb_request_fifo_depth;
-	tmp |= wm1.lb_request_fifo_depth << 16;
-	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+	*d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+	*d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
 
 	if (mode0 && mode1) {
-		if (dfixed_trunc(wm0.dbpp) > 64)
-			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
 		else
-			a.full = wm0.num_line_pair.full;
-		if (dfixed_trunc(wm1.dbpp) > 64)
-			b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+			a.full = wm0->num_line_pair.full;
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
 		else
-			b.full = wm1.num_line_pair.full;
+			b.full = wm1->num_line_pair.full;
 		a.full += b.full;
-		fill_rate.full = dfixed_div(wm0.sclk, a);
-		if (wm0.consumption_rate.full > fill_rate.full) {
-			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm0.active_time);
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
 			a.full = dfixed_const(16);
 			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			priority_mark02.full = a.full + b.full;
 		} else {
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			b.full = dfixed_const(16 * 1000);
 			priority_mark02.full = dfixed_div(a, b);
 		}
-		if (wm1.consumption_rate.full > fill_rate.full) {
-			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm1.active_time);
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
 			a.full = dfixed_const(16);
 			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			priority_mark12.full = a.full + b.full;
 		} else {
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			b.full = dfixed_const(16 * 1000);
 			priority_mark12.full = dfixed_div(a, b);
 		}
-		if (wm0.priority_mark.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
 		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
-		if (wm0.priority_mark_max.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark_max.full;
-		if (wm1.priority_mark.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark.full;
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
 		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
-		if (wm1.priority_mark_max.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark_max.full;
-		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
-		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2) {
-			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
-			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 		}
 	} else if (mode0) {
-		if (dfixed_trunc(wm0.dbpp) > 64)
-			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+		if (dfixed_trunc(wm0->dbpp) > 64)
+			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
 		else
-			a.full = wm0.num_line_pair.full;
-		fill_rate.full = dfixed_div(wm0.sclk, a);
-		if (wm0.consumption_rate.full > fill_rate.full) {
-			b.full = wm0.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = wm0->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0->sclk, a);
+		if (wm0->consumption_rate.full > fill_rate.full) {
+			b.full = wm0->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0->active_time);
 			a.full = dfixed_const(16);
 			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			priority_mark02.full = a.full + b.full;
 		} else {
-			a.full = dfixed_mul(wm0.worst_case_latency,
-					    wm0.consumption_rate);
+			a.full = dfixed_mul(wm0->worst_case_latency,
+					    wm0->consumption_rate);
 			b.full = dfixed_const(16);
 			priority_mark02.full = dfixed_div(a, b);
 		}
-		if (wm0.priority_mark.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark.full;
+		if (wm0->priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark.full;
 		if (dfixed_trunc(priority_mark02) < 0)
 			priority_mark02.full = 0;
-		if (wm0.priority_mark_max.full > priority_mark02.full)
-			priority_mark02.full = wm0.priority_mark_max.full;
-		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (wm0->priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0->priority_mark_max.full;
+		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
 		if (rdev->disp_priority == 2)
-			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 	} else if (mode1) {
-		if (dfixed_trunc(wm1.dbpp) > 64)
-			a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+		if (dfixed_trunc(wm1->dbpp) > 64)
+			a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
 		else
-			a.full = wm1.num_line_pair.full;
-		fill_rate.full = dfixed_div(wm1.sclk, a);
-		if (wm1.consumption_rate.full > fill_rate.full) {
-			b.full = wm1.consumption_rate.full - fill_rate.full;
-			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = wm1->num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1->sclk, a);
+		if (wm1->consumption_rate.full > fill_rate.full) {
+			b.full = wm1->consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1->active_time);
 			a.full = dfixed_const(16);
 			b.full = dfixed_div(b, a);
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			priority_mark12.full = a.full + b.full;
 		} else {
-			a.full = dfixed_mul(wm1.worst_case_latency,
-					    wm1.consumption_rate);
+			a.full = dfixed_mul(wm1->worst_case_latency,
+					    wm1->consumption_rate);
 			b.full = dfixed_const(16 * 1000);
 			priority_mark12.full = dfixed_div(a, b);
 		}
-		if (wm1.priority_mark.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark.full;
+		if (wm1->priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark.full;
 		if (dfixed_trunc(priority_mark12) < 0)
 			priority_mark12.full = 0;
-		if (wm1.priority_mark_max.full > priority_mark12.full)
-			priority_mark12.full = wm1.priority_mark_max.full;
-		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (wm1->priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1->priority_mark_max.full;
+		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
 		if (rdev->disp_priority == 2)
-			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 	}
+}
+
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	struct rv515_watermark wm0_high, wm0_low;
+	struct rv515_watermark wm1_high, wm1_low;
+	u32 tmp;
+	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
+	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
+
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, false);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, false);
+
+	tmp = wm0_high.lb_request_fifo_depth;
+	tmp |= wm1_high.lb_request_fifo_depth << 16;
+	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+
+	rv515_compute_mode_priority(rdev,
+				    &wm0_high, &wm1_high,
+				    mode0, mode1,
+				    &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
+	rv515_compute_mode_priority(rdev,
+				    &wm0_low, &wm1_low,
+				    mode0, mode1,
+				    &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
 
 	WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
-	WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+	WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
 	WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
-	WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+	WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
 }
 
 void rv515_bandwidth_update(struct radeon_device *rdev)
```
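In the rewritten rv515_bandwidth_avivo_update() above, the watermarks computed with the high clocks program the D1/D2MODE_PRIORITY_A_CNT registers, and the ones computed with the low clocks program the D1/D2MODE_PRIORITY_B_CNT registers, so the hardware has a valid mark for whichever dpm state it is in. The snippet below is a back-of-the-envelope illustration (the clock values are made up) of why one watermark cannot serve both states: the chunk-time term from rv515_crtc_bandwidth_compute(), 600000 / sclk(MHz), grows as the engine clock drops, which pushes the low-state priority mark higher.

```c
#include <stdio.h>

/* Illustrative only: the chunk-time term used by the watermark math,
 * chunk_time = 600000 / sclk(MHz); plain integer math is enough here. */
static unsigned int chunk_time(unsigned int sclk_mhz)
{
	return 600 * 1000 / sclk_mhz;
}

int main(void)
{
	unsigned int low_sclk = 300;   /* hypothetical dpm low-state engine clock, MHz */
	unsigned int high_sclk = 750;  /* hypothetical dpm high-state engine clock, MHz */

	printf("chunk_time @ %u MHz = %u\n", high_sclk, chunk_time(high_sclk));
	printf("chunk_time @ %u MHz = %u\n", low_sclk, chunk_time(low_sclk));
	return 0;
}
```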