// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"

/**
 * DOC: DMC wakelock support
 *
 * The wakelock is the mechanism by which software causes the display
 * engine to exit DC states, allowing it to program registers that are
 * powered down in those states.  On previous hardware, the display
 * engine exited DC states automatically and implicitly when it
 * detected register programming.  Now the driver controls the exit
 * explicitly by asserting a wakelock in DMC, which forces the hardware
 * out of the DC state until the wakelock is deasserted.  This improves
 * system performance and system interactions and better fits the flip
 * queue style of programming.  The wakelock is only required when DC5,
 * DC6, or DC6v have been enabled in DC_STATE_EN and the wakelock mode
 * of operation has been enabled.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register.  There are also 13 control registers
 * that can be used to hold and release different wakelocks.  In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used.  The other definitions are here for
 * potential future use.
 */
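
/*
 * As an illustration of the intended calling pattern, here is a
 * minimal, hypothetical sketch (the real MMIO accessors live in
 * intel_de.h; example_read() is not a real function): an accessor
 * brackets its access with a get/put pair so that the hardware stays
 * out of the DC state for the duration of the access:
 *
 *	static u32 example_read(struct intel_display *display, i915_reg_t reg)
 *	{
 *		u32 val;
 *
 *		intel_dmc_wl_get(display, reg);
 *		val = intel_de_read_fw(display, reg);
 *		intel_dmc_wl_put(display, reg);
 *
 *		return val;
 *	}
 */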

/*
 * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use
 * the atomic variant of the MMIO waiting routine, which takes its
 * timeout in microseconds.
 */
#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50 /* in milliseconds */

/*
 * Possible non-negative values for the enable_dmc_wl param.
 */
enum {
	ENABLE_DMC_WL_DISABLED,
	ENABLE_DMC_WL_ENABLED,
	ENABLE_DMC_WL_ANY_REGISTER,
	ENABLE_DMC_WL_ALWAYS_LOCKED,
	ENABLE_DMC_WL_MAX,
};
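
/*
 * Note that a negative enable_dmc_wl value means "use the per-platform
 * default" and is resolved in intel_dmc_wl_sanitize_param() below.
 * Assuming the parameter is exposed as a module parameter, booting
 * with e.g. i915.enable_dmc_wl=2 would select the "match any register"
 * mode.
 */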

struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};
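
/*
 * In the range tables below, an entry that sets only .start describes
 * a single register: intel_dmc_wl_reg_in_range() treats a zero .end as
 * equal to .start.  The empty entry at the end terminates each table.
 */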

static const struct intel_dmc_wl_range powered_off_ranges[] = {
	{ .start = 0x44400, .end = 0x4447f }, /* PIPE interrupt registers */
	{ .start = 0x60000, .end = 0x7ffff },
	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
	{ .start = 0x45500 }, /* DC_STATE_SEL */
	{ .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
	{ .start = 0x45504 }, /* DC_STATE_EN */
	{ .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
	{ .start = 0x454f0 }, /* RETENTION_CTRL */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */

	/* TRANS_CMTG_CTL_* */
	{ .start = 0x6fa88 },
	{ .start = 0x6fb88 },

	{ .start = 0x46430 }, /* CHICKEN_DCPR_1 */
	{ .start = 0x46434 }, /* CHICKEN_DCPR_2 */
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
	{ .start = 0x42084 }, /* CHICKEN_MISC_2 */
	{ .start = 0x42088 }, /* CHICKEN_MISC_3 */
	{ .start = 0x46160 }, /* CMTG_CLK_SEL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
	{ .start = 0x45230 }, /* INITIATE_PM_DMD_REQ */

	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */

	{ .start = 0x45504 }, /* DC_STATE_EN */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */

	/* Scanline registers */
	{ .start = 0x70000 },
	{ .start = 0x70004 },
	{ .start = 0x70014 },
	{ .start = 0x70018 },
	{ .start = 0x71000 },
	{ .start = 0x71004 },
	{ .start = 0x71014 },
	{ .start = 0x71018 },
	{ .start = 0x72000 },
	{ .start = 0x72004 },
	{ .start = 0x72014 },
	{ .start = 0x72018 },
	{ .start = 0x73000 },
	{ .start = 0x73004 },
	{ .start = 0x73014 },
	{ .start = 0x73018 },
	{ .start = 0x7b000 },
	{ .start = 0x7b004 },
	{ .start = 0x7b014 },
	{ .start = 0x7b018 },
	{ .start = 0x7c000 },
	{ .start = 0x7c004 },
	{ .start = 0x7c014 },
	{ .start = 0x7c018 },

	{},
};

/*
 * Postpone the hardware release by DMC_WAKELOCK_HOLD_TIME so that
 * back-to-back register accesses don't needlessly toggle the hardware
 * lock.  Called with wl->lock held and the refcount already at zero.
 */
static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(i915->unordered_wq, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct intel_display *display =
		container_of(wl, struct intel_display, wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if the refcount became non-zero while we were waiting
	 * for the spinlock, meaning that the wakelock has been taken
	 * again in the meantime.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK, 0,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

static void __intel_dmc_wl_take(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet.  It may already be marked as taken at this point if the
	 * last reference was released but the release work has not run
	 * yet.
	 */
	if (wl->taken)
		return;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
			    DMC_WAKELOCK_CTL_REQ);

	/*
	 * We need to use the atomic variant of the waiting routine
	 * because the DMC wakelock is also taken in atomic context.
	 */
	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
		WARN_RATELIMIT(1, "DMC wakelock ack timed out");
		return;
	}

	wl->taken = true;
}

static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
				      const struct intel_dmc_wl_range ranges[])
{
	u32 offset = i915_mmio_reg_offset(reg);

	for (int i = 0; ranges[i].start; i++) {
		u32 end = ranges[i].end ?: ranges[i].start;

		if (ranges[i].start <= offset && offset <= end)
			return true;
	}

	return false;
}

static bool intel_dmc_wl_check_range(struct intel_display *display,
				     i915_reg_t reg,
				     u32 dc_state)
{
	const struct intel_dmc_wl_range *ranges;

	if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
		return true;

	/*
	 * Check that the offset is in one of the ranges for which
	 * registers are powered off during DC states.
	 */
	if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
		return true;

	/*
	 * Check that the offset is for a register that is touched by
	 * the DMC and requires a DC exit for proper access.
	 */
	switch (dc_state) {
	case DC_STATE_EN_DC3CO:
		ranges = xe3lpd_dc3co_dmc_ranges;
		break;
	case DC_STATE_EN_UPTO_DC5:
	case DC_STATE_EN_UPTO_DC6:
		ranges = xe3lpd_dc5_dc6_dmc_ranges;
		break;
	default:
		ranges = NULL;
	}

	if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
		return true;

	return false;
}

static bool __intel_dmc_wl_supported(struct intel_display *display)
{
	return display->params.enable_dmc_wl;
}

static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
	const char *desc;

	if (!HAS_DMC_WAKELOCK(display)) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl < 0) {
		if (DISPLAY_VER(display) >= 30)
			display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
		else
			display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
	}

	drm_WARN_ON(display->drm,
		    display->params.enable_dmc_wl < 0 ||
		    display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX);

	switch (display->params.enable_dmc_wl) {
	case ENABLE_DMC_WL_DISABLED:
		desc = "disabled";
		break;
	case ENABLE_DMC_WL_ENABLED:
		desc = "enabled";
		break;
	case ENABLE_DMC_WL_ANY_REGISTER:
		desc = "match any register";
		break;
	case ENABLE_DMC_WL_ALWAYS_LOCKED:
		desc = "always locked";
		break;
	default:
		desc = "unknown";
		break;
	}

	drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
		    display->params.enable_dmc_wl, desc);
}

void intel_dmc_wl_init(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	intel_dmc_wl_sanitize_param(display);

	if (!display->params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount,
		     display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}

/* Must only be called as part of enabling dynamic DC states. */
void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	wl->dc_state = dc_state;

	if (drm_WARN_ON(display->drm, wl->enabled))
		goto out_unlock;

	/*
	 * Enable the wakelock mechanism in DMC.  We must not try to
	 * take the wakelock for this access, since we are the ones
	 * enabling the mechanism, so use the non-wakelock (_nowl) MMIO
	 * variant directly here.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;

	/*
	 * This would be racy in the following scenario:
	 *
	 *   1. Function A calls intel_dmc_wl_get();
	 *   2. Some function calls intel_dmc_wl_disable();
	 *   3. Some function calls intel_dmc_wl_enable();
	 *   4. Concurrently with (3), function A performs the MMIO in between
	 *      setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
	 *      __intel_dmc_wl_take().
	 *
	 * TODO: Check with the hardware team whether it is safe to assert the
	 * hardware lock before enabling to avoid such a scenario. Otherwise, we
	 * would need to deal with it via software synchronization.
	 */
	if (refcount_read(&wl->refcount))
		__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	intel_dmc_wl_flush_release_work(display);

	spin_lock_irqsave(&wl->lock, flags);

	if (drm_WARN_ON(display->drm, !wl->enabled))
		goto out_unlock;

	/* Disable wakelock in DMC */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	wl->enabled = false;

	/*
	 * The spec is not explicit about the expectation of existing
	 * lock users at the moment of disabling, but it does say that we must
	 * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
	 * disable with existing lock users.
	 *
	 * TODO: Get the correct expectation from the hardware team.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	if (!__intel_dmc_wl_supported(display))
		return;

	flush_delayed_work(&wl->work);
}
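
/*
 * A condensed view of the expected call flow, as a hypothetical
 * pseudo-caller (the real call sites are spread across the DC state
 * and MMIO code, not gathered in a single function like this):
 *
 *	intel_dmc_wl_init(display);              at driver init
 *	intel_dmc_wl_enable(display, dc_state);  when DC states are enabled
 *	intel_dmc_wl_get(display, reg);          around register access
 *	intel_dmc_wl_put(display, reg);
 *	intel_dmc_wl_disable(display);           when DC states are disabled
 */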

void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	/*
	 * If the wakelock mechanism is not enabled in hardware yet, only
	 * track the reference count: intel_dmc_wl_enable() will assert
	 * the lock in hardware if the count is non-zero by the time the
	 * mechanism is enabled.
	 */
	if (!wl->enabled) {
		if (!refcount_inc_not_zero(&wl->refcount))
			refcount_set(&wl->refcount, 1);
		goto out_unlock;
	}

	/*
	 * Cancel any pending deferred release so that the hardware lock
	 * is not dropped underneath us.
	 */
	cancel_delayed_work(&wl->work);

	/*
	 * If the refcount was already non-zero, the wakelock is held in
	 * hardware and we are done.  Otherwise this is the 0 -> 1
	 * transition, so assert the lock in hardware below.
	 */
	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount) && wl->enabled)
		__intel_dmc_wl_release(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_get_noreg(struct intel_display *display)
{
	intel_dmc_wl_get(display, INVALID_MMIO_REG);
}

void intel_dmc_wl_put_noreg(struct intel_display *display)
{
	intel_dmc_wl_put(display, INVALID_MMIO_REG);
}
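
/*
 * Usage sketch for the noreg variants (hypothetical caller; reg_low
 * and reg_high are placeholder names, not real register definitions):
 * a caller that must perform several dependent MMIO accesses can hold
 * the wakelock once across the whole sequence instead of toggling it
 * per register:
 *
 *	intel_dmc_wl_get_noreg(display);
 *	low = intel_de_read_fw(display, reg_low);
 *	high = intel_de_read_fw(display, reg_high);
 *	intel_dmc_wl_put_noreg(display);
 */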