/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
#ifndef __A6XX_GPU_H__
#define __A6XX_GPU_H__
#include "adreno_gpu.h"
#include "a6xx.xml.h"
#include "a6xx_gmu.h"
extern bool hang_debug;
struct cpu_gpu_lock {
	uint32_t gpu_req;
	uint32_t cpu_req;
	uint32_t turn;
	union {
		struct {
			uint16_t list_length;
			uint16_t list_offset;
		};
		struct {
			uint8_t ifpc_list_len;
			uint8_t preemption_list_len;
			uint16_t dynamic_list_len;
		};
	};
	uint64_t regs[62];
};
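/*
 * Illustrative sketch (not part of this header): the gpu_req/cpu_req/turn
 * triplet above forms a Peterson-style mutual exclusion protocol between the
 * CPU and the CP microcode. A CPU-side acquire could look roughly like the
 * helper below. The helper name, the busy-wait policy and the encoding of
 * 'turn' (0 assumed to mean "GPU's turn") are assumptions, not the driver's
 * actual code.
 */
static inline void cpu_gpu_lock_acquire_example(struct cpu_gpu_lock *lock)
{
	/* Announce our intent and yield the tie-break to the GPU */
	WRITE_ONCE(lock->cpu_req, 1);
	WRITE_ONCE(lock->turn, 0);

	/* Order the flag/turn stores before reading the GPU's flag */
	smp_mb();

	/* Spin while the GPU also wants the lock and it is its turn */
	while (READ_ONCE(lock->gpu_req) && READ_ONCE(lock->turn) == 0)
		cpu_relax();
}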
/**
 * struct a6xx_info - a6xx specific information from device table
 *
 * @hwcg: hw clock gating register sequence
 * @protect: CP_PROTECT settings
 * @pwrup_reglist: power-up register list used for preemption
 * @gmu_chipid: chip id as reported to the GMU firmware
 * @gmu_cgc_mode: GMU clock gating control mode
 * @prim_fifo_threshold: primitive FIFO threshold setting
 * @bcms: list of BCMs used for bandwidth voting through the GMU
 */
struct a6xx_info {
	const struct adreno_reglist *hwcg;
	const struct adreno_protect *protect;
	const struct adreno_reglist_list *pwrup_reglist;
	u32 gmu_chipid;
	u32 gmu_cgc_mode;
	u32 prim_fifo_threshold;
	const struct a6xx_bcm *bcms;
};
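/*
 * Illustrative usage (a sketch, not copied from the device table): each GPU
 * entry in the adreno catalog supplies one of these via a pointer, roughly:
 *
 *	.a6xx = &(const struct a6xx_info) {
 *		.hwcg = axxx_hwcg,		// hypothetical reglist name
 *		.protect = &axxx_protect,	// hypothetical protect table
 *		.prim_fifo_threshold = 0x00018000,	// hypothetical value
 *	},
 *
 * The ".a6xx" field name and the table names above are assumptions; the real
 * entries live in the adreno catalog/device table sources.
 */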
struct a6xx_gpu {
	struct adreno_gpu base;

	struct drm_gem_object *sqe_bo;
	uint64_t sqe_iova;

	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	void *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
	struct drm_gem_object *preempt_smmu_bo[MSM_GPU_MAX_RINGS];
	void *preempt_smmu[MSM_GPU_MAX_RINGS];
	uint64_t preempt_smmu_iova[MSM_GPU_MAX_RINGS];
	uint32_t last_seqno[MSM_GPU_MAX_RINGS];

	atomic_t preempt_state;
	spinlock_t eval_lock;
	struct timer_list preempt_timer;

	unsigned int preempt_level;
	bool uses_gmem;
	bool skip_save_restore;

	struct drm_gem_object *preempt_postamble_bo;
	void *preempt_postamble_ptr;
	uint64_t preempt_postamble_iova;
	uint64_t preempt_postamble_len;
	bool postamble_enabled;

	struct a6xx_gmu gmu;

	struct drm_gem_object *shadow_bo;
	uint64_t shadow_iova;
	uint32_t *shadow;

	struct drm_gem_object *pwrup_reglist_bo;
	void *pwrup_reglist_ptr;
	uint64_t pwrup_reglist_iova;
	bool pwrup_reglist_emitted;

	bool has_whereami;

	void __iomem *llc_mmio;
	void *llc_slice;
	void *htw_llc_slice;
	bool have_mmu500;
	bool hung;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
/*
 * In order to do lockless preemption we use a simple state machine to progress
 * through the process.
 *
 * PREEMPT_NONE - No preemption in progress. Next state: START.
 * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
 * states: TRIGGERED, NONE
 * PREEMPT_FINISH - An intermediate state before moving back to NONE. Next
 * state: NONE.
 * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
 * states: FAULTED, PENDING
 * PREEMPT_FAULTED - A preemption timed out (never completed). This will trigger
 * recovery. Next state: N/A
 * PREEMPT_PENDING - The preemption-complete interrupt has fired; the callback
 * is checking the success of the operation. Next states: FAULTED, NONE.
 */
enum a6xx_preempt_state {
	PREEMPT_NONE = 0,
	PREEMPT_START,
	PREEMPT_FINISH,
	PREEMPT_TRIGGERED,
	PREEMPT_FAULTED,
	PREEMPT_PENDING,
};
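/*
 * Illustrative sketch (not part of this header): transitions in the state
 * machine above are typically made with a compare-and-swap so that a stale
 * transition (for example a late timer firing after the preemption already
 * completed) is rejected instead of clobbering the current state. The helper
 * name below is hypothetical.
 */
static inline bool a6xx_try_preempt_state_example(struct a6xx_gpu *a6xx_gpu,
		enum a6xx_preempt_state cur, enum a6xx_preempt_state new)
{
	/* Succeeds only if the state is still 'cur' when we move it to 'new' */
	return atomic_cmpxchg(&a6xx_gpu->preempt_state, cur, new) == cur;
}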
/*
* struct a6xx_preempt_record is a shared buffer between the microcode and the
* CPU to store the state for preemption. The record itself is much larger
* (2112k) but most of that is used by the CP for storage.
*
* There is a preemption record assigned per ringbuffer. When the CPU triggers a
* preemption, it fills out the record with the useful information (wptr, ring
* base, etc) and the microcode uses that information to set up the CP following
* the preemption. When a ring is switched out, the CP will save the ringbuffer
* state back to the record. In this way, once the records are properly set up
* the CPU can quickly switch back and forth between ringbuffers by only
* updating a few registers (often only the wptr).
*
* These are the CPU aware registers in the record:
* @magic: Must always be 0xAE399D6EUL
* @info: Type of the record - written 0 by the CPU, updated by the CP
* @errno: preemption error record
* @data: Data field in YIELD and SET_MARKER packets, Written and used by CP
* @cntl: Value of RB_CNTL written by CPU, save/restored by CP
* @rptr: Value of RB_RPTR written by CPU, save/restored by CP
* @wptr: Value of RB_WPTR written by CPU, save/restored by CP
* @_pad: Reserved/padding
* @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
* @rbase: Value of RB_BASE written by CPU, save/restored by CP
* @counter: GPU address of the storage area for the preemption counters
* @bv_rptr_addr: Value of BV_RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
*/
struct a6xx_preempt_record {
	u32 magic;
	u32 info;
	u32 errno;
	u32 data;
	u32 cntl;
	u32 rptr;
	u32 wptr;
	u32 _pad;
	u64 rptr_addr;
	u64 rbase;
	u64 counter;
	u64 bv_rptr_addr;
};
#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL
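/*
 * Illustrative sketch (not part of this header): per the comment above, the
 * CPU seeds each ring's record once and the CP keeps it up to date across ring
 * switches. A minimal setup of the CPU-visible fields could look like this;
 * the helper name and parameters are hypothetical, and counter/bv_rptr_addr
 * would be filled in the same way. The real setup lives in a6xx_preempt.c.
 */
static inline void a6xx_preempt_record_init_example(struct a6xx_preempt_record *rec,
		uint64_t ring_iova, uint64_t rptr_addr_iova, uint32_t rb_cntl)
{
	rec->magic = A6XX_PREEMPT_RECORD_MAGIC;
	rec->info = 0;			/* CPU writes 0, CP updates it */
	rec->errno = 0;
	rec->data = 0;
	rec->cntl = rb_cntl;		/* RB_CNTL value, save/restored by the CP */
	rec->rptr = 0;
	rec->wptr = 0;
	rec->rptr_addr = rptr_addr_iova;	/* RB_RPTR_ADDR_LO|HI target */
	rec->rbase = ring_iova;		/* RB_BASE: GPU address of the ringbuffer */
}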
#define PREEMPT_SMMU_INFO_SIZE 4096
#define PREEMPT_RECORD_SIZE(adreno_gpu) \
	((adreno_gpu->info->preempt_record_size) == 0 ? \
	 4192 * SZ_1K : (adreno_gpu->info->preempt_record_size))
/*
* The preemption counter block is a storage area for the value of the
* preemption counters that are saved immediately before context switch. We
* append it on to the end of the allocation for the preemption record.
*/
#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4)
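/*
 * Illustrative sketch (not part of this header): because the counter block is
 * appended to the record allocation, the per-ring buffer size works out to the
 * record size plus A6XX_PREEMPT_COUNTER_SIZE. The macro name is hypothetical;
 * the real allocation is done in a6xx_preempt.c.
 */
#define A6XX_PREEMPT_RECORD_ALLOC_SIZE_EXAMPLE(adreno_gpu) \
	(PREEMPT_RECORD_SIZE(adreno_gpu) + A6XX_PREEMPT_COUNTER_SIZE)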
struct a7xx_cp_smmu_info {
	u32 magic;
	u32 _pad4;
	u64 ttbr0;
	u32 asid;
	u32 context_idr;
	u32 context_bank;
};
#define GEN7_CP_SMMU_INFO_MAGIC 0x241350d5UL
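/*
 * Illustrative sketch (not part of this header): each ring also gets a CP SMMU
 * info page (PREEMPT_SMMU_INFO_SIZE bytes) telling the CP which page table to
 * switch to alongside the ring. A minimal setup might look like this; the
 * helper name is hypothetical and the ttbr0/asid values would come from the
 * GPU address space.
 */
static inline void a7xx_cp_smmu_info_init_example(struct a7xx_cp_smmu_info *info,
		u64 ttbr0, u32 asid)
{
	info->magic = GEN7_CP_SMMU_INFO_MAGIC;
	info->ttbr0 = ttbr0;		/* page table base the CP should program */
	info->asid = asid;
	info->context_idr = 0;
	info->context_bank = 0;
}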
/*
* Given a register and a count, return a value to program into
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for
* _len + 1 registers starting at _reg.
*/
#define A6XX_PROTECT_NORDWR(_reg, _len) \
	(BIT(31) | \
	 (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
/*
* Same as above, but allow reads over the range. For areas of mixed use (such
* as performance counters) this allows us to protect a much larger range with a
* single register
*/
#define A6XX_PROTECT_RDONLY(_reg, _len) \
	((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
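/*
 * Worked example (the register/length values are hypothetical, not taken from
 * the real CP_PROTECT table): A6XX_PROTECT_NORDWR(0x100, 3) blocks reads and
 * writes for registers 0x100..0x103 and expands to
 *	BIT(31) | (3 << 18) | 0x100 = 0x800c0100,
 * while A6XX_PROTECT_RDONLY(0x100, 3) drops bit 31 (0x000c0100) so reads are
 * still allowed over the same range.
 */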
static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
{
	if (adreno_is_a630(gpu))
		return false;

	return true;
}
static inline void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
}
static inline u32 a6xx_llc_read(struct a6xx_gpu *a6xx_gpu, u32 reg)
{
	return readl(a6xx_gpu->llc_mmio + (reg << 2));
}

static inline void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
{
	writel(value, a6xx_gpu->llc_mmio + (reg << 2));
}
#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
		((_ring)->id * sizeof(uint32_t)))
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
void a6xx_preempt_init(struct msm_gpu *gpu);
void a6xx_preempt_hw_init(struct msm_gpu *gpu);
void a6xx_preempt_trigger(struct msm_gpu *gpu);
void a6xx_preempt_irq(struct msm_gpu *gpu);
void a6xx_preempt_fini(struct msm_gpu *gpu);
int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue);
void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue);
/* Return true if we are in a preempt state */
static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu)
{
	/*
	 * Make sure the read to preempt_state is ordered with respect to reads
	 * of other variables before ...
	 */
	smp_rmb();

	int preempt_state = atomic_read(&a6xx_gpu->preempt_state);

	/* ... and after. */
	smp_rmb();

	return !(preempt_state == PREEMPT_NONE ||
		 preempt_state == PREEMPT_FINISH);
}
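/*
 * Illustrative sketch (not part of this header): submission paths typically
 * consult a6xx_in_preempt() before poking ring registers, because while a
 * preemption is in flight the CP owns the ring state. REG_A6XX_CP_RB_WPTR is
 * a real register define; the helper name and the simplified "defer" handling
 * are assumptions.
 */
static inline void a6xx_update_wptr_example(struct msm_gpu *gpu,
		struct msm_ringbuffer *ring, uint32_t wptr)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

	/* Only touch the hardware if no preemption is in flight */
	if (!a6xx_in_preempt(a6xx_gpu) && a6xx_gpu->cur_ring == ring)
		gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
	/* otherwise the preemption-done path is expected to restore the wptr */
}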
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
		       bool suspended);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
	       struct drm_printer *p);
struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
int a6xx_gpu_state_put(struct msm_gpu_state *state);
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
#endif /* __A6XX_GPU_H__ */