/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT__
#define __INTEL_GT__

#include "intel_engine_types.h"
#include "intel_gt_types.h"
#include "intel_reset.h"

struct drm_i915_private;
struct drm_printer;

/*
 * Check that the GT is a graphics GT and has an IP version within the
 * specified range (inclusive).
 */
#define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt)->type != GT_MEDIA && \
	 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
	 GRAPHICS_VER_FULL((gt)->i915) <= (until)))
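
/*
 * Example usage (illustrative sketch only; the IP versions below are
 * arbitrary and apply_workaround() is a hypothetical helper, not part of
 * this header):
 *
 *	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
 *		apply_workaround(gt);
 */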

/*
 * Check that the GT is a media GT and has an IP version within the
 * specified range (inclusive).
 *
 * Only usable on platforms with a standalone media design (i.e., IP version 13
 * and higher).
 */
#define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(13, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt) && (gt)->type == GT_MEDIA && \
	 MEDIA_VER_FULL((gt)->i915) >= (from) && \
	 MEDIA_VER_FULL((gt)->i915) <= (until)))
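
/*
 * Example usage (illustrative sketch only; the media IP version below is an
 * arbitrary value and apply_media_workaround() is a hypothetical helper).
 * Note that, unlike the graphics variant, the "(gt) &&" term above makes the
 * macro tolerate a NULL gt, so it can be used on a media GT pointer that may
 * not exist on a given platform:
 *
 *	if (IS_MEDIA_GT_IP_RANGE(media_gt, IP_VER(13, 0), IP_VER(13, 0)))
 *		apply_media_workaround(media_gt);
 */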

/*
 * Check that the GT is a graphics GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. E.g.,
 *
 *   IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)
 *   IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B1, STEP_FOREVER)
 *
 * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
 * stepping bound for the specified IP version.
 */
#define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_GRAPHICS_STEP((gt)->i915, (from), (until))))

/*
 * Check that the GT is a media GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. "STEP_FOREVER" can be passed as "until" for
 * workarounds that have no upper stepping bound for the specified IP version.
 *
 * This macro may only be used to match on platforms that have a standalone
 * media design (i.e., media version 13 or higher).
 */
#define IS_MEDIA_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_MEDIA_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_MEDIA_STEP((gt)->i915, (from), (until))))
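
/*
 * E.g. (illustrative only; the media IP version and steppings below are
 * arbitrary examples in the same style as the graphics variant above):
 *
 *   IS_MEDIA_GT_IP_STEP(gt, IP_VER(13, 0), STEP_A0, STEP_B0)
 */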

#define GT_TRACE(gt, fmt, ...) do { \
	const struct intel_gt *gt__ __maybe_unused = (gt); \
	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
		  ##__VA_ARGS__); \
} while (0)

#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
	IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
	engine->class == COPY_ENGINE_CLASS && engine->instance == 0)

static inline bool gt_is_root(struct intel_gt *gt)
{
	return !gt->info.id;
}

bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);

static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
	return container_of(uc, struct intel_gt, uc);
}

static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}

static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
{
	return container_of(huc, struct intel_gt, uc.huc);
}

static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
{
	return container_of(gsc_uc, struct intel_gt, uc.gsc);
}

static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
	return container_of(gsc, struct intel_gt, gsc);
}

static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return guc_to_gt(guc)->i915;
}

void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);
void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
void intel_gt_check_and_clear_faults(struct intel_gt *gt);
i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt);
void intel_gt_clear_error_registers(struct intel_gt *gt,
				    intel_engine_mask_t engine_mask);
void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
void intel_gt_chipset_flush(struct intel_gt *gt);

static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
					  enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}

static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}

static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
		   !test_bit(I915_WEDGED, &gt->reset.flags));

	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}

int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
void intel_gt_release_all(struct drm_i915_private *i915);

#define for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_MAX_GT; \
	     (id__)++) \
		for_each_if(((gt__) = (i915__)->gt[(id__)]))

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, gt__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (gt__)->engine[(id__)])
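
/*
 * Typical usage (illustrative sketch only; "engine" and "id" are locals
 * declared by the caller, and handle_engine() is a hypothetical helper):
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *
 *	for_each_engine(engine, gt, id)
 *		handle_engine(engine);
 */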

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)
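
/*
 * Example (illustrative only; "engine" and "tmp" are the caller's locals,
 * "tmp" being the scratch copy of the mask consumed by the iterator, and
 * handle_engine() is a hypothetical helper):
 *
 *	struct intel_engine_cs *engine;
 *	intel_engine_mask_t tmp;
 *
 *	for_each_engine_masked(engine, gt, gt->info.engine_mask, tmp)
 *		handle_engine(engine);
 */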

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p);

void intel_gt_watchdog_work(struct work_struct *work);

enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent);

void intel_gt_bind_context_set_ready(struct intel_gt *gt);
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);

#endif /* __INTEL_GT__ */