/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_H__
#define __INTEL_GT_H__

#include "intel_engine_types.h"
#include "intel_gt_types.h"
#include "intel_reset.h"

struct drm_i915_private;
struct drm_printer;

/*
 * Check that the GT is a graphics GT and has an IP version within the
 * specified range (inclusive).
 */
#define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt)->type != GT_MEDIA && \
	 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
	 GRAPHICS_VER_FULL((gt)->i915) <= (until)))
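
/*
 * Illustrative use (hypothetical helper and bounds): gate a code path to
 * a span of graphics IPs, e.g.
 *
 *	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
 *		apply_xelpg_quirk(gt);
 *
 * apply_xelpg_quirk() is a made-up helper and the bounds are examples
 * only, not a statement about any specific workaround.
 */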

/*
 * Check that the GT is a media GT and has an IP version within the
 * specified range (inclusive).
 *
 * Only usable on platforms with a standalone media design (i.e., IP version 13
 * and higher).
 */
#define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(13, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt) && (gt)->type == GT_MEDIA && \
	 MEDIA_VER_FULL((gt)->i915) >= (from) && \
	 MEDIA_VER_FULL((gt)->i915) <= (until)))
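
/*
 * Illustrative use (hypothetical helper and bounds):
 *
 *	if (IS_MEDIA_GT_IP_RANGE(gt, IP_VER(13, 0), IP_VER(13, 0)))
 *		apply_xelpmp_quirk(gt);
 */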

/*
 * Check that the GT is a graphics GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. E.g.,
 *
 *    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)
 *    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B1, STEP_FOREVER)
 *
 * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
 * stepping bound for the specified IP version.
 */
#define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_GRAPHICS_STEP((gt)->i915, (from), (until))))

/*
 * Check that the GT is a media GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. "STEP_FOREVER" can be passed as "until" for
 * workarounds that have no upper stepping bound for the specified IP version.
 *
 * This macro may only be used to match on platforms that have a standalone
 * media design (i.e., media version 13 or higher).
 */
#define IS_MEDIA_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_MEDIA_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_MEDIA_STEP((gt)->i915, (from), (until))))
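
/*
 * Illustrative use (hypothetical bounds), mirroring the graphics examples
 * above:
 *
 *	IS_MEDIA_GT_IP_STEP(gt, IP_VER(13, 0), STEP_A0, STEP_B0)
 */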

#define GT_TRACE(gt, fmt, ...) do { \
	const struct intel_gt *gt__ __maybe_unused = (gt); \
	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
		  ##__VA_ARGS__); \
} while (0)
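
/*
 * Illustrative use: the GEM_TRACE output is prefixed with the device
 * name, so a call such as (engine_mask being a hypothetical local)
 *
 *	GT_TRACE(gt, "reset engines=%x\n", engine_mask);
 *
 * identifies which device the trace came from.
 */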

static inline bool gt_is_root(struct intel_gt *gt)
{
	return !gt->info.id;
}

bool intel_gt_needs_wa_16018031267(struct intel_gt *gt);
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);

#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
	intel_gt_needs_wa_16018031267(engine->gt) && \
	engine->class == COPY_ENGINE_CLASS && engine->instance == 0)

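/*
 * The microcontroller structs below are embedded in struct intel_gt, so
 * each helper recovers the owning GT (or i915) from a member pointer via
 * container_of().
 */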
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
	return container_of(uc, struct intel_gt, uc);
}

static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}

static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
{
	return container_of(huc, struct intel_gt, uc.huc);
}

static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
{
	return container_of(gsc_uc, struct intel_gt, uc.gsc);
}

static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
	return container_of(gsc, struct intel_gt, gsc);
}

static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return guc_to_gt(guc)->i915;
}

void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);

void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);

int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);

void intel_gt_check_and_clear_faults(struct intel_gt *gt);
i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt);
void intel_gt_clear_error_registers(struct intel_gt *gt,
				    intel_engine_mask_t engine_mask);

void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
void intel_gt_chipset_flush(struct intel_gt *gt);

static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
					  enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}

static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}

static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
		   !test_bit(I915_WEDGED, &gt->reset.flags));

	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}

int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
void intel_gt_release_all(struct drm_i915_private *i915);

#define for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_MAX_GT; \
	     (id__)++) \
		for_each_if(((gt__) = (i915__)->gt[(id__)]))
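
/*
 * Illustrative use: visit every initialised GT of a device, e.g.
 *
 *	struct intel_gt *gt;
 *	unsigned int i;
 *
 *	for_each_gt(gt, i915, i)
 *		intel_gt_chipset_flush(gt);
 */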

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, gt__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (gt__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)
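
/*
 * Illustrative use: tmp__ is scratch storage holding the not-yet-visited
 * bits of the mask, e.g.
 *
 *	struct intel_engine_cs *engine;
 *	intel_engine_mask_t tmp;
 *
 *	for_each_engine_masked(engine, gt, gt->info.engine_mask, tmp)
 *		intel_engine_pm_get(engine);
 *
 * intel_engine_pm_get() here is just an example callee.
 */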

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p);

void intel_gt_watchdog_work(struct work_struct *work);

enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent);

void intel_gt_bind_context_set_ready(struct intel_gt *gt);
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
#endif /* __INTEL_GT_H__ */
207 | |