// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

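/* platform device IDs used to match GPU core devices instantiated without a devicetree node */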
static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

/*
 * Driver functions:
 */

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;

	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_7:
		*value = gpu->identity.minor_features6;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_8:
		*value = gpu->identity.minor_features7;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_9:
		*value = gpu->identity.minor_features8;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_10:
		*value = gpu->identity.minor_features9;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_11:
		*value = gpu->identity.minor_features10;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_12:
		*value = gpu->identity.minor_features11;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
		if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
			*value = ETNAVIV_SOFTPIN_START_ADDRESS;
		else
			*value = ~0ULL;
		break;

	case ETNAVIV_PARAM_GPU_PRODUCT_ID:
		*value = gpu->identity.product_id;
		break;

	case ETNAVIV_PARAM_GPU_CUSTOMER_ID:
		*value = gpu->identity.customer_id;
		break;

	case ETNAVIV_PARAM_GPU_ECO_ID:
		*value = gpu->identity.eco_id;
		break;

	case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
		*value = gpu->identity.nn_core_count;
		break;

	case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
		*value = gpu->identity.nn_mad_per_core;
		break;

	case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
		*value = gpu->identity.tp_core_count;
		break;

	case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
		*value = gpu->identity.on_chip_sram_size;
		break;

	case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
		*value = gpu->identity.axi_sram_size;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}

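/*
 * Helpers to match a specific core model/revision pair and to extract a
 * bitfield via the generated <field>__MASK/<field>__SHIFT constants.
 */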
#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)

static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from the older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert the vertex output buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphics cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {
		u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
		gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);

		/*
		 * Reading these two registers on GC600 rev 0x19 results in an
		 * unhandled fault: external abort on non-linefetch
		 */
		if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
			gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
			gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
		}

		/*
		 * !!!! HACK ALERT !!!!
		 * Because people change device IDs without letting software
		 * know about it - here is the hack to make it all look the
		 * same. Only for GC400 family.
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
		 * reality it's just a re-branded GC3000. We can identify this
		 * core by the upper half of the revision register being all
		 * ones. Fix model/rev here, so all other places can refer to
		 * this core by its real identity.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}

		if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
			gpu->identity.eco_id = 1;

		if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
			gpu->identity.eco_id = 1;
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;

	/*
	 * If there is a match in the HWDB, we aren't interested in the
	 * remaining register values, as they might be wrong.
	 */
	if (etnaviv_fill_identity_from_hwdb(gpu))
		return;

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	/* These models/revisions don't have the 2D pipe bit */
	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision <= 2) ||
	    gpu->identity.model == chipModel_GC300)
		gpu->identity.features |= chipFeatures_PIPE_2D;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600/300 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600 ||
	    gpu->identity.model == chipModel_GC300)
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;

	etnaviv_hw_specs(gpu);
}

static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, clock);
	}

	/*
	 * Choose number of wait cycles to target a ~30us (1/32768) max latency
	 * until new work is picked up by the FE when it polls in the idle loop.
	 * If the GPU base frequency is unknown use 200 wait cycles.
	 */
	gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
				   200UL, 0xffffUL);
}

static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 pulse_eater = 0x01590880;

		/* disable clock gating */
		gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, 0x0);

		/* disable pulse eater */
		pulse_eater |= BIT(17);
		gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
		pulse_eater |= BIT(0);
		gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);

		/* enable clock */
		control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, control);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		if (gpu->sec_mode == ETNA_SEC_KERNEL) {
			gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
				  VIVS_MMUv2_AHB_CONTROL_RESET);
		} else {
			/* set soft reset. */
			control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
			gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
		}

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		/* disable debug registers, as they are not normally needed */
		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	gpu->state = ETNA_GPU_STATE_RESET;
	gpu->exec_state = -1;
	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = NULL;

	return 0;
}

static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix except for GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
	 * present without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x6203))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	/* Disable SE and RA clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC7000, 0x6202))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE |
		       VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write_power(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}

void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
	}
}

static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
					  struct etnaviv_iommu_context *context)
{
	u16 prefetch;
	u32 address;

	WARN_ON(gpu->state != ETNA_GPU_STATE_INITIALIZED);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu, context);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);
	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping);

	etnaviv_gpu_start_fe(gpu, address, prefetch);

	gpu->state = ETNA_GPU_STATE_RUNNING;
}

static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}

static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	WARN_ON(!(gpu->state == ETNA_GPU_STATE_IDENTIFIED ||
		  gpu->state == ETNA_GPU_STATE_RESET));

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update GPU AXI cache attribute to "cacheable, no allocate".
	 * This is necessary to prevent the i.MX6 SoC locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);

		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);

		val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
		gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);

	gpu->state = ETNA_GPU_STATE_INITIALIZED;
}

int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	dma_addr_t cmdbuf_paddr;
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		goto pm_put;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	if (gpu->identity.nn_core_count > 0)
		dev_warn(gpu->dev, "etnaviv has been instantiated on an NPU, "
				   "for which the UAPI is still experimental\n");

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * On cores with security features supported, we claim control over the
	 * security states.
	 */
	if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
	    (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
		gpu->sec_mode = ETNA_SEC_KERNEL;

	gpu->state = ETNA_GPU_STATE_IDENTIFIED;

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	ret = etnaviv_iommu_global_init(gpu);
	if (ret)
		goto fail;

	/*
	 * If the GPU is part of a system with DMA addressing limitations,
	 * request pages for our SHM backend buffers from the DMA32 zone to
	 * hopefully avoid performance killing SWIOTLB bounce buffering.
	 */
	if (dma_addressing_limited(gpu->dev))
		priv->shm_gfp_mask |= GFP_DMA32;

	/* Create buffer: */
	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
				  PAGE_SIZE);
	if (ret) {
		dev_err(gpu->dev, "could not create command buffer\n");
		goto fail;
	}

	/*
	 * Set the GPU linear window to cover the cmdbuf region, as the GPU
	 * won't be able to start execution otherwise. The alignment to 128M is
	 * chosen arbitrarily but helps in debugging, as the MMU offset
	 * calculations are much more straightforward this way.
	 *
	 * On MC1.0 cores the linear window offset is ignored by the TS engine,
	 * leading to inconsistent memory views. Avoid using the offset on those
	 * cores if possible, otherwise disable the TS feature.
	 */
	cmdbuf_paddr = ALIGN_DOWN(etnaviv_cmdbuf_get_pa(&gpu->buffer), SZ_128M);

	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		if (cmdbuf_paddr >= SZ_2G)
			priv->mmu_global->memory_base = SZ_2G;
		else
			priv->mmu_global->memory_base = cmdbuf_paddr;
	} else if (cmdbuf_paddr + SZ_128M >= SZ_2G) {
		dev_info(gpu->dev,
			 "Need to move linear window on MC1.0, disabling TS\n");
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
		priv->mmu_global->memory_base = SZ_2G;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
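	/*
	 * Prime one completion per event slot: event_free acts as a
	 * counting semaphore for event_alloc().
	 */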
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
		complete(&gpu->event_free);

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

fail:
	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

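/*
 * Sample the FE DMA address/state registers twice, re-reading up to 500
 * times, so the caller can distinguish a running front-end from a stuck one.
 */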
static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		goto pm_put;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tidentity\n");
	seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
	seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
	seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
	seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
	seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t major_features: 0x%08x\n",
		   gpu->identity.features);
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);
	seq_printf(m, "\t minor_features6: 0x%08x\n",
		   gpu->identity.minor_features6);
	seq_printf(m, "\t minor_features7: 0x%08x\n",
		   gpu->identity.minor_features7);
	seq_printf(m, "\t minor_features8: 0x%08x\n",
		   gpu->identity.minor_features8);
	seq_printf(m, "\t minor_features9: 0x%08x\n",
		   gpu->identity.minor_features9);
	seq_printf(m, "\t minor_features10: 0x%08x\n",
		   gpu->identity.minor_features10);
	seq_printf(m, "\t minor_features11: 0x%08x\n",
		   gpu->identity.minor_features11);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t nn_core_count: %d\n",
		   gpu->identity.nn_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
		seq_puts(m, "\t BL is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
		seq_puts(m, "\t ASYNCFE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
		seq_puts(m, "\t MC is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
		seq_puts(m, "\t PPA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
		seq_puts(m, "\t WD is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
		seq_puts(m, "\t NN is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
		seq_puts(m, "\t TP is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

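	/* wraparound-safe u32 comparison, same scheme as fence_after() below */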
	return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.signaled = etnaviv_fence_signaled,
	.release = etnaviv_fence_release,
};

static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	/*
	 * GPU lock must already be held, otherwise fence completion order might
	 * not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

/* returns true if fence a comes after fence b */
static inline bool fence_after(u32 a, u32 b)
{
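	/*
	 * The signed difference keeps ordering correct across seqno
	 * wraparound: e.g. a = 1, b = 0xfffffffe yields (s32)3 > 0.
	 */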
	return (s32)(a - b) > 0;
}

/*
 * event management:
 */

static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
	unsigned int *events)
{
	unsigned long timeout = msecs_to_jiffies(10 * 10000);
	unsigned i, acquired = 0, rpm_count = 0;
	int ret;

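	/*
	 * gpu->event_free is used as a counting semaphore; each successful
	 * wait below consumes one free event slot.
	 */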
	for (i = 0; i < nr_events; i++) {
		unsigned long remaining;

		remaining = wait_for_completion_timeout(&gpu->event_free, timeout);

		if (!remaining) {
			dev_err(gpu->dev, "wait_for_completion_timeout failed");
			ret = -EBUSY;
			goto out;
		}

		acquired++;
		timeout = remaining;
	}

	spin_lock(&gpu->event_spinlock);

	for (i = 0; i < nr_events; i++) {
		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);

		events[i] = event;
		memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
		set_bit(event, gpu->event_bitmap);
	}

	spin_unlock(&gpu->event_spinlock);

	for (i = 0; i < nr_events; i++) {
		ret = pm_runtime_resume_and_get(gpu->dev);
		if (ret)
			goto out_rpm;
		rpm_count++;
	}

	return 0;

out_rpm:
	for (i = 0; i < rpm_count; i++)
		pm_runtime_put_autosuspend(gpu->dev);
out:
	for (i = 0; i < acquired; i++)
		complete(&gpu->event_free);

	return ret;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	if (!test_bit(event, gpu->event_bitmap)) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
	} else {
		clear_bit(event, gpu->event_bitmap);
		complete(&gpu->event_free);
	}

	pm_runtime_put_autosuspend(gpu->dev);
}

/*
 * Cmdstream submission/retirement:
 */
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 id, struct drm_etnaviv_timespec *timeout)
{
	struct dma_fence *fence;
	int ret;

	/*
	 * Look up the fence and take a reference. We might still find a fence
	 * whose refcount has already dropped to zero. dma_fence_get_rcu
	 * pretends we didn't find a fence in that case.
	 */
	rcu_read_lock();
	fence = xa_load(&gpu->user_fences, id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();

	if (!fence)
		return 0;

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_fence_wait_timeout(fence, true, remaining);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret != -ERESTARTSYS)
			ret = 0;
	}

	dma_fence_put(fence);
	return ret;
}

/*
 * Wait for an object to become inactive. This, on its own, is not race
 * free: the object is moved by the scheduler off the active list, and
 * then the iova is put. Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0)
		return 0;
	else if (ret == -ERESTARTSYS)
		return -ERESTARTSYS;
	else
		return -ETIMEDOUT;
}

static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event, unsigned int flags)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		if (pmr->flags == flags)
			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
	}
}

static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	u32 val;

	/* disable clock gating */
	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);

	/* enable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
}

static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;
	u32 val;

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		*pmr->bo_vma = pmr->sequence;
	}

	/* disable debug register */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	/* enable clock gating */
	val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
}

/* add bo's to gpu's ring, and kick gpu: */
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct dma_fence *gpu_fence;
	unsigned int i, nr_events = 1, event[3];
	int ret;

	/*
	 * if there are performance monitor requests we need to have
	 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
	 *   requests.
	 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
	 *   and update the sequence number for userspace.
	 */
	if (submit->nr_pmrs)
		nr_events = 3;

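	/*
	 * event[0] signals the GPU fence; with perfmon requests, event[1]
	 * and event[2] carry the pre- and post-sample sync points.
	 */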
	ret = event_alloc(gpu, nr_events, event);
	if (ret) {
		DRM_ERROR("no free events\n");
		pm_runtime_put_noidle(gpu->dev);
		return NULL;
	}

	mutex_lock(&gpu->lock);

	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
	if (!gpu_fence) {
		for (i = 0; i < nr_events; i++)
			event_free(gpu, event[i]);

		goto out_unlock;
	}

	if (gpu->state == ETNA_GPU_STATE_INITIALIZED)
		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);
	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);

	if (submit->nr_pmrs) {
		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
		kref_get(&submit->refcount);
		gpu->event[event[1]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[1]);
	}

	gpu->event[event[0]].fence = gpu_fence;
	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
	etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
			     event[0], &submit->cmdbuf);

	if (submit->nr_pmrs) {
		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
		kref_get(&submit->refcount);
		gpu->event[event[2]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[2]);
	}

out_unlock:
	mutex_unlock(&gpu->lock);

	return gpu_fence;
}

static void sync_point_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       sync_point_work);
	struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
	u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);

	event->sync_point(gpu, event);
	etnaviv_submit_put(event->submit);
	event_free(gpu, gpu->sync_point_event);

	/* restart FE last to avoid GPU and IRQ racing against this worker */
	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}

void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	char *comm = NULL, *cmd = NULL;
	struct task_struct *task;
	unsigned int i;

	dev_err(gpu->dev, "recover hung GPU!\n");

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (task) {
		comm = kstrdup(task->comm, GFP_KERNEL);
		cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
		put_task_struct(task);
	}

	if (comm && cmd)
		dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);

	kfree(cmd);
	kfree(comm);

	if (pm_runtime_get_sync(gpu->dev) < 0)
		goto pm_put;

	mutex_lock(&gpu->lock);

	etnaviv_hw_reset(gpu);

	/* complete all events, the GPU won't do it after the reset */
	spin_lock(&gpu->event_spinlock);
	for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
		event_free(gpu, i);
	spin_unlock(&gpu->event_spinlock);

	etnaviv_gpu_hw_init(gpu);

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
pm_put:
	pm_runtime_put_autosuspend(gpu->dev);
}

static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
	static const char *fault_reasons[] = {
		"slave not present",
		"page not present",
		"write violation",
		"out of bounds",
		"read security violation",
		"write security violation",
	};

	u32 status_reg, status;
	int i;

	if (gpu->sec_mode == ETNA_SEC_NONE)
		status_reg = VIVS_MMUv2_STATUS;
	else
		status_reg = VIVS_MMUv2_SEC_STATUS;

	status = gpu_read(gpu, status_reg);
	dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);

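	/*
	 * The status register packs one 4-bit exception code per MMU;
	 * a zero nibble means that MMU reported no fault.
	 */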
1529 | for (i = 0; i < 4; i++) { |
1530 | const char *reason = "unknown" ; |
1531 | u32 address_reg; |
1532 | u32 mmu_status; |
1533 | |
1534 | mmu_status = (status >> (i * 4)) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK; |
1535 | if (!mmu_status) |
1536 | continue; |
1537 | |
1538 | if ((mmu_status - 1) < ARRAY_SIZE(fault_reasons)) |
1539 | reason = fault_reasons[mmu_status - 1]; |
1540 | |
1541 | if (gpu->sec_mode == ETNA_SEC_NONE) |
1542 | address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i); |
1543 | else |
1544 | address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR; |
1545 | |
1546 | dev_err_ratelimited(gpu->dev, |
1547 | "MMU %d fault (%s) addr 0x%08x\n" , |
1548 | i, reason, gpu_read(gpu, address_reg)); |
1549 | } |
1550 | } |
1551 | |
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			dump_mmu_fault(gpu);
			gpu->state = ETNA_GPU_STATE_FAULT;
			drm_sched_fault(&gpu->sched);
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			if (gpu->event[event].sync_point) {
				gpu->sync_point_event = event;
				queue_work(gpu->wq, &gpu->sync_point_work);
			}

			fence = gpu->event[event].fence;
			if (!fence)
				continue;

			gpu->event[event].fence = NULL;

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;
			dma_fence_signal(fence);

			event_free(gpu, event);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}

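/*
 * Enable the register, bus, core and shader clocks in that order,
 * unwinding in reverse on failure. The optional clocks may be NULL,
 * which clk_prepare_enable() treats as a no-op.
 */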
static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = clk_prepare_enable(gpu->clk_reg);
	if (ret)
		return ret;

	ret = clk_prepare_enable(gpu->clk_bus);
	if (ret)
		goto disable_clk_reg;

	ret = clk_prepare_enable(gpu->clk_core);
	if (ret)
		goto disable_clk_bus;

	ret = clk_prepare_enable(gpu->clk_shader);
	if (ret)
		goto disable_clk_core;

	return 0;

disable_clk_core:
	clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	clk_disable_unprepare(gpu->clk_bus);
disable_clk_reg:
	clk_disable_unprepare(gpu->clk_reg);

	return ret;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	clk_disable_unprepare(gpu->clk_shader);
	clk_disable_unprepare(gpu->clk_core);
	clk_disable_unprepare(gpu->clk_bus);
	clk_disable_unprepare(gpu->clk_reg);

	return 0;
}

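/*
 * Poll the idle state register in 5us steps until every unit covered
 * by gpu->idle_mask reports idle, or fail with -ETIMEDOUT once
 * timeout_ms has elapsed.
 */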
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}

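/*
 * Quiesce a running GPU: replace the final WAIT in the kernel ring
 * buffer with an END so the front end stops fetching, wait for the
 * hardware to drain, then drop back to the initialized state.
 */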
static void etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->state == ETNA_GPU_STATE_RUNNING) {
		/* Replace the last WAIT with END */
		mutex_lock(&gpu->lock);
		etnaviv_buffer_end(gpu);
		mutex_unlock(&gpu->lock);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);

		gpu->state = ETNA_GPU_STATE_INITIALIZED;
	}

	gpu->exec_state = -1;
}

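/* Reprogram the clock scaling state and redo the basic hardware init. */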
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);

	mutex_unlock(&gpu->lock);

	return 0;
}

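/*
 * Thermal cooling device callbacks. The cooling state is the frequency
 * scaling step kept in gpu->freq_scale: 0 means full speed, higher
 * states select a lower core clock via etnaviv_gpu_update_clock(), with
 * 6 as the maximum (strongest) throttle step.
 */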
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;

	return 0;
}

static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	*state = gpu->freq_scale;

	return 0;
}

static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);

	return 0;
}

static const struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};

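/*
 * Component bind callback: hook this GPU core up to the master DRM
 * device by registering the cooling device (if enabled), creating the
 * per-GPU ordered workqueue and scheduler, and publishing the core in
 * the DRM private GPU array.
 */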
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
			    void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
		if (IS_ERR(gpu->cooling))
			return PTR_ERR(gpu->cooling);
	}

	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
	if (!gpu->wq) {
		ret = -ENOMEM;
		goto out_thermal;
	}

	ret = etnaviv_sched_init(gpu);
	if (ret)
		goto out_workqueue;

	if (!IS_ENABLED(CONFIG_PM)) {
		ret = etnaviv_gpu_clk_enable(gpu);
		if (ret < 0)
			goto out_sched;
	}

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
	init_waitqueue_head(&gpu->fence_event);

	priv->gpu[priv->num_gpus++] = gpu;

	return 0;

out_sched:
	etnaviv_sched_fini(gpu);

out_workqueue:
	destroy_workqueue(gpu->wq);

out_thermal:
	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);

	return ret;
}

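/*
 * Component unbind callback: tear down everything set up at bind time,
 * making sure the hardware is suspended and the clocks are gated before
 * the MMU context, ring buffer and global IOMMU state are released.
 */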
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
			       void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	destroy_workqueue(gpu->wq);

	etnaviv_sched_fini(gpu);

	if (IS_ENABLED(CONFIG_PM)) {
		pm_runtime_get_sync(gpu->dev);
		pm_runtime_put_sync_suspend(gpu->dev);
	} else {
		etnaviv_gpu_hw_suspend(gpu);
		etnaviv_gpu_clk_disable(gpu);
	}

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);

	etnaviv_cmdbuf_free(&gpu->buffer);
	etnaviv_iommu_global_fini(gpu);

	gpu->drm = NULL;
	xa_destroy(&gpu->user_fences);

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);

static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);
	mutex_init(&gpu->sched_lock);

	/* Map registers: */
	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0)
		return gpu->irq;

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
	DBG("clk_reg: %p", gpu->clk_reg);
	if (IS_ERR(gpu->clk_reg))
		return PTR_ERR(gpu->clk_reg);

	gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		return PTR_ERR(gpu->clk_bus);

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		return PTR_ERR(gpu->clk_core);
	gpu->base_rate_core = clk_get_rate(gpu->clk_core);

	gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		return PTR_ERR(gpu->clk_shader);
	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}

static void etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
}

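/*
 * Runtime PM suspend: refuse to suspend while jobs are still queued or
 * while any unit other than the front end and memory controller is
 * busy; otherwise quiesce the front end and gate the clocks.
 */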
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If there are any jobs in the HW queue, we're not idle */
	if (atomic_read(&gpu->sched.credit_count))
		return -EBUSY;

	/* Check whether the hardware (except FE and MC) is idle */
	mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
				  VIVS_HI_IDLE_STATE_MC);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask) {
		dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
				     idle);
		return -EBUSY;
	}

	etnaviv_gpu_hw_suspend(gpu);

	gpu->state = ETNA_GPU_STATE_IDENTIFIED;

	return etnaviv_gpu_clk_disable(gpu);
}

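/*
 * Runtime PM resume: ungate the clocks and, if the hardware state was
 * torn down by a previous suspend, redo the basic hardware init.
 */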
static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->state == ETNA_GPU_STATE_IDENTIFIED) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = pm_ptr(&etnaviv_gpu_pm_ops),
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove_new = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};