// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/component.h>
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/thermal.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"

#ifndef PHYS_OFFSET
#define PHYS_OFFSET 0
#endif

static const struct platform_device_id gpu_ids[] = {
	{ .name = "etnaviv-gpu,2d" },
	{ },
};

/*
 * Driver functions:
 */

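/*
 * Report one of the GPU identity/feature values to userspace; unknown
 * parameters are rejected with -EINVAL.
 */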
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
	switch (param) {
	case ETNAVIV_PARAM_GPU_MODEL:
		*value = gpu->identity.model;
		break;

	case ETNAVIV_PARAM_GPU_REVISION:
		*value = gpu->identity.revision;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_0:
		*value = gpu->identity.features;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_1:
		*value = gpu->identity.minor_features0;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_2:
		*value = gpu->identity.minor_features1;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_3:
		*value = gpu->identity.minor_features2;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_4:
		*value = gpu->identity.minor_features3;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_5:
		*value = gpu->identity.minor_features4;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_6:
		*value = gpu->identity.minor_features5;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_7:
		*value = gpu->identity.minor_features6;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_8:
		*value = gpu->identity.minor_features7;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_9:
		*value = gpu->identity.minor_features8;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_10:
		*value = gpu->identity.minor_features9;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_11:
		*value = gpu->identity.minor_features10;
		break;

	case ETNAVIV_PARAM_GPU_FEATURES_12:
		*value = gpu->identity.minor_features11;
		break;

	case ETNAVIV_PARAM_GPU_STREAM_COUNT:
		*value = gpu->identity.stream_count;
		break;

	case ETNAVIV_PARAM_GPU_REGISTER_MAX:
		*value = gpu->identity.register_max;
		break;

	case ETNAVIV_PARAM_GPU_THREAD_COUNT:
		*value = gpu->identity.thread_count;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
		*value = gpu->identity.vertex_cache_size;
		break;

	case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
		*value = gpu->identity.shader_core_count;
		break;

	case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
		*value = gpu->identity.pixel_pipes;
		break;

	case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
		*value = gpu->identity.vertex_output_buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
		*value = gpu->identity.buffer_size;
		break;

	case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
		*value = gpu->identity.instruction_count;
		break;

	case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
		*value = gpu->identity.num_constants;
		break;

	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
		*value = gpu->identity.varyings_count;
		break;

	default:
		DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
		return -EINVAL;
	}

	return 0;
}


#define etnaviv_is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == rev)
#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)

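/*
 * Derive the chip "specs" (stream/thread/shader counts, cache and buffer
 * sizes) from the VIVS_HI_CHIP_SPECS registers, filling in sane defaults
 * for cores that do not report a given value.
 */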
static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		u32 specs[4];
		unsigned int streams;

		specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
		specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
		specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
		specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);

		gpu->identity.stream_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_STREAM_COUNT);
		gpu->identity.register_max = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_REGISTER_MAX);
		gpu->identity.thread_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_THREAD_COUNT);
		gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
		gpu->identity.shader_core_count = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
		gpu->identity.pixel_pipes = etnaviv_field(specs[0],
					VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
		gpu->identity.vertex_output_buffer_size =
			etnaviv_field(specs[0],
				VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);

		gpu->identity.buffer_size = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
		gpu->identity.instruction_count = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
		gpu->identity.num_constants = etnaviv_field(specs[1],
					VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);

		gpu->identity.varyings_count = etnaviv_field(specs[2],
					VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);

		/* This overrides the value from the older register if non-zero */
		streams = etnaviv_field(specs[3],
					VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
		if (streams)
			gpu->identity.stream_count = streams;
	}

	/* Fill in the stream count if not specified */
	if (gpu->identity.stream_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.stream_count = 4;
		else
			gpu->identity.stream_count = 1;
	}

	/* Convert the register max value */
	if (gpu->identity.register_max)
		gpu->identity.register_max = 1 << gpu->identity.register_max;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.register_max = 32;
	else
		gpu->identity.register_max = 64;

	/* Convert the thread count */
	if (gpu->identity.thread_count)
		gpu->identity.thread_count = 1 << gpu->identity.thread_count;
	else if (gpu->identity.model == chipModel_GC400)
		gpu->identity.thread_count = 64;
	else if (gpu->identity.model == chipModel_GC500 ||
		 gpu->identity.model == chipModel_GC530)
		gpu->identity.thread_count = 128;
	else
		gpu->identity.thread_count = 256;

	if (gpu->identity.vertex_cache_size == 0)
		gpu->identity.vertex_cache_size = 8;

	if (gpu->identity.shader_core_count == 0) {
		if (gpu->identity.model >= 0x1000)
			gpu->identity.shader_core_count = 2;
		else
			gpu->identity.shader_core_count = 1;
	}

	if (gpu->identity.pixel_pipes == 0)
		gpu->identity.pixel_pipes = 1;

	/* Convert the vertex output buffer size */
	if (gpu->identity.vertex_output_buffer_size) {
		gpu->identity.vertex_output_buffer_size =
			1 << gpu->identity.vertex_output_buffer_size;
	} else if (gpu->identity.model == chipModel_GC400) {
		if (gpu->identity.revision < 0x4000)
			gpu->identity.vertex_output_buffer_size = 512;
		else if (gpu->identity.revision < 0x4200)
			gpu->identity.vertex_output_buffer_size = 256;
		else
			gpu->identity.vertex_output_buffer_size = 128;
	} else {
		gpu->identity.vertex_output_buffer_size = 512;
	}

	switch (gpu->identity.instruction_count) {
	case 0:
		if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
		    gpu->identity.model == chipModel_GC880)
			gpu->identity.instruction_count = 512;
		else
			gpu->identity.instruction_count = 256;
		break;

	case 1:
		gpu->identity.instruction_count = 1024;
		break;

	case 2:
		gpu->identity.instruction_count = 2048;
		break;

	default:
		gpu->identity.instruction_count = 256;
		break;
	}

	if (gpu->identity.num_constants == 0)
		gpu->identity.num_constants = 168;

	if (gpu->identity.varyings_count == 0) {
		if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
			gpu->identity.varyings_count = 12;
		else
			gpu->identity.varyings_count = 8;
	}

	/*
	 * For some cores, two varyings are consumed for position, so the
	 * maximum varying count needs to be reduced by one.
	 */
	if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
	    etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
	    etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
	    etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
	    etnaviv_is_model_rev(gpu, GC880, 0x5106))
		gpu->identity.varyings_count -= 1;
}

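/*
 * Read the chip identity registers and work out which GPU core this is,
 * applying the known quirks for misreported models before filling in the
 * feature and minor-feature words.
 */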
static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
{
	u32 chipIdentity;

	chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);

	/* Special case for older graphic cores. */
	if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
		gpu->identity.model = chipModel_GC500;
		gpu->identity.revision = etnaviv_field(chipIdentity,
					VIVS_HI_CHIP_IDENTITY_REVISION);
	} else {

		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);

		/*
		 * Some GC400 family cores report slightly different model
		 * IDs; normalize them here so the rest of the driver only
		 * has to deal with one value (GC420 is left alone).
		 */
		if ((gpu->identity.model & 0xff00) == 0x0400 &&
		    gpu->identity.model != chipModel_GC420) {
			gpu->identity.model = gpu->identity.model & 0x0400;
		}

		/* Another special case */
		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
				/*
				 * This IP has an ECO; put the correct
				 * revision in it.
				 */
				gpu->identity.revision = 0x1051;
			}
		}

		/*
		 * The GC2000+ found on the i.MX6QP identifies itself with a
		 * bogus revision; it is really a GC3000, so correct the
		 * identification here.
		 */
		if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
			gpu->identity.model = chipModel_GC3000;
			gpu->identity.revision &= 0xffff;
		}
	}

	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
		 gpu->identity.model, gpu->identity.revision);

	/*
	 * If there is a match in the HWDB, we aren't interested in the
	 * remaining register values, as they might be wrong.
	 */
	if (etnaviv_fill_identity_from_hwdb(gpu))
		return;

	gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);

	/* Disable fast clear on GC700. */
	if (gpu->identity.model == chipModel_GC700)
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

	if ((gpu->identity.model == chipModel_GC500 &&
	     gpu->identity.revision < 2) ||
	    (gpu->identity.model == chipModel_GC300 &&
	     gpu->identity.revision < 0x2000)) {

		/*
		 * GC500 rev 1.x and GC300 rev < 2.0 don't have these
		 * registers.
		 */
		gpu->identity.minor_features0 = 0;
		gpu->identity.minor_features1 = 0;
		gpu->identity.minor_features2 = 0;
		gpu->identity.minor_features3 = 0;
		gpu->identity.minor_features4 = 0;
		gpu->identity.minor_features5 = 0;
	} else
		gpu->identity.minor_features0 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);

	if (gpu->identity.minor_features0 &
	    chipMinorFeatures0_MORE_MINOR_FEATURES) {
		gpu->identity.minor_features1 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
		gpu->identity.minor_features2 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
		gpu->identity.minor_features3 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
		gpu->identity.minor_features4 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
		gpu->identity.minor_features5 =
				gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
	}

	/* GC600 idle register reports zero bits where modules aren't present */
	if (gpu->identity.model == chipModel_GC600) {
		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
				 VIVS_HI_IDLE_STATE_RA |
				 VIVS_HI_IDLE_STATE_SE |
				 VIVS_HI_IDLE_STATE_PA |
				 VIVS_HI_IDLE_STATE_SH |
				 VIVS_HI_IDLE_STATE_PE |
				 VIVS_HI_IDLE_STATE_DE |
				 VIVS_HI_IDLE_STATE_FE;
	} else {
		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
	}

	etnaviv_hw_specs(gpu);
}

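/*
 * Write a new clock control value, strobing the FSCALE_CMD_LOAD bit so the
 * core latches the new frequency scaling value.
 */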
static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
{
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
		  VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}

static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
{
	if (gpu->identity.minor_features2 &
	    chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
		clk_set_rate(gpu->clk_core,
			     gpu->base_rate_core >> gpu->freq_scale);
		clk_set_rate(gpu->clk_shader,
			     gpu->base_rate_shader >> gpu->freq_scale);
	} else {
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, clock);
	}
}

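/*
 * Soft-reset the GPU: isolate the core, pulse the reset bit (or the MMUv2
 * AHB reset when running in kernel security mode), then check that both the
 * front end and the 2D/3D pipes report idle.  Retried for up to one second.
 */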
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
	u32 control, idle;
	unsigned long timeout;
	bool failed = true;

	/* We hope that the GPU resets in under one second */
	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_is_after_jiffies(timeout)) {
		/* enable clock */
		unsigned int fscale = 1 << (6 - gpu->freq_scale);
		control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
		etnaviv_gpu_load_clock(gpu, control);

		/* isolate the GPU. */
		control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		if (gpu->sec_mode == ETNA_SEC_KERNEL) {
			gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
				  VIVS_MMUv2_AHB_CONTROL_RESET);
		} else {
			/* set soft reset. */
			control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
			gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
		}

		/* wait for reset. */
		usleep_range(10, 20);

		/* reset soft reset bit. */
		control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* reset GPU isolation. */
		control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		/* read idle register. */
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		/* try resetting again if FE is not idle */
		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
			dev_dbg(gpu->dev, "FE is not idle\n");
			continue;
		}

		/* read reset register. */
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		/* is the GPU idle? */
		if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
		    ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
			dev_dbg(gpu->dev, "GPU is not idle\n");
			continue;
		}

		/* disable debug registers, as they are not normally needed */
		control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
		gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

		failed = false;
		break;
	}

	if (failed) {
		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);

		dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
			idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
			control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");

		return -EBUSY;
	}

	/* We rely on the GPU running, so program the clock */
	etnaviv_gpu_update_clock(gpu);

	return 0;
}

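/*
 * Enable module-level clock gating, except for the modules where gating is
 * known to be broken on particular core revisions.
 */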
static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
{
	u32 pmc, ppc;

	/* enable clock gating */
	ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

	/* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
	if (gpu->identity.revision == 0x4301 ||
	    gpu->identity.revision == 0x4302)
		ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);

	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);

	/* Disable PA clock gating for GC400+ without bugfix, except GC420 */
	if (gpu->identity.model >= chipModel_GC400 &&
	    gpu->identity.model != chipModel_GC420 &&
	    !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;

	/*
	 * Disable PE clock gating on revs < 5.0.0.0 when HZ is present
	 * without a bug fix.
	 */
	if (gpu->identity.revision < 0x5000 &&
	    gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
	    !(gpu->identity.minor_features1 &
	      chipMinorFeatures1_DISABLE_PE_GATING))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;

	if (gpu->identity.revision < 0x5422)
		pmc |= BIT(15); /* Unknown bit */

	/* Disable TX clock gating on affected core revisions. */
	if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
	    etnaviv_is_model_rev(gpu, GC2000, 0x5108))
		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}

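/*
 * Point the front end DMA engine at a command buffer and kick it off with
 * the given prefetch count.
 */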
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
{
	gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
	gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
		  VIVS_FE_COMMAND_CONTROL_ENABLE |
		  VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
	}
}

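/*
 * Program the pulse eater register, applying the per-revision workarounds
 * needed on some cores.
 */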
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
	/*
	 * Base value for the VIVS_PM_PULSE_EATER register on models where it
	 * cannot be read, extracted from the Vivante kernel driver.
	 */
	u32 pulse_eater = 0x01590880;

	if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
	    etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
		pulse_eater |= BIT(23);
	}

	if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
	    etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
		pulse_eater &= ~BIT(16);
		pulse_eater |= BIT(17);
	}

	if ((gpu->identity.revision > 0x5420) &&
	    (gpu->identity.features & chipFeatures_PIPE_3D)) {
		/* Performance fix: disable internal DFS */
		pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
		pulse_eater |= BIT(18);
	}

	gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
}

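/*
 * Bring the hardware into a usable state after reset: apply memory
 * controller and bus workarounds, enable clock gating, restore the MMU
 * configuration and start the front end on the kernel ring buffer.
 */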
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
	u16 prefetch;

	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
		u32 mc_memory_debug;

		mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;

		if (gpu->identity.revision == 0x5007)
			mc_memory_debug |= 0x0c;
		else
			mc_memory_debug |= 0x08;

		gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
	}

	/* enable module-level clock gating */
	etnaviv_gpu_enable_mlcg(gpu);

	/*
	 * Update the GPU AXI cache attributes to "cacheable, no allocate".
	 * This is necessary to prevent the i.MX6 SoC from locking up.
	 */
	gpu_write(gpu, VIVS_HI_AXI_CONFIG,
		  VIVS_HI_AXI_CONFIG_AWCACHE(2) |
		  VIVS_HI_AXI_CONFIG_ARCACHE(2));

	/* GC2000 rev 5108 needs a special bus config */
	if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
		u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
		bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
				VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
		bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
			      VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
		gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
	}

	if (gpu->sec_mode == ETNA_SEC_KERNEL) {
		u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
		val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
		gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
	}

	/* setup the pulse eater */
	etnaviv_gpu_setup_pulse_eater(gpu);

	/* setup the MMU */
	etnaviv_iommu_restore(gpu);

	/* Start command processor */
	prefetch = etnaviv_buffer_init(gpu);

	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
			     prefetch);
}

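/*
 * One-time GPU initialisation at bind time: identify the core, pick the
 * linear window base, reset the hardware, set up the MMU and kernel command
 * buffer, and perform the first hardware init.
 */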
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0) {
		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
		return ret;
	}

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside.  This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it
	 * leads to different views of the memory on the individual cores.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	} else if (PHYS_OFFSET >= SZ_2G) {
		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
		gpu->memory_base = PHYS_OFFSET;
		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
	}

	/*
	 * On cores with security features supported, we claim control over
	 * the security states.
	 */
	if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
	    (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
		gpu->sec_mode = ETNA_SEC_KERNEL;

	ret = etnaviv_hw_reset(gpu);
	if (ret) {
		dev_err(gpu->dev, "GPU reset failed\n");
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu);
	if (IS_ERR(gpu->mmu)) {
		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
		ret = PTR_ERR(gpu->mmu);
		goto fail;
	}

	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
	if (IS_ERR(gpu->cmdbuf_suballoc)) {
		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(gpu->cmdbuf_suballoc);
		goto fail;
	}

	/* Create ring buffer: */
	ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
				  PAGE_SIZE);
	if (ret) {
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}

	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
	    etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
		complete(&gpu->event_free);

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_cmdbuf_free(&gpu->buffer);
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
struct dma_debug {
	u32 address[2];
	u32 state[2];
};

static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
{
	u32 i;

	debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

	for (i = 0; i < 500; i++) {
		debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
		debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);

		if (debug->address[0] != debug->address[1])
			break;

		if (debug->state[0] != debug->state[1])
			break;
	}
}

int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct dma_debug debug;
	u32 dma_lo, dma_hi, axi, idle;
	int ret;

	seq_printf(m, "%s Status:\n", dev_name(gpu->dev));

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

	verify_dma(gpu, &debug);

	seq_puts(m, "\tfeatures\n");
	seq_printf(m, "\t major_features: 0x%08x\n",
		   gpu->identity.features);
	seq_printf(m, "\t minor_features0: 0x%08x\n",
		   gpu->identity.minor_features0);
	seq_printf(m, "\t minor_features1: 0x%08x\n",
		   gpu->identity.minor_features1);
	seq_printf(m, "\t minor_features2: 0x%08x\n",
		   gpu->identity.minor_features2);
	seq_printf(m, "\t minor_features3: 0x%08x\n",
		   gpu->identity.minor_features3);
	seq_printf(m, "\t minor_features4: 0x%08x\n",
		   gpu->identity.minor_features4);
	seq_printf(m, "\t minor_features5: 0x%08x\n",
		   gpu->identity.minor_features5);
	seq_printf(m, "\t minor_features6: 0x%08x\n",
		   gpu->identity.minor_features6);
	seq_printf(m, "\t minor_features7: 0x%08x\n",
		   gpu->identity.minor_features7);
	seq_printf(m, "\t minor_features8: 0x%08x\n",
		   gpu->identity.minor_features8);
	seq_printf(m, "\t minor_features9: 0x%08x\n",
		   gpu->identity.minor_features9);
	seq_printf(m, "\t minor_features10: 0x%08x\n",
		   gpu->identity.minor_features10);
	seq_printf(m, "\t minor_features11: 0x%08x\n",
		   gpu->identity.minor_features11);

	seq_puts(m, "\tspecs\n");
	seq_printf(m, "\t stream_count: %d\n",
		   gpu->identity.stream_count);
	seq_printf(m, "\t register_max: %d\n",
		   gpu->identity.register_max);
	seq_printf(m, "\t thread_count: %d\n",
		   gpu->identity.thread_count);
	seq_printf(m, "\t vertex_cache_size: %d\n",
		   gpu->identity.vertex_cache_size);
	seq_printf(m, "\t shader_core_count: %d\n",
		   gpu->identity.shader_core_count);
	seq_printf(m, "\t pixel_pipes: %d\n",
		   gpu->identity.pixel_pipes);
	seq_printf(m, "\t vertex_output_buffer_size: %d\n",
		   gpu->identity.vertex_output_buffer_size);
	seq_printf(m, "\t buffer_size: %d\n",
		   gpu->identity.buffer_size);
	seq_printf(m, "\t instruction_count: %d\n",
		   gpu->identity.instruction_count);
	seq_printf(m, "\t num_constants: %d\n",
		   gpu->identity.num_constants);
	seq_printf(m, "\t varyings_count: %d\n",
		   gpu->identity.varyings_count);

	seq_printf(m, "\taxi: 0x%08x\n", axi);
	seq_printf(m, "\tidle: 0x%08x\n", idle);
	idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
	if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
		seq_puts(m, "\t FE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
		seq_puts(m, "\t DE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
		seq_puts(m, "\t PE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
		seq_puts(m, "\t SH is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
		seq_puts(m, "\t PA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
		seq_puts(m, "\t SE is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
		seq_puts(m, "\t RA is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
		seq_puts(m, "\t TX is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
		seq_puts(m, "\t VG is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
		seq_puts(m, "\t IM is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
		seq_puts(m, "\t FP is not idle\n");
	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
		seq_puts(m, "\t TS is not idle\n");
	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
		seq_puts(m, "\t AXI low power mode\n");

	if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
		u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
		u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
		u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);

		seq_puts(m, "\tMC\n");
		seq_printf(m, "\t read0: 0x%08x\n", read0);
		seq_printf(m, "\t read1: 0x%08x\n", read1);
		seq_printf(m, "\t write: 0x%08x\n", write);
	}

	seq_puts(m, "\tDMA ");

	if (debug.address[0] == debug.address[1] &&
	    debug.state[0] == debug.state[1]) {
		seq_puts(m, "seems to be stuck\n");
	} else if (debug.address[0] == debug.address[1]) {
		seq_puts(m, "address is constant\n");
	} else {
		seq_puts(m, "is running\n");
	}

	seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
	seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
	seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
		   dma_lo, dma_hi);

	ret = 0;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
#endif

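/*
 * Recover a hung GPU: force a reset, release all pending events and fences,
 * and reinitialise the hardware state.  Called from the scheduler's job
 * timeout handling.
 */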
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
	unsigned long flags;
	unsigned int i = 0;

	dev_err(gpu->dev, "recover hung GPU!\n");

	if (pm_runtime_get_sync(gpu->dev) < 0)
		return;

	mutex_lock(&gpu->lock);

	etnaviv_hw_reset(gpu);

	/* complete all events, and remaining fences */
	spin_lock_irqsave(&gpu->event_spinlock, flags);
	for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
		complete(&gpu->event_free);
	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	gpu->completed_fence = gpu->active_fence;

	etnaviv_gpu_hw_init(gpu);
	gpu->lastctx = NULL;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);
}

/* fence object management */
struct etnaviv_fence {
	struct etnaviv_gpu *gpu;
	struct dma_fence base;
};

static inline struct etnaviv_fence *to_etnaviv_fence(struct dma_fence *fence)
{
	return container_of(fence, struct etnaviv_fence, base);
}

static const char *etnaviv_fence_get_driver_name(struct dma_fence *fence)
{
	return "etnaviv";
}

static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return dev_name(f->gpu->dev);
}

static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	return fence_completed(f->gpu, f->base.seqno);
}

static void etnaviv_fence_release(struct dma_fence *fence)
{
	struct etnaviv_fence *f = to_etnaviv_fence(fence);

	kfree_rcu(f, base.rcu);
}

static const struct dma_fence_ops etnaviv_fence_ops = {
	.get_driver_name = etnaviv_fence_get_driver_name,
	.get_timeline_name = etnaviv_fence_get_timeline_name,
	.enable_signaling = etnaviv_fence_enable_signaling,
	.signaled = etnaviv_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = etnaviv_fence_release,
};

static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_fence *f;

	/*
	 * The GPU lock must already be held, otherwise the fence completion
	 * order might not match the seqno order assigned here.
	 */
	lockdep_assert_held(&gpu->lock);

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->gpu = gpu;

	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
		       gpu->fence_context, ++gpu->next_fence);

	return &f->base;
}

/*
 * event management:
 */

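/*
 * Reserve nr_events hardware event IDs, blocking (with a generous timeout)
 * until enough are free.  The allocated IDs are returned in events[].
 */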
static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
	unsigned int *events)
{
	unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
	unsigned i, acquired = 0;

	for (i = 0; i < nr_events; i++) {
		unsigned long ret;

		ret = wait_for_completion_timeout(&gpu->event_free, timeout);

		if (!ret) {
			dev_err(gpu->dev, "wait_for_completion_timeout failed");
			goto out;
		}

		acquired++;
		timeout = ret;
	}

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	for (i = 0; i < nr_events; i++) {
		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);

		events[i] = event;
		memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
		set_bit(event, gpu->event_bitmap);
	}

	spin_unlock_irqrestore(&gpu->event_spinlock, flags);

	return 0;

out:
	for (i = 0; i < acquired; i++)
		complete(&gpu->event_free);

	return -EBUSY;
}

static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
	unsigned long flags;

	spin_lock_irqsave(&gpu->event_spinlock, flags);

	if (!test_bit(event, gpu->event_bitmap)) {
		dev_warn(gpu->dev, "event %u is already marked as free",
			 event);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
	} else {
		clear_bit(event, gpu->event_bitmap);
		spin_unlock_irqrestore(&gpu->event_spinlock, flags);

		complete(&gpu->event_free);
	}
}

/*
 * Cmdstream submission/retirement:
 */
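/*
 * Wait for the fence with the given ID to signal, or just poll its state
 * when no timeout was requested.
 */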
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 id, struct timespec *timeout)
{
	struct dma_fence *fence;
	int ret;

	/*
	 * Look up the fence and take a reference.  We might still find a
	 * fence whose refcount has already dropped to zero; in that case
	 * dma_fence_get_rcu() returns NULL and we treat the fence as
	 * already signalled.
	 */
	rcu_read_lock();
	fence = idr_find(&gpu->fence_idr, id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();

	if (!fence)
		return 0;

	if (!timeout) {
		/* No timeout was requested: just test for completion */
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_fence_wait_timeout(fence, true, remaining);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret != -ERESTARTSYS)
			ret = 0;
	}

	dma_fence_put(fence);
	return ret;
}

/*
 * Wait for an object to become inactive.  This, on its own, is not race
 * free: the object is moved by the scheduler off the active list, and
 * then the iova is put.  Moreover, the object could be re-submitted just
 * after we notice that it's become inactive.
 *
 * Although the retirement happens under the gpu lock, we don't want to hold
 * that lock in this function while waiting.
 */
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
{
	unsigned long remaining;
	long ret;

	if (!timeout)
		return !is_active(etnaviv_obj) ? 0 : -EBUSY;

	remaining = etnaviv_timeout_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(gpu->fence_event,
					       !is_active(etnaviv_obj),
					       remaining);
	if (ret > 0)
		return 0;
	else if (ret == -ERESTARTSYS)
		return -ERESTARTSYS;
	else
		return -ETIMEDOUT;
}

static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event, unsigned int flags)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		if (pmr->flags == flags)
			etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
	}
}

static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	u32 val;

	/* disable clock gating */
	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);

	/* enable debug registers */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
}

static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
	struct etnaviv_event *event)
{
	const struct etnaviv_gem_submit *submit = event->submit;
	unsigned int i;
	u32 val;

	sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

		*pmr->bo_vma = pmr->sequence;
	}

	/* disable debug registers */
	val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
	val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);

	/* enable clock gating */
	val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
	val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
	gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
}

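/*
 * Add the submit's command buffer to the GPU ring and kick the GPU:
 * allocate the hardware events and the GPU fence, then queue the command
 * buffer (and any perfmon sync points) under the GPU lock.  Returns the
 * new fence, or NULL on failure.
 */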
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct dma_fence *gpu_fence;
	unsigned int i, nr_events = 1, event[3];
	int ret;

	if (!submit->runtime_resumed) {
		ret = pm_runtime_get_sync(gpu->dev);
		if (ret < 0)
			return NULL;
		submit->runtime_resumed = true;
	}

	/*
	 * If there are performance monitor requests we need two extra events:
	 * - one sync point to re-configure the GPU and process the
	 *   ETNA_PM_PROCESS_PRE requests,
	 * - one sync point to re-configure the GPU, process the
	 *   ETNA_PM_PROCESS_POST requests and update the sequence number
	 *   for userspace.
	 */
	if (submit->nr_pmrs)
		nr_events = 3;

	ret = event_alloc(gpu, nr_events, event);
	if (ret) {
		DRM_ERROR("no free events\n");
		return NULL;
	}

	mutex_lock(&gpu->lock);

	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
	if (!gpu_fence) {
		for (i = 0; i < nr_events; i++)
			event_free(gpu, event[i]);

		goto out_unlock;
	}

	gpu->active_fence = gpu_fence->seqno;

	if (submit->nr_pmrs) {
		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
		kref_get(&submit->refcount);
		gpu->event[event[1]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[1]);
	}

	gpu->event[event[0]].fence = gpu_fence;
	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
	etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
			     &submit->cmdbuf);

	if (submit->nr_pmrs) {
		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
		kref_get(&submit->refcount);
		gpu->event[event[2]].submit = submit;
		etnaviv_sync_point_queue(gpu, event[2]);
	}

out_unlock:
	mutex_unlock(&gpu->lock);

	return gpu_fence;
}

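/*
 * Worker that runs a queued sync point: sample the perfmon counters, drop
 * the submit reference, free the event and restart the front end behind
 * the sync point.
 */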
static void sync_point_worker(struct work_struct *work)
{
	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
					       sync_point_work);
	struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
	u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);

	event->sync_point(gpu, event);
	etnaviv_submit_put(event->submit);
	event_free(gpu, gpu->sync_point_event);

	/* restart FE last to avoid GPU and IRQ racing against this worker */
	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}

static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
	u32 status_reg, status;
	int i;

	if (gpu->sec_mode == ETNA_SEC_NONE)
		status_reg = VIVS_MMUv2_STATUS;
	else
		status_reg = VIVS_MMUv2_SEC_STATUS;

	status = gpu_read(gpu, status_reg);
	dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);

	for (i = 0; i < 4; i++) {
		u32 address_reg;

		if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
			continue;

		if (gpu->sec_mode == ETNA_SEC_NONE)
			address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
		else
			address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;

		dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
				    gpu_read(gpu, address_reg));
	}
}

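/*
 * Top-level interrupt handler: acknowledge the interrupt, report AXI bus
 * errors and MMU faults, then signal the fence or queue the sync point
 * worker for every completed event.
 */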
static irqreturn_t irq_handler(int irq, void *data)
{
	struct etnaviv_gpu *gpu = data;
	irqreturn_t ret = IRQ_NONE;

	u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

	if (intr != 0) {
		int event;

		pm_runtime_mark_last_busy(gpu->dev);

		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
			dev_err(gpu->dev, "AXI bus error\n");
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
		}

		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
			dump_mmu_fault(gpu);
			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
		}

		while ((event = ffs(intr)) != 0) {
			struct dma_fence *fence;

			event -= 1;

			intr &= ~(1 << event);

			dev_dbg(gpu->dev, "event %u\n", event);

			if (gpu->event[event].sync_point) {
				gpu->sync_point_event = event;
				queue_work(gpu->wq, &gpu->sync_point_work);
			}

			fence = gpu->event[event].fence;
			if (!fence)
				continue;

			gpu->event[event].fence = NULL;

			/*
			 * Events can be processed out of order.  Eg,
			 * - allocate and queue event 0
			 * - allocate event 1
			 * - event 0 completes, we process it
			 * - allocate and queue event 0
			 * - event 1 and event 0 complete
			 * we can end up processing event 0 first, then 1.
			 */
			if (fence_after(fence->seqno, gpu->completed_fence))
				gpu->completed_fence = fence->seqno;
			dma_fence_signal(fence);

			event_free(gpu, event);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}

static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
	int ret;

	if (gpu->clk_reg) {
		ret = clk_prepare_enable(gpu->clk_reg);
		if (ret)
			return ret;
	}

	if (gpu->clk_bus) {
		ret = clk_prepare_enable(gpu->clk_bus);
		if (ret)
			goto disable_clk_reg;
	}

	if (gpu->clk_core) {
		ret = clk_prepare_enable(gpu->clk_core);
		if (ret)
			goto disable_clk_bus;
	}

	if (gpu->clk_shader) {
		ret = clk_prepare_enable(gpu->clk_shader);
		if (ret)
			goto disable_clk_core;
	}

	return 0;

disable_clk_core:
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
disable_clk_bus:
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);
disable_clk_reg:
	if (gpu->clk_reg)
		clk_disable_unprepare(gpu->clk_reg);

	return ret;
}

static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
	if (gpu->clk_shader)
		clk_disable_unprepare(gpu->clk_shader);
	if (gpu->clk_core)
		clk_disable_unprepare(gpu->clk_core);
	if (gpu->clk_bus)
		clk_disable_unprepare(gpu->clk_bus);
	if (gpu->clk_reg)
		clk_disable_unprepare(gpu->clk_reg);

	return 0;
}

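/*
 * Poll the idle state register until all modules in the GPU idle mask
 * report idle, or the timeout expires.
 */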
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);

		if ((idle & gpu->idle_mask) == gpu->idle_mask)
			return 0;

		if (time_is_before_jiffies(timeout)) {
			dev_warn(gpu->dev,
				 "timed out waiting for idle: idle=0x%x\n",
				 idle);
			return -ETIMEDOUT;
		}

		udelay(5);
	} while (1);
}

static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
	if (gpu->buffer.suballoc) {
		/* Replace the last WAIT with END */
		mutex_lock(&gpu->lock);
		etnaviv_buffer_end(gpu);
		mutex_unlock(&gpu->lock);

		/*
		 * We know that only the FE is busy here, this should
		 * happen quickly (as the WAIT is only 200 cycles).  If
		 * we fail, just warn and continue.
		 */
		etnaviv_gpu_wait_idle(gpu, 100);
	}

	return etnaviv_gpu_clk_disable(gpu);
}

#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = mutex_lock_killable(&gpu->lock);
	if (ret)
		return ret;

	etnaviv_gpu_update_clock(gpu);
	etnaviv_gpu_hw_init(gpu);

	gpu->lastctx = NULL;
	gpu->exec_state = -1;

	mutex_unlock(&gpu->lock);

	return 0;
}
#endif

static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	*state = 6;

	return 0;
}

static int
etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long *state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	*state = gpu->freq_scale;

	return 0;
}

static int
etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
				  unsigned long state)
{
	struct etnaviv_gpu *gpu = cdev->devdata;

	mutex_lock(&gpu->lock);
	gpu->freq_scale = state;
	if (!pm_runtime_suspended(gpu->dev))
		etnaviv_gpu_update_clock(gpu);
	mutex_unlock(&gpu->lock);

	return 0;
}

static struct thermal_cooling_device_ops cooling_ops = {
	.get_max_state = etnaviv_gpu_cooling_get_max_state,
	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
};

static int etnaviv_gpu_bind(struct device *dev, struct device *master,
	void *data)
{
	struct drm_device *drm = data;
	struct etnaviv_drm_private *priv = drm->dev_private;
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
		gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
				(char *)dev_name(dev), gpu, &cooling_ops);
		if (IS_ERR(gpu->cooling))
			return PTR_ERR(gpu->cooling);
	}

	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
	if (!gpu->wq) {
		ret = -ENOMEM;
		goto out_thermal;
	}

	ret = etnaviv_sched_init(gpu);
	if (ret)
		goto out_workqueue;

#ifdef CONFIG_PM
	ret = pm_runtime_get_sync(gpu->dev);
#else
	ret = etnaviv_gpu_clk_enable(gpu);
#endif
	if (ret < 0)
		goto out_sched;

	gpu->drm = drm;
	gpu->fence_context = dma_fence_context_alloc(1);
	idr_init(&gpu->fence_idr);
	spin_lock_init(&gpu->fence_spinlock);

	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
	init_waitqueue_head(&gpu->fence_event);

	priv->gpu[priv->num_gpus++] = gpu;

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

out_sched:
	etnaviv_sched_fini(gpu);

out_workqueue:
	destroy_workqueue(gpu->wq);

out_thermal:
	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);

	return ret;
}

static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);

	DBG("%s", dev_name(gpu->dev));

	flush_workqueue(gpu->wq);
	destroy_workqueue(gpu->wq);

	etnaviv_sched_fini(gpu);

#ifdef CONFIG_PM
	pm_runtime_get_sync(gpu->dev);
	pm_runtime_put_sync_suspend(gpu->dev);
#else
	etnaviv_gpu_hw_suspend(gpu);
#endif

	if (gpu->buffer.suballoc)
		etnaviv_cmdbuf_free(&gpu->buffer);

	if (gpu->cmdbuf_suballoc) {
		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
		gpu->cmdbuf_suballoc = NULL;
	}

	if (gpu->mmu) {
		etnaviv_iommu_destroy(gpu->mmu);
		gpu->mmu = NULL;
	}

	gpu->drm = NULL;
	idr_destroy(&gpu->fence_idr);

	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
		thermal_cooling_device_unregister(gpu->cooling);
	gpu->cooling = NULL;
}

static const struct component_ops gpu_ops = {
	.bind = etnaviv_gpu_bind,
	.unbind = etnaviv_gpu_unbind,
};

static const struct of_device_id etnaviv_gpu_match[] = {
	{
		.compatible = "vivante,gc"
	},
	{ }
};
MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);

static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct etnaviv_gpu *gpu;
	struct resource *res;
	int err;

	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	gpu->dev = &pdev->dev;
	mutex_init(&gpu->lock);
	mutex_init(&gpu->fence_idr_lock);

	/* Map registers: */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	gpu->mmio = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(gpu->mmio))
		return PTR_ERR(gpu->mmio);

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		dev_err(dev, "failed to get irq: %d\n", gpu->irq);
		return gpu->irq;
	}

	err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
			       dev_name(gpu->dev), gpu);
	if (err) {
		dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
		return err;
	}

	/* Get Clocks: */
	gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
	DBG("clk_reg: %p", gpu->clk_reg);
	if (IS_ERR(gpu->clk_reg))
		gpu->clk_reg = NULL;

	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
	DBG("clk_bus: %p", gpu->clk_bus);
	if (IS_ERR(gpu->clk_bus))
		gpu->clk_bus = NULL;

	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
	DBG("clk_core: %p", gpu->clk_core);
	if (IS_ERR(gpu->clk_core))
		gpu->clk_core = NULL;
	gpu->base_rate_core = clk_get_rate(gpu->clk_core);

	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
	DBG("clk_shader: %p", gpu->clk_shader);
	if (IS_ERR(gpu->clk_shader))
		gpu->clk_shader = NULL;
	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);

	/* TODO: figure out max mapped size */
	dev_set_drvdata(dev, gpu);

	/*
	 * We treat the device as initially suspended.  The runtime PM
	 * autosuspend delay is rather arbitrary: no measurements have
	 * yet been performed to determine an appropriate value.
	 */
	pm_runtime_use_autosuspend(gpu->dev);
	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
	pm_runtime_enable(gpu->dev);

	err = component_add(&pdev->dev, &gpu_ops);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register component: %d\n", err);
		return err;
	}

	return 0;
}

static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gpu_ops);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
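/*
 * Runtime PM suspend: only allowed once all fences have completed and the
 * hardware (apart from the front end) reports idle.
 */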
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 idle, mask;

	/* If we have outstanding fences, we're not idle */
	if (gpu->completed_fence != gpu->active_fence)
		return -EBUSY;

	/* Check whether the hardware (except FE) is idle */
	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_hw_suspend(gpu);
}

static int etnaviv_gpu_rpm_resume(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	int ret;

	ret = etnaviv_gpu_clk_enable(gpu);
	if (ret)
		return ret;

	/* Re-initialise the basic hardware state */
	if (gpu->drm && gpu->buffer.suballoc) {
		ret = etnaviv_gpu_hw_resume(gpu);
		if (ret) {
			etnaviv_gpu_clk_disable(gpu);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
			   NULL)
};

struct platform_driver etnaviv_gpu_driver = {
	.driver = {
		.name = "etnaviv-gpu",
		.owner = THIS_MODULE,
		.pm = &etnaviv_gpu_pm_ops,
		.of_match_table = etnaviv_gpu_match,
	},
	.probe = etnaviv_gpu_platform_probe,
	.remove = etnaviv_gpu_platform_remove,
	.id_table = gpu_ids,
};