/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
struct msm_file_private;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};
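
/*
 * Illustrative sketch (not a declaration from this header): a backend
 * describes its MMIO region name and ring count with msm_gpu_config
 * before handing it to msm_gpu_init().  The names used below are
 * hypothetical.
 *
 *	static const struct msm_gpu_config foo_gpu_config = {
 *		.ioname = "foo_gpu_mmio",
 *		.nr_rings = 4,
 *	};
 */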

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * all the features are exposed, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *    + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t *value, uint32_t *len);
	int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t value, uint32_t len);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
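
/*
 * Illustrative sketch (hypothetical, not from this driver): a GPU backend
 * implements the hooks above by filling in a static vtable and pointing
 * msm_gpu::funcs at it.  All "foo" names below are invented.
 *
 *	static const struct msm_gpu_funcs foo_gpu_funcs = {
 *		.hw_init = foo_hw_init,
 *		.pm_suspend = foo_pm_suspend,
 *		.pm_resume = foo_pm_resume,
 *		.submit = foo_submit,
 *		.flush = foo_flush,
 *		.irq = foo_irq,
 *		.active_ring = foo_active_ring,
 *		.recover = foo_recover,
 *		.destroy = foo_destroy,
 *	};
 */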

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/**
	 * idle_freq:
	 *
	 * DEV_PM_QOS_MAX_FREQUENCY request used to clamp the frequency
	 * to minimum while the GPU is idle.
	 */
	struct dev_pm_qos_request idle_freq;

	/**
	 * boost_freq:
	 *
	 * DEV_PM_QOS_MIN_FREQUENCY request used to temporarily raise the
	 * frequency floor when a boost is requested.
	 */
	struct dev_pm_qos_request boost_freq;

	/**
	 * busy_cycles:
	 *
	 * The busy-cycle counter value at the last sample point, used to
	 * calculate elapsed busy cycles since the last sample.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	/** average_status: moving average of recent devfreq status samples */
	struct devfreq_dev_status average_status;

	/**
	 * idle_work:
	 *
	 * Used to delay clamping to idle freq on active->idle transition.
	 */
	struct msm_hrtimer_work idle_work;

	/**
	 * boost_work:
	 *
	 * Used to reset the boost_freq request after the boost period
	 * expires.
	 */
	struct msm_hrtimer_work boost_work;
};
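
/*
 * Sketch of how the two QoS requests above interact (an assumption drawn
 * from the field names and the msm_devfreq_* entry points declared below;
 * see msm_gpu_devfreq.c for the authoritative logic):
 *
 *	on idle:	dev_pm_qos_update_request(&df->idle_freq, 0);
 *	on active:	dev_pm_qos_update_request(&df->idle_freq,
 *				PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
 *	on boost:	dev_pm_qos_update_request(&df->boost_freq,
 *				boosted_khz);
 *
 * i.e. idle clamps the max frequency down, boost raises the min frequency
 * floor, and the devfreq governor operates between the two.
 */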

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/**
	 * sysprof_active:
	 *
	 * The count of contexts that have enabled system profiling.
	 */
	refcount_t sysprof_active;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit rendering,
	 * and the one with current pgtables installed (for generations
	 * that support per-context pgtables).  Tracked by seqno rather
	 * than pointer value to avoid dangling pointers, and cases where
	 * a ctx can be freed and a new one created with the same address.
	 */
	int cur_ctx_seqno;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/**
	 * lock:
	 *
	 * General lock for serializing all the gpu things.
	 *
	 * TODO move to per-ring locking where feasible (ie. submit/retire
	 * path, etc)
	 */
	struct mutex lock;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/**
	 * global_faults: number of GPU hangs not attributed to a particular
	 * address space
	 */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/** retire_event: notified when submits are retired: */
	wait_queue_head_t retire_event;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* Enable clamping to idle freq when inactive: */
	bool clamp_to_idle;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
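
/*
 * Example usage (illustrative; the callback name is hypothetical):
 * recovering the GPU from a bare struct device, e.g. in a PM callback.
 * This works because the device's drvdata points at the embedded
 * adreno_smmu, which container_of() walks back to the msm_gpu.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct msm_gpu *gpu = dev_to_gpu(dev);
 *
 *		return gpu->funcs->pm_suspend(gpu);
 *	}
 */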

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
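
/*
 * Worked example of the default value (a quick sanity check, given that
 * SZ_32K == 32768):
 *
 *	BUFSZ = ilog2(32768 / 8) = ilog2(4096) = 12
 *	BLKSZ = ilog2(32 / 8)    = ilog2(4)    = 2
 *
 * i.e. both fields are programmed as log2 of the size in 8-byte units.
 */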

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
			return true;
	}

	return false;
}
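
/*
 * In the check above, ring->fctx->last_fence is the seqno of the last
 * submitted job on the ring, while ring->memptrs->fence is the last seqno
 * the GPU has written back on completion; a ring whose last submission has
 * not yet been fenced off counts as active.
 */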

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perfcounter..  but msm_gpu base class
 * will handle sampling/displaying the counters.
 */
struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
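
/*
 * Hypothetical example entry (the register and event names are invented
 * for illustration):
 *
 *	static const struct msm_gpu_perfcntr foo_perfcntrs[] = {
 *		{ REG_FOO_PERF_SEL, REG_FOO_PERF_CNT_LO,
 *		  FOO_PERF_ALWAYS_COUNT, "always_count" },
 *	};
 *
 * The child class writes select_val to select_reg to choose which event is
 * counted; the msm_gpu core then reads sample_reg when sampling.
 */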

/*
 * The number of priority levels provided by drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
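
/*
 * With the drm_sched_priority values at the time this header was written
 * (DRM_SCHED_PRIORITY_MIN == 0, DRM_SCHED_PRIORITY_HIGH == 2), this works
 * out to 1 + 2 - 0 == 3 scheduler priority levels per ring.
 */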

/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock:    synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid:      counter incremented each time a submitqueue is created,
 *                used to assign &msm_gpu_submitqueue.id
 * @aspace:       the per-process GPU address-space
 * @ref:          reference count
 * @seqno:        unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * sysprof:
	 *
	 * The value of MSM_PARAM_SYSPROF set by userspace.  This is
	 * intended to be used by system profiling tools like Mesa's
	 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
	 *
	 * Setting a value of 1 will preserve performance counters across
	 * context switches.  Setting a value of 2 will in addition
	 * suppress suspend.  (Performance counters lose state across
	 * power collapse, which is undesirable for profiling in some
	 * cases.)
	 *
	 * The value automatically reverts to zero when the drm device
	 * file is closed.
	 */
	int sysprof;

	/** comm: Overridden task comm, see MSM_PARAM_COMM */
	char *comm;

	/** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
	char *cmdline;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file.  Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-
	 * level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu:        the gpu instance
 * @prio:       the userspace priority level
 * @ring_nr:    [out] the ring number
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              priority maps to
 *
 * With drm/scheduler providing its own levels of priority, the total number
 * of available userspace priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * The userspace priority is mapped so that lower numeric values are higher
 * priority: counting up from zero walks first through the scheduler priority
 * levels within the highest priority ring, then on to the next (lower
 * priority) ring.
 *
 * Returns zero on success, or -EINVAL if the requested priority is out of
 * range for the number of rings this GPU has.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}
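
/*
 * Worked example: on a GPU with nr_rings == 2 (so 2 * 3 == 6 userspace
 * priority levels, 0..5), prio == 4 gives rn == 4 / 3 == 1 with a
 * remainder of 1, which inverts to sched_prio == 3 - 1 - 1 == 1, i.e.
 * ring 1 at the middle scheduler priority (DRM_SCHED_PRIORITY_NORMAL,
 * assuming the enum values noted above).
 */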

/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id:        userspace id for the submitqueue, unique within the drm_file
 * @flags:     userspace flags for the submitqueue, specified at creation
 *             (currently unused)
 * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
 *             by the submitqueue's priority
 * @faults:    the number of GPU hangs associated with this submitqueue
 * @last_fence: the sequence number of the last allocated fence (for error
 *             checking)
 * @ctx:       the per-drm_file context associated with the submitqueue (ie.
 *             which set of pgtables the jobs submitted on this submitqueue
 *             use)
 * @node:      node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 *             seqno, protected by submitqueue lock
 * @lock:      submitqueue lock
 * @ref:       reference count
 * @entity:    the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	uint32_t last_fence;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}
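
/*
 * Example usage (illustrative; the register and bit names are invented):
 * set a bit in a control register without disturbing its other fields:
 *
 *	gpu_rmw(gpu, REG_FOO_CTRL, FOO_CTRL_ENABLE, FOO_CTRL_ENABLE);
 *
 * i.e. a read-modify-write: new = (old & ~mask) | or.  Note that reg is a
 * dword offset, hence the (reg << 2) byte-offset conversion above.
 */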

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is
	 * latched when the lo is read, so make sure to read the lo first to
	 * trigger that
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}
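
/*
 * Example usage (illustrative; REG_FOO_LO/REG_FOO_HI are hypothetical
 * dword offsets of a 64-bit counter split across a lo/hi register pair):
 *
 *	u64 ticks = gpu_read64(gpu, REG_FOO_LO, REG_FOO_HI);
 *	gpu_write64(gpu, REG_FOO_LO, REG_FOO_HI, 0);
 */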

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof);
void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}
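
/*
 * Reference counting sketch (illustrative): a caller that stashes the
 * context takes a reference, and drops it when the stashed pointer dies:
 *
 *	queue->ctx = msm_file_private_get(ctx);
 *	...
 *	msm_file_private_put(queue->ctx);
 */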

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->lock);

	return state;
}
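
/*
 * The get/put pair here is intended to be used like (illustrative):
 *
 *	struct msm_gpu_state *state = msm_gpu_crashstate_get(gpu);
 *	if (state) {
 *		... dump or inspect state ...
 *		msm_gpu_crashstate_put(gpu);
 *	}
 *
 * The put below drops the reference via the backend's gpu_state_put() and
 * clears gpu->crashstate once the last reference is gone.
 */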

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->lock);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
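
/*
 * Example usage (illustrative): allocating a kernel buffer that becomes
 * privileged on hardware with apriv support, and stays a plain mapping
 * otherwise:
 *
 *	msm_gem_kernel_new(drm, size, check_apriv(gpu, MSM_BO_WC),
 *			gpu->aspace, &bo, &iova);
 */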

#endif /* __MSM_GPU_H__ */