1
2
3
4#ifndef __A5XX_GPU_H__
5#define __A5XX_GPU_H__
6
7#include "adreno_gpu.h"
8
9
10#undef ROP_COPY
11#undef ROP_XOR
12
13#include "a5xx.xml.h"
14
/* Per-device state for an Adreno a5xx GPU, wrapping the generic adreno_gpu */
struct a5xx_gpu {
	struct adreno_gpu base;

	/* PM4 (command processor) firmware BO and its GPU address */
	struct drm_gem_object *pm4_bo;
	uint64_t pm4_iova;

	/* PFP (prefetch parser) firmware BO and its GPU address */
	struct drm_gem_object *pfp_bo;
	uint64_t pfp_iova;

	/* GPMU firmware: BO, GPU address, and payload size in dwords */
	struct drm_gem_object *gpmu_bo;
	uint64_t gpmu_iova;
	uint32_t gpmu_dwords;

	/* Leakage coefficient used by limits-management (LM) setup —
	 * NOTE(review): exact semantics live in a5xx_power.c; confirm there */
	uint32_t lm_leakage;

	/* Ring currently executing on the GPU, and the ring a pending
	 * preemption is switching to */
	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	/* Per-ring preemption state: record BOs, counter BOs, CPU mappings
	 * of the records, and their GPU addresses */
	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];

	/* Current step of the preemption state machine (enum preempt_state)
	 * plus a watchdog timer for preemptions that never complete */
	atomic_t preempt_state;
	struct timer_list preempt_timer;

	/* Rptr-shadow buffer: one uint32_t slot per ring (see shadowptr()),
	 * with BO, GPU address, and CPU mapping */
	struct drm_gem_object *shadow_bo;
	uint64_t shadow_iova;
	uint32_t *shadow;

	/* Whether the CP firmware can report rptr via the shadow buffer —
	 * presumably the WHERE_AM_I mechanism; TODO confirm in a5xx_gpu.c */
	bool has_whereami;
};
48
/* Convert a struct adreno_gpu pointer (embedded as ->base) to its a5xx_gpu */
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
50
#ifdef CONFIG_DEBUG_FS
/* Register a5xx-specific debugfs entries under the DRM minor */
void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
/*
 * Steps of the preemption state machine tracked in a5xx_gpu::preempt_state.
 *
 * NOTE(review): per-state meanings below are inferred from the names and
 * a5xx_in_preempt() (only NONE and ABORT count as "not preempting");
 * confirm against the preemption implementation.
 */
enum preempt_state {
	PREEMPT_NONE = 0,	/* no preemption in progress */
	PREEMPT_START,		/* preemption switch is being set up */
	PREEMPT_ABORT,		/* setup abandoned before triggering the CP */
	PREEMPT_TRIGGERED,	/* CP has been told to switch rings */
	PREEMPT_FAULTED,	/* preemption failed / timed out */
	PREEMPT_PENDING,	/* waiting for the CP to signal completion */
};
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
/*
 * Per-ring record used to save/restore ring state across a preemption
 * switch.
 *
 * NOTE(review): this layout is presumably read and written directly by the
 * CP microcode — do not reorder, resize, or insert fields without
 * confirming against the firmware interface.
 */
struct a5xx_preempt_record {
	uint32_t magic;		/* must hold A5XX_PREEMPT_RECORD_MAGIC */
	uint32_t info;
	uint32_t data;
	uint32_t cntl;
	uint32_t rptr;		/* saved ring read pointer */
	uint32_t wptr;		/* saved ring write pointer */
	uint64_t rptr_addr;	/* GPU address the rptr is reported to */
	uint64_t rbase;		/* GPU address of the ring buffer */
	uint64_t counter;	/* GPU address of the counter save area */
};
117
118
/* Expected value of a5xx_preempt_record::magic */
#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL

/* Size reserved in each preempt_bo[] for a preemption record (64 KiB) */
#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)

/* Size of each per-ring preemption counter save area (64 bytes —
 * presumably 16 32-bit counters; TODO confirm against the CP firmware) */
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
133
134
/* GPMU / power management setup — implemented in a5xx_power.c */
int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
137
138static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
139 uint32_t reg, uint32_t mask, uint32_t value)
140{
141 while (usecs--) {
142 udelay(1);
143 if ((gpu_read(gpu, reg) & mask) == value)
144 return 0;
145 cpu_relax();
146 }
147
148 return -ETIMEDOUT;
149}
150
/*
 * GPU address of the rptr-shadow slot for @ring: one uint32_t per ring,
 * indexed by ring id, inside a5xx_gpu::shadow_bo.
 */
#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
		((ring)->id * sizeof(uint32_t)))
153
/* Wait for the GPU to drain @ring and go idle; returns false on timeout */
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
/* Enable/disable hardware clock gating */
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);

/* Preemption lifecycle — implemented in a5xx_preempt.c */
void a5xx_preempt_init(struct msm_gpu *gpu);
void a5xx_preempt_hw_init(struct msm_gpu *gpu);
void a5xx_preempt_trigger(struct msm_gpu *gpu);
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);

/* Kick @ring's write pointer; @sync presumably makes the update
 * synchronous — TODO confirm against the a5xx_flush() definition */
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
164
165
166static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
167{
168 int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
169
170 return !(preempt_state == PREEMPT_NONE ||
171 preempt_state == PREEMPT_ABORT);
172}
173
174#endif
175