1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#ifndef __ADRENO_GPU_H__
21#define __ADRENO_GPU_H__
22
23#include <linux/firmware.h>
24
25#include "msm_gpu.h"
26
27#include "adreno_common.xml.h"
28#include "adreno_pm4.xml.h"
29
/*
 * Helpers for building the per-generation reg_offsets[] table (indexed by
 * enum adreno_regs).  Offsets are stored biased by +1 so that a zero entry
 * means "not defined for this GPU"; REG_SKIP (~0) marks registers that must
 * not be accessed on this generation (see adreno_reg_check()).
 */
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
#define REG_SKIP ~0
#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
33
34
35
36
37
38
39
/*
 * Abstract names for the CP ringbuffer registers whose physical offsets
 * differ between GPU generations.  Generation-independent code uses these
 * to index reg_offsets[] via adreno_gpu_read()/adreno_gpu_write().
 */
enum adreno_regs {
	REG_ADRENO_CP_RB_BASE,
	REG_ADRENO_CP_RB_BASE_HI,
	REG_ADRENO_CP_RB_RPTR_ADDR,
	REG_ADRENO_CP_RB_RPTR_ADDR_HI,
	REG_ADRENO_CP_RB_RPTR,
	REG_ADRENO_CP_RB_WPTR,
	REG_ADRENO_CP_RB_CNTL,
	REG_ADRENO_REGISTER_MAX,	/* table size, not a real register */
};
50
/* Slot indices for the firmware name/blob arrays (adreno_info::fw,
 * adreno_gpu::fw). */
enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};
57
/*
 * Per-model quirk flags, OR-ed together in adreno_info::quirks.
 * Values are individual bits so they can be combined.
 */
enum adreno_quirks {
	ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
	ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
};
62
/* Chip revision, as read from the hardware ID registers: e.g. a330 v2
 * patchlevel 1 would be core=3, major=3, minor=0, patchid=1. */
struct adreno_rev {
	uint8_t core;
	uint8_t major;
	uint8_t minor;
	uint8_t patchid;
};
69
/* Build an adreno_rev value inline (compound literal). */
#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })
72
/* msm_gpu_funcs vtable extended with adreno-only operations. */
struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	/* read the GPU's timestamp/always-on counter into *value */
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};
77
/*
 * Static description of one supported GPU model: the revision it matches,
 * firmware file names, GMEM size, quirks, and the constructor for the
 * generation-specific msm_gpu.
 */
struct adreno_info {
	struct adreno_rev rev;		/* revision this entry matches */
	uint32_t revn;			/* canonical numeric name, e.g. 330, 530 */
	const char *name;
	const char *fw[ADRENO_FW_MAX];	/* firmware file names, by ADRENO_FW_* */
	uint32_t gmem;			/* GMEM size in bytes */
	enum adreno_quirks quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;		/* zap shader firmware name, if any */
};

/* Look up the device-table entry matching @rev; NULL semantics defined by
 * the implementation in adreno_device.c. */
const struct adreno_info *adreno_info(struct adreno_rev rev);
90
/*
 * Common state shared by all adreno-family GPUs; the generation-specific
 * drivers (a3xx/a4xx/a5xx) embed this as their base.
 */
struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;		/* probed chip revision */
	const struct adreno_info *info;	/* matching device-table entry */
	uint32_t gmem;			/* GMEM size in bytes */
	uint32_t revn;			/* numeric revision name, e.g. 330 */
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Which firmware location the blobs were found in.  NOTE(review):
	 * presumably probed on first adreno_request_fw() call and then
	 * reused for subsequent files so all firmware comes from one place
	 * (new per-GPU path, legacy toplevel path, or the fallback helper)
	 * -- confirm against adreno_gpu.c.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,
		FW_LOCATION_LEGACY,
		FW_LOCATION_HELPER,
	} fwloc;

	/* loaded firmware blobs, indexed by ADRENO_FW_* */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets differ between some GPU generations.  The
	 * generation-specific code exports its table (indexed by
	 * enum adreno_regs, entries biased by +1 -- see REG_ADRENO_DEFINE)
	 * and stores it here for the common accessors below.
	 */
	const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
134
135
/* Platform data handed to the adreno device: just the expected revision. */
struct adreno_platform_config {
	struct adreno_rev rev;
};
139
/* How long to wait for the GPU to drain/settle, in jiffies. */
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

/*
 * Busy-poll until condition X becomes true or ADRENO_IDLE_TIMEOUT expires.
 * X is evaluated at least once; the expression results in 0 on success and
 * -ETIMEDOUT on timeout.
 */
#define spin_until(X) ({ \
	int __ret = -ETIMEDOUT; \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do { \
		if (X) { \
			__ret = 0; \
			break; \
		} \
	} while (time_before(jiffies, __t)); \
	__ret; \
})
153
154
155static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
156{
157 return (gpu->revn >= 300) && (gpu->revn < 400);
158}
159
160static inline bool adreno_is_a305(struct adreno_gpu *gpu)
161{
162 return gpu->revn == 305;
163}
164
static inline bool adreno_is_a306(struct adreno_gpu *gpu)
{
	/* Yes, 307: the part marketed as a306 reports revn 307 -- presumably
	 * 306 identifies a different core.  NOTE(review): confirm against the
	 * device table in adreno_device.c before "fixing" this. */
	return gpu->revn == 307;
}
170
171static inline bool adreno_is_a320(struct adreno_gpu *gpu)
172{
173 return gpu->revn == 320;
174}
175
176static inline bool adreno_is_a330(struct adreno_gpu *gpu)
177{
178 return gpu->revn == 330;
179}
180
181static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
182{
183 return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
184}
185
186static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
187{
188 return (gpu->revn >= 400) && (gpu->revn < 500);
189}
190
191static inline int adreno_is_a420(struct adreno_gpu *gpu)
192{
193 return gpu->revn == 420;
194}
195
196static inline int adreno_is_a430(struct adreno_gpu *gpu)
197{
198 return gpu->revn == 430;
199}
200
201static inline int adreno_is_a530(struct adreno_gpu *gpu)
202{
203 return gpu->revn == 530;
204}
205
/*
 * Common adreno helpers, implemented in adreno_gpu.c, that the
 * generation-specific drivers plug into their msm_gpu_funcs tables or call
 * directly.
 */
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
/* Load a firmware blob by name; caller owns the returned struct firmware. */
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
/* Create a GEM buffer holding @fw's contents; *iova receives its address. */
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
/* Wait until @ring has room for @ndwords dwords. */
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

/* Base init/cleanup called by the generation-specific probe code. */
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
229
230
231
232
233static inline void
234OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
235{
236 adreno_wait_ring(ring, cnt+1);
237 OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
238}
239
240
/* Emit a bare type-2 packet (single dword, no payload -- see
 * adreno_pm4.xml.h for the encoding). */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}
247
248static inline void
249OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
250{
251 adreno_wait_ring(ring, cnt+1);
252 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
253}
254
/*
 * Parity bit for type-4/type-7 packet headers: fold the 32-bit value down
 * to the XOR of its eight nibbles, then look the result up in the 16-entry
 * bit table 0x9669 (bit i set when i has an even number of one bits).
 */
static inline u32 PM4_PARITY(u32 val)
{
	u32 nib = val;

	nib ^= nib >> 16;
	nib ^= nib >> 8;
	nib ^= nib >> 4;

	return (0x9669 >> (nib & 0xF)) & 1;
}
262
263
/* Maximum payload dwords a single type-4 packet can carry (7-bit count). */
#define TYPE4_MAX_PAYLOAD 127

/*
 * Build a type-4 packet header covering @_cnt dwords at register @_reg;
 * both the count and register fields carry a PM4_PARITY bit (bits 7/27).
 */
#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
269
/* Emit a type-4 packet header for @cnt register-value dwords at @regindx. */
static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	uint32_t header = PKT4(regindx, cnt);

	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, header);
}
276
277static inline void
278OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
279{
280 adreno_wait_ring(ring, cnt + 1);
281 OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
282 ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
283}
284
285
286
287
288
289
290static inline bool adreno_reg_check(struct adreno_gpu *gpu,
291 enum adreno_regs offset_name)
292{
293 if (offset_name >= REG_ADRENO_REGISTER_MAX ||
294 !gpu->reg_offsets[offset_name]) {
295 BUG();
296 }
297
298
299
300
301
302
303
304 if (gpu->reg_offsets[offset_name] == REG_SKIP)
305 return false;
306
307 return true;
308}
309
310static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
311 enum adreno_regs offset_name)
312{
313 u32 reg = gpu->reg_offsets[offset_name];
314 u32 val = 0;
315 if(adreno_reg_check(gpu,offset_name))
316 val = gpu_read(&gpu->base, reg - 1);
317 return val;
318}
319
320static inline void adreno_gpu_write(struct adreno_gpu *gpu,
321 enum adreno_regs offset_name, u32 data)
322{
323 u32 reg = gpu->reg_offsets[offset_name];
324 if(adreno_reg_check(gpu, offset_name))
325 gpu_write(&gpu->base, reg - 1, data);
326}
327
/* Generation-specific constructors, implemented by aNxx_gpu.c. */
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
331
332static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
333 enum adreno_regs lo, enum adreno_regs hi, u64 data)
334{
335 adreno_gpu_write(gpu, lo, lower_32_bits(data));
336 adreno_gpu_write(gpu, hi, upper_32_bits(data));
337}
338
339static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
340{
341 return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
342}
343
344
345
346
347
348
349
350
351
352
353
354
/*
 * Given a register and a count, build the value to program into a
 * CP_PROTECT register to block accesses to _len registers starting at
 * _reg.  NOTE(review): bits 30/29 presumably select write/read
 * protection -- confirm against the a4xx/a5xx CP_PROTECT register
 * description.  _len must be a power of two (ilog2).
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
358
359
360
361
362
363
/*
 * Same as ADRENO_PROTECT_RW but reads within the range remain allowed;
 * for areas of mixed use (such as performance counters) this lets a much
 * larger range be covered by a single protect register.
 *
 * Fix: the original was missing the '|' between (1 << 29) and the next
 * term, so any expansion of this macro was a syntax error.
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
367
368#endif
369