// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include "uapi/drm/v3d_drm.h"

#define GMP_GRANULARITY (128 * 1024)

/* Enum for each of the V3D queues. */
enum v3d_queue {
	V3D_BIN,
	V3D_RENDER,
	V3D_TFU,
	V3D_CSD,
	V3D_CACHE_CLEAN,
};

#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
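
/*
 * Illustrative sketch (editorial, not from the driver): because the enum
 * values are contiguous and V3D_MAX_QUEUES is derived from the last one,
 * per-queue state is naturally walked with an index loop; init_queue()
 * here is a made-up name:
 *
 *	enum v3d_queue q;
 *
 *	for (q = 0; q < V3D_MAX_QUEUES; q++)
 *		init_queue(&v3d->queue[q]);
 */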

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version
	 * and revision.
	 */
	int ver;
	bool single_irq_line;

	struct device *dev;
	struct platform_device *pdev;
	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;
	struct clk *clk;
	struct reset_control *reset;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page.  When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;
	/* Virtual address bits from V3D to the MMU. */
	int va_width;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space.  All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_bin_job *bin_job;
	struct v3d_render_job *render_job;
	struct v3d_tfu_job *tfu_job;
	struct v3d_csd_job *csd_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.
	 */
	spinlock_t job_lock;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	/* Lock taken when creating and pushing the GPU scheduler
	 * jobs, to keep the sched-fence seqnos in order.
	 */
	struct mutex sched_lock;

	/* Lock taken during a cache clean and when initiating an L2
	 * flush, to keep L2 flushes from interfering with the
	 * synchronous L2 cleans.
	 */
	struct mutex cache_clean_lock;

	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;
};
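
/*
 * Illustrative sketch (editorial) of the bo_stats accounting that
 * bo_lock serializes; the field names match the struct above, the
 * surrounding function context is assumed:
 *
 *	mutex_lock(&v3d->bo_lock);
 *	v3d->bo_stats.num_allocated++;
 *	v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
 *	mutex_unlock(&v3d->bo_lock);
 */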

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return (struct v3d_dev *)dev->dev_private;
}

static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
	return v3d->ver >= 41;
}
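
/*
 * Illustrative sketch (editorial): submission paths can gate compute
 * dispatch on this helper, since CSD requires V3D 4.1+; roughly:
 *
 *	if (!v3d_has_csd(v3d))
 *		return -EINVAL;
 */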

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};

struct v3d_bo {
	struct drm_gem_shmem_object base;

	struct drm_mm_node node;

	/* List entry for the BO's position in
	 * v3d_render_job->unref_list
	 */
	struct list_head unref_head;
};

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)
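
/*
 * Editorial note: these accessors assume a local variable named "v3d" of
 * type struct v3d_dev * in scope at the call site.  A usage sketch, where
 * the register names are assumed to come from v3d_regs.h:
 *
 *	u32 ident = V3D_READ(V3D_HUB_IDENT1);
 *
 *	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, ~0);
 */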

struct v3d_job {
	struct drm_sched_job base;

	struct kref refcount;

	struct v3d_dev *v3d;

	/* This is the array of BOs that were looked up at the start
	 * of submission.
	 */
	struct drm_gem_object **bo;
	u32 bo_count;

	/* Array of struct dma_fence * to block on before submitting
	 * this job.
	 */
	struct xarray deps;
	unsigned long last_dep;

	/* v3d fence to be signaled by IRQ handler when the job is
	 * complete.
	 */
	struct dma_fence *irq_fence;

	/* scheduler fence for when the job is considered complete and
	 * the BO reservations can be released.
	 */
	struct dma_fence *done_fence;

	/* Callback for the freeing of the job on refcount going to 0. */
	void (*free)(struct kref *ref);
};
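
/*
 * Illustrative sketch (editorial) of the refcounting contract: dropping
 * the last reference invokes the job's free callback, roughly what
 * v3d_job_put() does:
 *
 *	kref_put(&job->refcount, job->free);
 */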

struct v3d_bin_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* Corresponding render job, for attaching our overflow memory. */
	struct v3d_render_job *render;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};

struct v3d_render_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;
};

struct v3d_tfu_job {
	struct v3d_job base;

	struct drm_v3d_submit_tfu args;
};

struct v3d_csd_job {
	struct v3d_job base;

	u32 timedout_batches;

	struct drm_v3d_submit_csd args;
};

/**
 * wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar
 * atomic contexts.  Note that it's important that we check the condition
 * again after having timed out, since the timeout could be due to
 * preemption or similar and we've never had a chance to check the
 * condition before the timeout.
 */
#define wait_for(COND, MS) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			if (!(COND)) \
				ret__ = -ETIMEDOUT; \
			break; \
		} \
		msleep(1); \
	} \
	ret__; \
})
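
/*
 * Usage sketch (editorial), polling a register until a busy bit clears;
 * V3D_SOME_STATUS and SOME_BUSY_BIT are made-up names:
 *
 *	int ret = wait_for(!(V3D_READ(V3D_SOME_STATUS) & SOME_BUSY_BIT), 100);
 *
 *	if (ret)
 *		dev_err(v3d->dev, "timed out waiting for idle\n");
 *
 * Note the macro msleep()s between polls, so it must only be used from
 * sleepable (non-atomic) context.
 */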

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
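
/*
 * Usage sketch (editorial): convert a userspace nanosecond timeout before
 * a jiffies-based wait.  The "+ 1" in the helper rounds up so a short,
 * nonzero timeout never truncates to 0 jiffies:
 *
 *	unsigned long timeout = nsecs_to_jiffies_timeout(args->timeout_ns);
 *
 *	dma_fence_wait_timeout(fence, true, timeout);
 */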

/* v3d_bo.c */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);

/* v3d_debugfs.c */
int v3d_debugfs_init(struct drm_minor *minor);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);

/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
		       u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);