// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

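/*
 * A job and all of its per-job tables (relocations, unpin records, gathers
 * and DMA addresses) live in one kzalloc() block; the pointer arithmetic
 * below carves that block into the individual arrays.
 */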
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_cmdbufs + num_relocs;
	u64 total;
	void *mem;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

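/*
 * Jobs are reference counted; host1x_job_get()/host1x_job_put() take and
 * drop references, and job_free() runs once the last reference is dropped.
 */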
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

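/*
 * Pin every buffer the job references: first the relocation targets, then
 * the gather (command) buffers. Each pinned buffer is recorded in the
 * unpins[] table so host1x_job_unpin() can undo the work, including after
 * a partial failure. Returns 0 on success or a negative error code.
 */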
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	struct host1x_client *client = job->client;
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		dma_addr_t phys_addr, *phys;
		struct sg_table *sgt;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		/*
		 * If the client manages its own IOMMU group, ask
		 * host1x_bo_pin() for the buffer's address directly;
		 * otherwise the buffer is mapped through the returned
		 * scatterlist below.
		 */
		if (client->group)
			phys = &phys_addr;
		else
			phys = NULL;

		sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (sgt) {
			unsigned long mask = HOST1X_RELOC_READ |
					     HOST1X_RELOC_WRITE;
			enum dma_data_direction dir;

			switch (reloc->flags & mask) {
			case HOST1X_RELOC_READ:
				dir = DMA_TO_DEVICE;
				break;

			case HOST1X_RELOC_WRITE:
				dir = DMA_FROM_DEVICE;
				break;

			case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
				dir = DMA_BIDIRECTIONAL;
				break;

			default:
				err = -EINVAL;
				goto unpin;
			}

			/* dma_map_sg() returns the number of mapped entries, 0 on failure */
			err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
			if (!err) {
				err = -ENOMEM;
				goto unpin;
			}

			job->unpins[job->num_unpins].dev = dev;
			job->unpins[job->num_unpins].dir = dir;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		unsigned int j;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		sgt = host1x_bo_pin(host->dev, g->bo, NULL);
		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto unpin;
		}

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
			/*
			 * Map the gather read-only (IOMMU_READ) into the
			 * host1x IOMMU domain at a freshly allocated IOVA
			 * range.
			 */
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto unpin;
			}

			err = iommu_map_sg(host->domain,
					   iova_dma_addr(&host->iova, alloc),
					   sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto unpin;
			}

			job->unpins[job->num_unpins].size = gather_size;
			phys_addr = iova_dma_addr(&host->iova, alloc);
		} else {
			err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
					 DMA_TO_DEVICE);
			if (!err) {
				err = -ENOMEM;
				goto unpin;
			}

			job->unpins[job->num_unpins].dev = host->dev;
			phys_addr = sg_dma_address(sgt->sgl);
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->gather_addr_phys[i] = phys_addr;

		job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

unpin:
	host1x_job_unpin(job);
	return err;
}

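/*
 * Patch one gather: for every relocation targeting this command buffer,
 * overwrite the placeholder word with the pinned DMA address (shifted as
 * requested). The command buffer is kmapped one page at a time, except
 * when the firewall copy is in use, which is mapped contiguously.
 */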
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	u32 last_page = ~0;
	void *cmdbuf_page_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
					g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
			if (cmdbuf_page_addr)
				host1x_bo_kunmap(cmdbuf, last_page,
						 cmdbuf_page_addr);

			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
					reloc->cmdbuf.offset >> PAGE_SHIFT);
			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

			if (unlikely(!cmdbuf_page_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_page_addr)
		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

	return 0;
}

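/*
 * A relocation satisfies a firewall check only if it targets the exact
 * command-buffer word being validated.
 */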
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

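/*
 * Parser state for the software firewall: the current class, register,
 * write mask/count, and the next relocation expected to be consumed while
 * a gather is walked word by word.
 */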
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

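/*
 * Any write to a register that the client driver reports as an address
 * register must be backed by the next pending relocation; a raw address
 * supplied by user space is rejected.
 */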
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

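/*
 * Without an is_valid_class() callback only the job's own class may be
 * selected; with one, any class the client driver accepts is allowed.
 */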
static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

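/*
 * A MASK write emits one data word per set bit, targeting the register
 * offset that corresponds to that bit; each data word is checked.
 */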
static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

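/*
 * An INCR write emits count data words to consecutive registers starting
 * at reg; each one is checked.
 */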
static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

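/*
 * A NONINCR write emits count data words to the same register; each one
 * is checked.
 */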
static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

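/*
 * Walk one gather in the firewall copy word by word. The opcode lives in
 * the top four bits of each word: 0 = SETCLASS, 1 = INCR, 2 = NONINCR and
 * 3 = MASK are decoded and checked; opcodes 4 and 14 (IMM and EXTEND in
 * the host1x opcode encoding) are single-word opcodes that carry no
 * separate data words, so they pass through; anything else is rejected.
 */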
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;

		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;

		case 4:
		case 14:
			break;

		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

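/*
 * With the firewall enabled, every gather is copied into host-owned,
 * write-combined memory before being validated, so user space cannot
 * rewrite a command buffer after it has passed the checks.
 */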
static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from a higher priority pool first,
	 * as waiting for the allocation here is a major performance hog.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher priority allocation failed, try the generic-blocking */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

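/*
 * Prepare a job for submission: pin all referenced buffers, run the
 * firewall copy and validation when enabled, resolve each gather's DMA
 * base address and patch in the relocations. On failure, everything
 * pinned so far is released again.
 */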
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gathers base if firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

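/*
 * Undo host1x_job_pin(): tear down IOMMU and DMA mappings, unpin and
 * release every buffer recorded in unpins[], and free the firewall's
 * gather copy if one was allocated.
 */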
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];
		struct device *dev = unpin->dev ?: host->dev;
		struct sg_table *sgt = unpin->sgt;

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				  iova_pfn(&host->iova, job->addr_phys[i]));
		}

		if (unpin->dev && sgt)
			dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
				     unpin->dir);

		host1x_bo_unpin(dev, unpin->bo, sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}