/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * NOTE(review): the original file header (SPDX tag / copyright / license
 * comment) was lost during extraction. Restore it from the upstream source
 * before committing.
 */
14#include <linux/device.h>
15#include <linux/dma-mapping.h>
16#include <linux/gfp.h>
17#include <linux/slab.h>
18#include <linux/workqueue.h>
19
20#include "vsp1.h"
21#include "vsp1_dl.h"
22
23#define VSP1_DL_NUM_ENTRIES 256
24
25#define VSP1_DLH_INT_ENABLE (1 << 1)
26#define VSP1_DLH_AUTO_START (1 << 0)
27
/*
 * One body descriptor inside a display list header: size in bytes and DMA
 * address of a register-entry body. Packed, hardware-defined layout —
 * presumably consumed directly by the display list unit (confirm against
 * the datasheet).
 */
struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));
32
/*
 * Display list header, stored in DMA memory right after the body entries.
 * Packed, hardware-defined layout. @num_lists counts the fragment bodies in
 * @lists beyond lists[0] (see vsp1_dl_list_fill_header()); @next_header
 * chains to the next list's header and @flags carries the VSP1_DLH_* bits.
 */
struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));
39
/*
 * A single display list entry: a register address and the value to write to
 * it. Packed, hardware-defined layout.
 */
struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));
44
45
46
47
48
49
50
51
52
53
/*
 * struct vsp1_dl_body - Display list body
 * @list: entry in a display list's fragments list or the manager's
 *	  gc_fragments list
 * @vsp1: the VSP1 device that owns the DMA memory
 * @entries: array of register address/data entries (DMA coherent memory)
 * @dma: DMA address of @entries
 * @size: size of the allocated DMA memory in bytes
 * @num_entries: number of valid entries currently stored in @entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};
64
65
66
67
68
69
70
71
72
73
74
75
/*
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager's free list
 * @dlm: the display list manager that owns this list
 * @header: display list header, stored after body0's entries
 *	    (header mode only, NULL otherwise)
 * @dma: DMA address of @header (header mode only)
 * @body0: first display list body, allocated together with the list
 * @fragments: list of extra vsp1_dl_body fragments added by the user
 * @has_chain: true on a chain head that has other lists linked to it
 * @chain: entry in (or, on the head, the head of) a chain of display lists
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};
89
/*
 * Display list operating mode: header mode programs a hardware-parsed
 * header (supports fragments and chaining), headerless mode programs the
 * body address and size directly.
 */
enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
/*
 * struct vsp1_dl_manager - Display list manager
 * @index: instance index, used to address the per-index VI6_CMD and
 *	   VI6_DL_HDR_ADDR registers
 * @mode: display list mode, header or headerless
 * @singleshot: true for single-shot operation (one list per trigger),
 *		false for continuous (hardware auto-restarts the list)
 * @vsp1: the VSP1 device
 * @lock: protects @free, @active, @queued, @pending and @gc_fragments
 * @free: pool of allocated, currently unused display lists
 * @active: list being processed by the hardware (continuous/single-shot)
 * @queued: list programmed into the hardware update registers, waiting to
 *	    be taken at the next frame end
 * @pending: list committed while an update was still outstanding, to be
 *	     enqueued from the frame-end interrupt
 * @gc_work: work used to free fragments outside of atomic context
 * @gc_fragments: fragments awaiting destruction by @gc_work
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};
124
125
126
127
128
129
130
131
132
133
134static int vsp1_dl_body_init(struct vsp1_device *vsp1,
135 struct vsp1_dl_body *dlb, unsigned int num_entries,
136 size_t extra_size)
137{
138 size_t size = num_entries * sizeof(*dlb->entries) + extra_size;
139
140 dlb->vsp1 = vsp1;
141 dlb->size = size;
142
143 dlb->entries = dma_alloc_wc(vsp1->bus_master, dlb->size, &dlb->dma,
144 GFP_KERNEL);
145 if (!dlb->entries)
146 return -ENOMEM;
147
148 return 0;
149}
150
151
152
153
/* Release the write-combined DMA memory backing a display list body. */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->bus_master, dlb->size, dlb->entries, dlb->dma);
}
158
159
160
161
162
163
164
165
166
167
168
169
170struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
171 unsigned int num_entries)
172{
173 struct vsp1_dl_body *dlb;
174 int ret;
175
176 dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
177 if (!dlb)
178 return NULL;
179
180 ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
181 if (ret < 0) {
182 kfree(dlb);
183 return NULL;
184 }
185
186 return dlb;
187}
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: the fragment (may be NULL, in which case nothing is done)
 *
 * Releases the fragment's DMA memory and the fragment itself. Must not be
 * called on a fragment still attached to a display list.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (dlb) {
		vsp1_dl_body_cleanup(dlb);
		kfree(dlb);
	}
}
211
212
213
214
215
216
217
218
219
220
221
222void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
223{
224 dlb->entries[dlb->num_entries].addr = reg;
225 dlb->entries[dlb->num_entries].data = data;
226 dlb->num_entries++;
227}
228
229
230
231
232
233static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
234{
235 struct vsp1_dl_list *dl;
236 size_t header_size;
237 int ret;
238
239 dl = kzalloc(sizeof(*dl), GFP_KERNEL);
240 if (!dl)
241 return NULL;
242
243 INIT_LIST_HEAD(&dl->fragments);
244 dl->dlm = dlm;
245
246
247
248
249
250
251
252 header_size = dlm->mode == VSP1_DL_MODE_HEADER
253 ? ALIGN(sizeof(struct vsp1_dl_header), 8)
254 : 0;
255
256 ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
257 header_size);
258 if (ret < 0) {
259 kfree(dl);
260 return NULL;
261 }
262
263 if (dlm->mode == VSP1_DL_MODE_HEADER) {
264 size_t header_offset = VSP1_DL_NUM_ENTRIES
265 * sizeof(*dl->body0.entries);
266
267 dl->header = ((void *)dl->body0.entries) + header_offset;
268 dl->dma = dl->body0.dma + header_offset;
269
270 memset(dl->header, 0, sizeof(*dl->header));
271 dl->header->lists[0].addr = dl->body0.dma;
272 }
273
274 return dl;
275}
276
277static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
278{
279 vsp1_dl_body_cleanup(&dl->body0);
280 list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
281 kfree(dl);
282}
283
284
285
286
287
288
289
290
291
292struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
293{
294 struct vsp1_dl_list *dl = NULL;
295 unsigned long flags;
296
297 spin_lock_irqsave(&dlm->lock, flags);
298
299 if (!list_empty(&dlm->free)) {
300 dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
301 list_del(&dl->list);
302
303
304
305
306
307 INIT_LIST_HEAD(&dl->chain);
308 }
309
310 spin_unlock_irqrestore(&dlm->lock, flags);
311
312 return dl;
313}
314
315
/*
 * Release a display list back to the manager's free list.
 *
 * Must be called with the display list manager lock held. @dl may be NULL.
 */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * A chain head releases all its chained children first; the children
	 * are only reachable through the head.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * Fragments are not freed here: freeing their DMA memory can't be
	 * done under the spinlock. Move them to the manager's garbage list
	 * and let the work queue reclaim them.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	/* Reset the body so the recycled list starts out empty. */
	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}
349
350
351
352
353
354
355
356
357
358
359void vsp1_dl_list_put(struct vsp1_dl_list *dl)
360{
361 unsigned long flags;
362
363 if (!dl)
364 return;
365
366 spin_lock_irqsave(&dl->dlm->lock, flags);
367 __vsp1_dl_list_put(dl);
368 spin_unlock_irqrestore(&dl->dlm->lock, flags);
369}
370
371
372
373
374
375
376
377
378
379
/**
 * vsp1_dl_list_write - Append a register write to a display list
 * @dl: the display list
 * @reg: register address
 * @data: value to write to the register
 *
 * The write is recorded in the list's first body.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
402 struct vsp1_dl_body *dlb)
403{
404
405 if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
406 return -EINVAL;
407
408 list_add_tail(&dlb->list, &dl->fragments);
409 return 0;
410}
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
430 struct vsp1_dl_list *dl)
431{
432
433 if (head->dlm->mode != VSP1_DL_MODE_HEADER)
434 return -EINVAL;
435
436 head->has_chain = true;
437 list_add_tail(&dl->chain, &head->chain);
438 return 0;
439}
440
/*
 * Fill a display list's hardware header with the body addresses and sizes,
 * and set the next-header/flags fields according to the list's position in
 * its chain and the manager's operating mode.
 *
 * @is_last: true for the last (or only) list of a chain
 */
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the body addresses and sizes. The address of
	 * the first body (lists[0].addr) was already set when the display
	 * list was allocated, only its size needs updating here.
	 */
	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	/* num_lists counts the fragment bodies beyond lists[0]. */
	dl->header->num_lists = num_lists;

	if (!list_empty(&dl->chain) && !is_last) {
		/*
		 * Not the last list of a chain: link to the next chained
		 * list and let the hardware start it automatically.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else if (!dlm->singleshot) {
		/*
		 * Continuous mode: loop the list back onto itself so the
		 * hardware keeps processing it, and enable the frame-end
		 * interrupt.
		 */
		dl->header->next_header = dl->dma;
		dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
	} else {
		/*
		 * Single-shot mode: only request the interrupt; processing
		 * stops after this list.
		 */
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
494
495static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
496{
497 struct vsp1_device *vsp1 = dlm->vsp1;
498
499 if (!dlm->queued)
500 return false;
501
502
503
504
505
506
507
508 if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
509 return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
510 & VI6_DL_BODY_SIZE_UPD);
511 else
512 return !!(vsp1_read(vsp1, VI6_CMD(dlm->index) & VI6_CMD_UPDHDR));
513}
514
/*
 * Program a display list into the hardware update registers.
 *
 * Must be called with the display list manager lock held. The hardware
 * takes the update at its next suitable point (see
 * vsp1_dl_list_hw_update_pending()).
 */
static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
		/*
		 * Headerless mode: program the body address and size
		 * directly. Setting VI6_DL_BODY_SIZE_UPD signals that a new
		 * body is available.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries * sizeof(*dl->header->lists)));
	} else {
		/*
		 * Header mode: program the header address only; the header
		 * describes the bodies, size and chaining.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
	}
}
541
/*
 * Commit a display list in continuous mode.
 *
 * Must be called with the display list manager lock held.
 */
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If the hardware hasn't taken the previously queued update yet, the
	 * update registers can't be touched: park this list as pending,
	 * replacing (and freeing) any list already parked there. It will be
	 * enqueued from the frame-end interrupt handler.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Otherwise program this list immediately and make it the queued
	 * list, releasing the list it replaces.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}
568
/*
 * Commit a display list in single-shot mode.
 *
 * Must be called with the display list manager lock held. The list is
 * programmed immediately and marked active — presumably the caller
 * guarantees the hardware is idle at this point (TODO confirm with the
 * pipeline code).
 */
static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}
582
/*
 * Commit a display list to the hardware. In header mode the headers of the
 * list and of all its chained lists are filled first; the list is then
 * handed to the single-shot or continuous commit path under the manager
 * lock.
 */
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_child;
	unsigned long flags;

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		/* The head is "last" only when nothing is chained to it. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}
	}

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
609
610
611
612
613
614
615
616
617
618
619
620
621
622
/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame-end interrupt
 * @dlm: the display list manager
 *
 * Advances the manager's active/queued/pending state machine at frame end.
 *
 * Return true when a display list completed at this frame end, false when
 * completion has been delayed (a commit raced with the interrupt and the
 * hardware has not taken the update yet).
 */
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	bool completed = false;

	spin_lock(&dlm->lock);

	/*
	 * Single-shot mode: the hardware stops after the active list, so
	 * just release it.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		completed = true;
		goto done;
	}

	/*
	 * If an update programmed by a racing commit has not been taken by
	 * the hardware yet, the active list has not been replaced: nothing
	 * to do for this frame.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * The hardware has taken the queued list: it becomes the active
	 * list, and the previous active list is released.
	 */
	if (dlm->queued) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		completed = true;
	}

	/*
	 * With the previous update taken, a parked pending list (see
	 * vsp1_dl_list_commit_continuous()) can now safely be programmed.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return completed;
}
676
677
678void vsp1_dlm_setup(struct vsp1_device *vsp1)
679{
680 u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
681 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
682 | VI6_DL_CTRL_DLE;
683
684
685
686
687
688 if (vsp1->drm)
689 ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
690
691 vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
692 vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
693}
694
/*
 * Reset the display list manager, releasing the active, queued and pending
 * lists back to the free pool. NOTE(review): the hardware is presumably
 * stopped by the caller before this runs — the lists are released without
 * checking the device state; confirm against the callers.
 */
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}
711
712
713
714
715
716
/*
 * Free all fragments on the manager's garbage list.
 *
 * The spinlock is dropped around each vsp1_dl_fragment_free() call —
 * presumably because freeing the DMA memory must not happen under the
 * spinlock — and reacquired to pick the next fragment off the list.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		/* Drop the lock for the potentially sleeping DMA free. */
		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}
737
738static void vsp1_dlm_garbage_collect(struct work_struct *work)
739{
740 struct vsp1_dl_manager *dlm =
741 container_of(work, struct vsp1_dl_manager, gc_work);
742
743 vsp1_dlm_fragments_free(dlm);
744}
745
746struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
747 unsigned int index,
748 unsigned int prealloc)
749{
750 struct vsp1_dl_manager *dlm;
751 unsigned int i;
752
753 dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
754 if (!dlm)
755 return NULL;
756
757 dlm->index = index;
758 dlm->mode = index == 0 && !vsp1->info->uapi
759 ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
760 dlm->singleshot = vsp1->info->uapi;
761 dlm->vsp1 = vsp1;
762
763 spin_lock_init(&dlm->lock);
764 INIT_LIST_HEAD(&dlm->free);
765 INIT_LIST_HEAD(&dlm->gc_fragments);
766 INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);
767
768 for (i = 0; i < prealloc; ++i) {
769 struct vsp1_dl_list *dl;
770
771 dl = vsp1_dl_list_alloc(dlm);
772 if (!dl)
773 return NULL;
774
775 list_add_tail(&dl->list, &dlm->free);
776 }
777
778 return dlm;
779}
780
781void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
782{
783 struct vsp1_dl_list *dl, *next;
784
785 if (!dlm)
786 return;
787
788 cancel_work_sync(&dlm->gc_work);
789
790 list_for_each_entry_safe(dl, next, &dlm->free, list) {
791 list_del(&dl->list);
792 vsp1_dl_list_free(dl);
793 }
794
795 vsp1_dlm_fragments_free(dlm);
796}
797