/*
 * vsp1_dl.c  --  R-Car VSP1 Display List
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

#define VSP1_DLH_EXT_PRE_CMD_EXEC	(1 << 9)
#define VSP1_DLH_EXT_POST_CMD_EXEC	(1 << 8)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __packed;

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __packed;

/**
 * struct vsp1_dl_ext_header - Extended display list header
 * @padding: padding zero bytes for alignment
 * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
 * @flags: enables or disables execution of the pre and post commands
 * @pre_ext_dl_plist: start address of the pre-extended display list bodies
 * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
 * @post_ext_dl_plist: start address of the post-extended display list bodies
 */
struct vsp1_dl_ext_header {
	u32 padding;

	/*
	 * The hardware documentation lists the flags ahead of
	 * pre_ext_dl_num_cmd, expecting 32-bit accesses. The flags apply to
	 * the whole extended header, not only to the pre command, and are
	 * named accordingly.
	 */
	u16 pre_ext_dl_num_cmd;
	u16 flags;
	u32 pre_ext_dl_plist;

	u32 post_ext_dl_num_cmd;
	u32 post_ext_dl_plist;
} __packed;

struct vsp1_dl_header_extended {
	struct vsp1_dl_header header;
	struct vsp1_dl_ext_header ext;
} __packed;

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __packed;

/**
 * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
 * @opcode: extended display list command operation code
 * @flags: pre-extended command flags, specific to each command
 * @address_set: source address set pointer, must be 16-byte aligned
 * @reserved: zero bits for alignment
 */
struct vsp1_pre_ext_dl_body {
	u32 opcode;
	u32 flags;
	u32 address_set;
	u32 reserved;
} __packed;

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @free: entry in the pool free body list
 * @refcnt: reference count for the body
 * @pool: pool to which this body belongs
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 * @max_entries: number of entries available
 */
struct vsp1_dl_body {
	struct list_head list;
	struct list_head free;

	refcount_t refcnt;

	struct vsp1_dl_body_pool *pool;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
	unsigned int max_entries;
};

/**
 * struct vsp1_dl_body_pool - Display list body pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @bodies: array of body structures for the pool
 * @free: list of free bodies
 * @lock: protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_body_pool {
	/* DMA allocation */
	dma_addr_t dma;
	size_t size;
	void *mem;

	/* Body management */
	struct vsp1_dl_body *bodies;
	struct list_head free;
	spinlock_t lock;

	struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_cmd_pool - Display list commands pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @cmds: array of command structures for the pool
 * @free: list of free pool entries
 * @lock: protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_cmd_pool {
	/* DMA allocation */
	dma_addr_t dma;
	size_t size;
	void *mem;

	struct vsp1_dl_ext_cmd *cmds;
	struct list_head free;

	spinlock_t lock;

	struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header
 * @extension: extended display list header, NULL for normal lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @bodies: list of extra display list bodies
 * @pre_cmd: pre command to be issued through the extended DL header
 * @post_cmd: post command to be issued through the extended DL header
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 * @flags: a combination of VSP1_DL_FRAME_END_* flags
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	struct vsp1_dl_ext_header *extension;
	dma_addr_t dma;

	struct vsp1_dl_body *body0;
	struct list_head bodies;

	struct vsp1_dl_ext_cmd *pre_cmd;
	struct vsp1_dl_ext_cmd *post_cmd;

	bool has_chain;
	struct list_head chain;

	unsigned int flags;
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued and pending lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by the hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @pool: body pool for the display list bodies
 * @cmdpool: commands pool for extended display lists
 */
struct vsp1_dl_manager {
	unsigned int index;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct vsp1_dl_body_pool *pool;
	struct vsp1_dl_cmd_pool *cmdpool;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/**
 * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
 * @vsp1: the VSP1 device
 * @num_bodies: the number of bodies to allocate
 * @num_entries: the maximum number of entries that a body can contain
 * @extra_size: extra allocation provided for the bodies
 *
 * Allocate a pool of display list bodies, each with enough memory to contain
 * the requested number of entries plus the @extra_size.
 *
 * Return a pointer to a pool on success or NULL if memory can't be allocated.
 */
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
			 unsigned int num_entries, size_t extra_size)
{
	struct vsp1_dl_body_pool *pool;
	size_t dlb_size;
	unsigned int i;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	/*
	 * TODO: 'extra_size' is only used by vsp1_dlm_create(), to allocate
	 * extra memory for the display list header. Only one header is needed
	 * per display list, not per display list body, so this allocation is
	 * larger than strictly necessary.
	 */
	dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
	pool->size = dlb_size * num_bodies;

	pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
	if (!pool->bodies) {
		kfree(pool);
		return NULL;
	}

	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->bodies);
		kfree(pool);
		return NULL;
	}

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	for (i = 0; i < num_bodies; ++i) {
		struct vsp1_dl_body *dlb = &pool->bodies[i];

		dlb->pool = pool;
		dlb->max_entries = num_entries;

		dlb->dma = pool->dma + i * dlb_size;
		dlb->entries = pool->mem + i * dlb_size;

		list_add_tail(&dlb->free, &pool->free);
	}

	return pool;
}
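
/*
 * Usage sketch (illustrative, not part of the driver): a caller holding a
 * struct vsp1_device would create a pool once at setup time and release it
 * on teardown. The pool sizes below are arbitrary example values.
 *
 *	struct vsp1_dl_body_pool *pool;
 *
 *	pool = vsp1_dl_body_pool_create(vsp1, 8, VSP1_DL_NUM_ENTRIES, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	vsp1_dl_body_pool_destroy(pool);
 */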

/**
 * vsp1_dl_body_pool_destroy - Release a body pool
 * @pool: the body pool
 *
 * Release all components of a pool allocation.
 */
void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->bodies);
	kfree(pool);
}

/**
 * vsp1_dl_body_get - Obtain a body from a pool
 * @pool: the body pool
 *
 * Obtain a body from the pool without blocking.
 *
 * Return a display list body or NULL if there are none available.
 */
struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
{
	struct vsp1_dl_body *dlb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
		list_del(&dlb->free);
		refcount_set(&dlb->refcnt, 1);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return dlb;
}

/**
 * vsp1_dl_body_put - Return a body back to its pool
 * @dlb: the body to return
 *
 * Return a body back to the pool, resetting num_entries to clear the list.
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
	unsigned long flags;

	if (!dlb)
		return;

	if (!refcount_dec_and_test(&dlb->refcnt))
		return;

	dlb->num_entries = 0;

	spin_lock_irqsave(&dlb->pool->lock, flags);
	list_add_tail(&dlb->free, &dlb->pool->free);
	spin_unlock_irqrestore(&dlb->pool->lock, flags);
}

/**
 * vsp1_dl_body_write - Write a register to a display list body
 * @dlb: the body
 * @reg: the register address
 * @data: the register value
 *
 * Write the given register and value to the display list body. The maximum
 * number of entries that can be written in a body is specified when the body
 * is allocated by vsp1_dl_body_pool_create().
 */
void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
		      "DLB size exceeded (max %u)", dlb->max_entries))
		return;

	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
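
/*
 * Usage sketch (illustrative): filling a body with register writes. 'reg'
 * and 'value' stand in for real VI6_* register offsets and values. The body
 * is returned to its pool when the caller drops its reference.
 *
 *	struct vsp1_dl_body *dlb = vsp1_dl_body_get(pool);
 *
 *	if (!dlb)
 *		return -EBUSY;
 *
 *	vsp1_dl_body_write(dlb, reg, value);
 *	...
 *	vsp1_dl_body_put(dlb);
 */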

/* -----------------------------------------------------------------------------
 * Display List Extended Commands Management
 */

enum vsp1_extcmd_type {
	VSP1_EXTCMD_AUTODISP,
	VSP1_EXTCMD_AUTOFLD,
};

struct vsp1_extended_command_info {
	u16 opcode;
	size_t body_size;
};

static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
	[VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
	[VSP1_EXTCMD_AUTOFLD]  = { 0x03, 160 },
};

/**
 * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
 * @vsp1: the VSP1 device
 * @type: the command pool type
 * @num_cmds: the number of commands to allocate
 *
 * Allocate a pool of commands, each with enough memory to contain the private
 * data of the command. The allocation sizes depend upon the command type.
 *
 * Return a pointer to the pool on success or NULL if memory can't be
 * allocated.
 */
static struct vsp1_dl_cmd_pool *
vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
			unsigned int num_cmds)
{
	struct vsp1_dl_cmd_pool *pool;
	unsigned int i;
	size_t cmd_size;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
	if (!pool->cmds) {
		kfree(pool);
		return NULL;
	}

	cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
		   vsp1_extended_commands[type].body_size;
	cmd_size = ALIGN(cmd_size, 16);

	pool->size = cmd_size * num_cmds;
	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->cmds);
		kfree(pool);
		return NULL;
	}

	for (i = 0; i < num_cmds; ++i) {
		struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
		size_t cmd_offset = i * cmd_size;
		/* The command data immediately follows the command body. */
		size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
				     cmd_offset;

		cmd->pool = pool;
		cmd->opcode = vsp1_extended_commands[type].opcode;

		/*
		 * TODO: Auto-disp can use more than one extended command per
		 * display list, but this is not yet supported.
		 */
		cmd->num_cmds = 1;
		cmd->cmds = pool->mem + cmd_offset;
		cmd->cmd_dma = pool->dma + cmd_offset;

		cmd->data = pool->mem + data_offset;
		cmd->data_dma = pool->dma + data_offset;

		list_add_tail(&cmd->free, &pool->free);
	}

	return pool;
}

static
struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
{
	struct vsp1_dl_ext_cmd *cmd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
				       free);
		list_del(&cmd->free);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return cmd;
}

static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
{
	unsigned long flags;

	if (!cmd)
		return;

	/* Reset flags, as they mark data usage. */
	cmd->flags = 0;

	spin_lock_irqsave(&cmd->pool->lock, flags);
	list_add_tail(&cmd->free, &cmd->pool->free);
	spin_unlock_irqrestore(&cmd->pool->lock, flags);
}

static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->cmds);
	kfree(pool);
}

struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	if (dl->pre_cmd)
		return dl->pre_cmd;

	dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);

	return dl->pre_cmd;
}
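
/*
 * Usage sketch (illustrative): a caller configuring an AUTOFLD pre command
 * obtains the command, then fills its data buffer and flags before the
 * display list is committed; vsp1_dl_list_fill_header() wires the command
 * into the extended header at commit time.
 *
 *	struct vsp1_dl_ext_cmd *cmd = vsp1_dl_get_pre_cmd(dl);
 *
 *	if (cmd) {
 *		cmd->flags = <command-specific flags>;
 *		<fill cmd->data with the command body>
 *	}
 */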

/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_offset;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->bodies);
	dl->dlm = dlm;

	/* Get a default body for our list. */
	dl->body0 = vsp1_dl_body_get(dlm->pool);
	if (!dl->body0) {
		kfree(dl);
		return NULL;
	}

	header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);

	dl->header = ((void *)dl->body0->entries) + header_offset;
	dl->dma = dl->body0->dma + header_offset;

	memset(dl->header, 0, sizeof(*dl->header));
	dl->header->lists[0].addr = dl->body0->dma;

	return dl;
}

static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_body *dlb, *tmp;

	list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
		list_del(&dlb->list);
		vsp1_dl_body_put(dlb);
	}
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_put(dl->body0);
	vsp1_dl_list_bodies_put(dl);

	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: the display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a
		 * chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_next;

	if (!dl)
		return;

	/*
	 * Release any linked display lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_next, &dl->chain, chain)
			__vsp1_dl_list_put(dl_next);
	}

	dl->has_chain = false;

	vsp1_dl_list_bodies_put(dl);

	vsp1_dl_ext_cmd_put(dl->pre_cmd);
	vsp1_dl_ext_cmd_put(dl->post_cmd);

	dl->pre_cmd = NULL;
	dl->post_cmd = NULL;

	/*
	 * body0 is reused as an optimisation, as every display list has at
	 * least one body; only its entry count needs to be reset.
	 */
	dl->body0->num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: the display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in which case no operation
 * is performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_get_body0 - Obtain the default body for the display list
 * @dl: the display list
 *
 * Obtain a pointer to the internal display list body, allowing it to be
 * passed directly to the configure operations.
 */
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
{
	return dl->body0;
}

/**
 * vsp1_dl_list_add_body - Add a body to the display list
 * @dl: the display list
 * @dlb: the body
 *
 * Add a display list body to a display list. Registers contained in bodies
 * are processed after registers contained in the main display list, in the
 * order in which bodies are added.
 *
 * Adding a body to a display list passes ownership of the body to the list.
 * The caller retains its reference to the body, but is not allowed to add
 * new entries to the body.
 *
 * The caller's reference must be explicitly released by a call to
 * vsp1_dl_body_put() when the body isn't needed anymore.
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
	refcount_inc(&dlb->refcnt);

	list_add_tail(&dlb->list, &dl->bodies);

	return 0;
}
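
/*
 * Usage sketch (illustrative): sharing a cached body with a display list.
 * vsp1_dl_list_add_body() takes its own reference, so the caller must still
 * release the reference obtained from vsp1_dl_body_get().
 *
 *	struct vsp1_dl_body *dlb = vsp1_dl_body_get(pool);
 *
 *	vsp1_dl_body_write(dlb, reg, value);
 *	vsp1_dl_list_add_body(dl, dlb);
 *	vsp1_dl_body_put(dlb);
 */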

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: the head display list
 * @dl: the new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
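
/*
 * Usage sketch (illustrative): chaining per-partition display lists behind a
 * head list. Only the head list is committed; the hardware then walks the
 * chain on its own, and the chained lists are released with the head.
 *
 *	struct vsp1_dl_list *head = vsp1_dl_list_get(dlm);
 *	struct vsp1_dl_list *part = vsp1_dl_list_get(dlm);
 *
 *	vsp1_dl_list_add_chain(head, part);
 *	vsp1_dl_list_commit(head, 0);
 */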

static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
{
	cmd->cmds[0].opcode = cmd->opcode;
	cmd->cmds[0].flags = cmd->flags;
	cmd->cmds[0].address_set = cmd->data_dma;
	cmd->cmds[0].reserved = 0;
}

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */
	hdr->num_bytes = dl->body0->num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->bodies, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;
	dl->header->flags = 0;

	/*
	 * Enable the interrupt for the end of each frame. In continuous mode
	 * chained lists are used with one list per frame, so enable the
	 * interrupt for each list. In singleshot mode chained lists are used
	 * to partition a single frame, so enable the interrupt for the last
	 * list only.
	 */
	if (!dlm->singleshot || is_last)
		dl->header->flags |= VSP1_DLH_INT_ENABLE;

	/*
	 * In continuous mode enable auto-start for all lists, as the VSP must
	 * loop on the same list until a new one is queued. In singleshot mode
	 * enable auto-start for all lists but the last to chain processing of
	 * partitions without software intervention.
	 */
	if (!dlm->singleshot || !is_last)
		dl->header->flags |= VSP1_DLH_AUTO_START;

	if (!is_last) {
		/*
		 * If this is not the last display list in the chain, queue the
		 * next item for automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the
		 * VSP should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
	}

	if (!dl->extension)
		return;

	dl->extension->flags = 0;

	if (dl->pre_cmd) {
		dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
		dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
	}

	if (dl->post_cmd) {
		dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
		dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
	}
}

static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the VSP1 has taken the update. The hardware indicates
	 * this by clearing the UPDHDR bit in the CMD register.
	 */
	return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	/*
	 * Program the display list header address. If the hardware is idle
	 * (single-shot mode or first frame in continuous mode) it will then be
	 * started independently. If the hardware is operating, the
	 * VI6_DL_HDR_REF_ADDR register will be updated with the display list
	 * address.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}

static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * then get a hardware interrupt in the middle of the display list
	 * update. Instead, mark the update as pending: it will be queued to
	 * the hardware by the frame end interrupt handler.
	 *
	 * If a display list is already pending we simply drop it, as the new
	 * display list is assumed to contain a more recent configuration. It
	 * is an error if the already pending list has the
	 * VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
	 * waiting for that list to complete.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		WARN_ON(dlm->pending &&
			(dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_next;
	unsigned long flags;

	/* Fill the header for the head and chained display lists. */
	vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

	list_for_each_entry(dl_next, &dl->chain, chain) {
		bool last = list_is_last(&dl_next->chain, &dl->chain);

		vsp1_dl_list_fill_header(dl_next, last);
	}

	dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
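
/*
 * Usage sketch (illustrative): the typical commit sequence. The dl_flags
 * argument carries VSP1_DL_FRAME_END_* notification bits declared in
 * vsp1_dl.h, with 0 requesting no special completion handling.
 *
 *	struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl);
 *
 *	vsp1_dl_body_write(dlb, reg, value);
 *	...
 *	vsp1_dl_list_commit(dl, 0);
 */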

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display
 * list has completed at frame end. If the flag is not returned, display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the
 * flag set in single-shot mode, as display list processing is then not
 * continuous and races never occur.
 *
 * The following flags are only supported in continuous mode.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that
 * just became active had been queued with the internal notification flag.
 *
 * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
 * display list had been queued with the writeback flag.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;
	u32 status = vsp1_read(vsp1, VI6_STATUS);
	unsigned int flags = 0;

	spin_lock(&dlm->lock);

	/*
	 * In single-shot mode the hardware is idle at frame end: the active
	 * display list has fully completed, so release it and report
	 * completion.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * Progressive streams report only TOP fields. If we have a BOTTOM
	 * field, we are interlaced, and expect the frame to complete on the
	 * next frame end interrupt.
	 */
	if (status & VI6_STATUS_FLD_STD(dlm->index))
		goto done;

	/*
	 * If the active display list has the writeback flag set, the frame
	 * completion marks the end of the writeback capture. Return the
	 * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
	 * writeback flag.
	 */
	if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
		flags |= VSP1_DL_FRAME_END_WRITEBACK;
		dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
	}

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
			flags |= VSP1_DL_FRAME_END_INTERNAL;
		dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;

		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return flags;
}
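
/*
 * Usage sketch (illustrative): a frame end interrupt handler consuming the
 * returned flags. The surrounding handler and the completion step are
 * hypothetical.
 *
 *	unsigned int flags = vsp1_dlm_irq_frame_end(dlm);
 *
 *	if (flags & VSP1_DL_FRAME_END_COMPLETED)
 *		<complete the current frame>
 */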

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	unsigned int i;
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;
	u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
		   | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		for (i = 0; i < vsp1->info->wpf_count; ++i)
			vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
	}

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
{
	return vsp1_dl_body_get(dlm->pool);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	size_t header_size;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);

	/*
	 * Initialize the display list body and allocate DMA memory for the
	 * body and the header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory. An extra body is allocated on top of the prealloc to
	 * account for the cursor or writeback planes in continuous mode.
	 */
	header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
			sizeof(struct vsp1_dl_header_extended) :
			sizeof(struct vsp1_dl_header);

	header_size = ALIGN(header_size, 8);

	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
					     VSP1_DL_NUM_ENTRIES, header_size);
	if (!dlm->pool)
		return NULL;

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}

		/* The extended header immediately follows the header. */
		if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
			dl->extension = (void *)dl->header
				      + sizeof(*dl->header);

		list_add_tail(&dl->list, &dlm->free);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
					VSP1_EXTCMD_AUTOFLD, prealloc);
		if (!dlm->cmdpool) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}
	}

	return dlm;
}
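
/*
 * Usage sketch (illustrative): per-WPF manager lifecycle. 'index' selects the
 * WPF instance the manager serves and 'prealloc' sizes the free display list
 * pool; both values here are examples only.
 *
 *	dlm = vsp1_dlm_create(vsp1, index, 4);
 *	if (!dlm)
 *		return -ENOMEM;
 *	...
 *	vsp1_dlm_destroy(dlm);
 */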

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dl_body_pool_destroy(dlm->pool);
	vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}