/*
 * vsp1_dl.c  --  R-Car VSP1 Display List
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

#define VSP1_DLH_EXT_PRE_CMD_EXEC	(1 << 9)
#define VSP1_DLH_EXT_POST_CMD_EXEC	(1 << 8)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __packed;

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __packed;

/**
 * struct vsp1_dl_ext_header - Extended display list header
 * @padding: padding zero bytes for alignment
 * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
 * @flags: enables or disables execution of the pre and post commands
 * @pre_ext_dl_plist: start address of the pre-extended display list bodies
 * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
 * @post_ext_dl_plist: start address of the post-extended display list bodies
 */
struct vsp1_dl_ext_header {
	u32 padding;

	/*
	 * The hardware consumes pre_ext_dl_num_cmd and flags as a single
	 * 32-bit word. The flags apply to the whole extended header, not just
	 * the pre command, so they are kept in a separate 16-bit field that
	 * can be set independently of the pre command count.
	 */
	u16 pre_ext_dl_num_cmd;
	u16 flags;
	u32 pre_ext_dl_plist;

	u32 post_ext_dl_num_cmd;
	u32 post_ext_dl_plist;
} __packed;

struct vsp1_dl_header_extended {
	struct vsp1_dl_header header;
	struct vsp1_dl_ext_header ext;
} __packed;

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __packed;

/**
 * struct vsp1_pre_ext_dl_body - Pre-extended display list body
 * @opcode: extended display list command operation code
 * @flags: command specific flags
 * @address_set: DMA address of the command body data
 * @reserved: padding, set to zero
 */
struct vsp1_pre_ext_dl_body {
	u32 opcode;
	u32 flags;
	u32 address_set;
	u32 reserved;
} __packed;

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @free: entry in the body pool free list
 * @refcnt: reference count tracking the body users
 * @pool: pool to which this body belongs
 * @entries: array of register/value entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 * @max_entries: number of entries available
 */
struct vsp1_dl_body {
	struct list_head list;
	struct list_head free;

	refcount_t refcnt;

	struct vsp1_dl_body_pool *pool;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
	unsigned int max_entries;
};

/**
 * struct vsp1_dl_body_pool - Display list body pool
 * @dma: DMA address of the pool memory
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @bodies: array of body structures for the pool
 * @free: list of free body entries
 * @lock: protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_body_pool {
	/* DMA allocation */
	dma_addr_t dma;
	size_t size;
	void *mem;

	/* Body management */
	struct vsp1_dl_body *bodies;
	struct list_head free;
	spinlock_t lock;

	struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_cmd_pool - Display list extended command pool
 * @dma: DMA address of the pool memory
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @cmds: array of command structures for the pool
 * @free: list of free command entries
 * @lock: protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_cmd_pool {
	/* DMA allocation */
	dma_addr_t dma;
	size_t size;
	void *mem;

	struct vsp1_dl_ext_cmd *cmds;
	struct list_head free;

	spinlock_t lock;

	struct vsp1_device *vsp1;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header
 * @extension: extended display list header, NULL when not supported
 * @dma: DMA address of the header
 * @body0: first display list body
 * @bodies: list of extra display list bodies
 * @pre_cmd: pre command to be issued through the extended DL header
 * @post_cmd: post command to be issued through the extended DL header
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 * @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	struct vsp1_dl_ext_header *extension;
	dma_addr_t dma;

	struct vsp1_dl_body *body0;
	struct list_head bodies;

	struct vsp1_dl_ext_cmd *pre_cmd;
	struct vsp1_dl_ext_cmd *post_cmd;

	bool has_chain;
	struct list_head chain;

	unsigned int flags;
};

/**
 * struct vsp1_dl_manager - Display list manager
 * @index: index of the related WPF
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued and pending lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by the hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @pool: body pool for the display list bodies
 * @cmdpool: commands pool for extended display lists
 */
struct vsp1_dl_manager {
	unsigned int index;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct vsp1_dl_body_pool *pool;
	struct vsp1_dl_cmd_pool *cmdpool;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/**
 * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
 * @vsp1: The VSP1 device
 * @num_bodies: The number of bodies to allocate
 * @num_entries: The maximum number of entries that a body can contain
 * @extra_size: Extra allocation provided for each body
 *
 * Allocate a pool of display list bodies, each with enough memory to contain
 * the requested number of entries plus the @extra_size.
 *
 * Return a pointer to the pool on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
			 unsigned int num_entries, size_t extra_size)
{
	struct vsp1_dl_body_pool *pool;
	size_t dlb_size;
	unsigned int i;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	/*
	 * Size each body slice to hold the requested number of entries plus
	 * the extra space. vsp1_dlm_create() uses the extra space to store
	 * the display list header right after the entries of the first body.
	 */
	dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
	pool->size = dlb_size * num_bodies;

	pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
	if (!pool->bodies) {
		kfree(pool);
		return NULL;
	}

	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->bodies);
		kfree(pool);
		return NULL;
	}

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	for (i = 0; i < num_bodies; ++i) {
		struct vsp1_dl_body *dlb = &pool->bodies[i];

		dlb->pool = pool;
		dlb->max_entries = num_entries;

		dlb->dma = pool->dma + i * dlb_size;
		dlb->entries = pool->mem + i * dlb_size;

		list_add_tail(&dlb->free, &pool->free);
	}

	return pool;
}

/**
 * vsp1_dl_body_pool_destroy - Release a body pool
 * @pool: The body pool
 *
 * Release all components of a pool allocation.
 */
void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->bodies);
	kfree(pool);
}

/**
 * vsp1_dl_body_get - Obtain a body from a pool
 * @pool: The body pool
 *
 * Obtain a body from the pool without blocking.
 *
 * Returns a display list body or NULL if none are available.
 */
struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
{
	struct vsp1_dl_body *dlb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
		list_del(&dlb->free);
		refcount_set(&dlb->refcnt, 1);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return dlb;
}

/**
 * vsp1_dl_body_put - Return a body back to its pool
 * @dlb: The display list body
 *
 * Return a body back to its pool once the last reference is released, and
 * reset num_entries to clear the list.
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
	unsigned long flags;

	if (!dlb)
		return;

	if (!refcount_dec_and_test(&dlb->refcnt))
		return;

	dlb->num_entries = 0;

	spin_lock_irqsave(&dlb->pool->lock, flags);
	list_add_tail(&dlb->free, &dlb->pool->free);
	spin_unlock_irqrestore(&dlb->pool->lock, flags);
}

/**
 * vsp1_dl_body_write - Write a register to a display list body
 * @dlb: The body
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list body. The maximum
 * number of entries that can be written in a body is specified when the body
 * pool is created.
 */
void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
		      "DLB size exceeded (max %u)", dlb->max_entries))
		return;

	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
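
/*
 * Illustrative sketch only (assumption: not part of the original driver and
 * never called): the typical lifetime of a display list body obtained from a
 * pool. The register address and value below are placeholders, not real VSP1
 * registers.
 */
static inline void vsp1_dl_body_usage_sketch(struct vsp1_dl_body_pool *pool)
{
	struct vsp1_dl_body *dlb;

	/* Take a free body; returns NULL if the pool is exhausted. */
	dlb = vsp1_dl_body_get(pool);
	if (!dlb)
		return;

	/* Queue register writes to be applied when the list is processed. */
	vsp1_dl_body_write(dlb, 0x0100, 0x00000001);	/* placeholder */

	/* Drop the reference; the body returns to the pool. */
	vsp1_dl_body_put(dlb);
}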

/* -----------------------------------------------------------------------------
 * Display List Extended Command Management
 */

enum vsp1_extcmd_type {
	VSP1_EXTCMD_AUTODISP,
	VSP1_EXTCMD_AUTOFLD,
};

struct vsp1_extended_command_info {
	u16 opcode;
	size_t body_size;
};

static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
	[VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
	[VSP1_EXTCMD_AUTOFLD]  = { 0x03, 160 },
};

/**
 * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
 * @vsp1: The VSP1 device
 * @type: The command pool type
 * @num_cmds: The number of commands to allocate
 *
 * Allocate a pool of commands, each with enough memory to contain the private
 * data of the command. The allocation sizes depend on the command type.
 *
 * Return a pointer to the pool on success or NULL if memory can't be
 * allocated.
 */
static struct vsp1_dl_cmd_pool *
vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
			unsigned int num_cmds)
{
	struct vsp1_dl_cmd_pool *pool;
	unsigned int i;
	size_t cmd_size;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
	if (!pool->cmds) {
		kfree(pool);
		return NULL;
	}

	cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
		   vsp1_extended_commands[type].body_size;
	cmd_size = ALIGN(cmd_size, 16);

	pool->size = cmd_size * num_cmds;
	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->cmds);
		kfree(pool);
		return NULL;
	}

	for (i = 0; i < num_cmds; ++i) {
		struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
		size_t cmd_offset = i * cmd_size;
		/* The command data follows the command body in each slice. */
		size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
				     cmd_offset;

		cmd->pool = pool;
		cmd->opcode = vsp1_extended_commands[type].opcode;

		/*
		 * Each command currently carries a single extended command
		 * body, hence num_cmds is fixed to 1.
		 */
		cmd->num_cmds = 1;
		cmd->cmds = pool->mem + cmd_offset;
		cmd->cmd_dma = pool->dma + cmd_offset;

		cmd->data = pool->mem + data_offset;
		cmd->data_dma = pool->dma + data_offset;

		list_add_tail(&cmd->free, &pool->free);
	}

	return pool;
}

static
struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
{
	struct vsp1_dl_ext_cmd *cmd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
				       free);
		list_del(&cmd->free);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return cmd;
}

static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
{
	unsigned long flags;

	if (!cmd)
		return;

	/* Reset the flags, they mark data usage. */
	cmd->flags = 0;

	spin_lock_irqsave(&cmd->pool->lock, flags);
	list_add_tail(&cmd->free, &cmd->pool->free);
	spin_unlock_irqrestore(&cmd->pool->lock, flags);
}

static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->cmds);
	kfree(pool);
}

struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	if (dl->pre_cmd)
		return dl->pre_cmd;

	dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);

	return dl->pre_cmd;
}
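
/*
 * Illustrative sketch only (assumption: not part of the original driver and
 * never called): how a client would request the pre-command of a display
 * list and fill its data area before committing the list. The data layout
 * written through cmd->data is command specific; the memset below is only a
 * placeholder for it.
 */
static inline void vsp1_dl_pre_cmd_usage_sketch(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_ext_cmd *cmd;

	cmd = vsp1_dl_get_pre_cmd(dl);
	if (!cmd)
		return;

	/* Placeholder for the command specific body stored at cmd->data. */
	memset(cmd->data, 0, sizeof(u32));

	/* cmd->flags would be set here as required by the command type. */
}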

/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_offset;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->bodies);
	dl->dlm = dlm;

	/* Get a default body for our list. */
	dl->body0 = vsp1_dl_body_get(dlm->pool);
	if (!dl->body0) {
		kfree(dl);
		return NULL;
	}

	/* The header lives in the body's extra space, after the entries. */
	header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);

	dl->header = ((void *)dl->body0->entries) + header_offset;
	dl->dma = dl->body0->dma + header_offset;

	memset(dl->header, 0, sizeof(*dl->header));
	dl->header->lists[0].addr = dl->body0->dma;

	return dl;
}

static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_body *dlb, *tmp;

	list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
		list_del(&dlb->list);
		vsp1_dl_body_put(dlb);
	}
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_put(dl->body0);
	vsp1_dl_list_bodies_put(dl);

	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a
		 * chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_next;

	if (!dl)
		return;

	/*
	 * Release any linked display lists that were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_next, &dl->chain, chain)
			__vsp1_dl_list_put(dl_next);
	}

	dl->has_chain = false;

	vsp1_dl_list_bodies_put(dl);

	vsp1_dl_ext_cmd_put(dl->pre_cmd);
	vsp1_dl_ext_cmd_put(dl->post_cmd);

	dl->pre_cmd = NULL;
	dl->post_cmd = NULL;

	/*
	 * body0 is kept attached rather than returned to the pool, as every
	 * display list has a main body. Only reset its entry count.
	 */
	dl->body0->num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_get_body0 - Obtain the default body for the display list
 * @dl: The display list
 *
 * Obtain a pointer to the internal display list body, allowing it to be
 * passed directly to configuration operations.
 */
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
{
	return dl->body0;
}

/**
 * vsp1_dl_list_add_body - Add a body to the display list
 * @dl: The display list
 * @dlb: The body
 *
 * Add a display list body to a display list. Registers contained in bodies
 * are processed after registers contained in the main display list, in the
 * order in which bodies are added.
 *
 * Adding a body to a display list passes ownership of the body to the list.
 * The caller retains its reference to the body when adding it to the display
 * list, but is not allowed to add new entries to the body afterwards.
 *
 * The caller's reference must be explicitly released by a call to
 * vsp1_dl_body_put() when the body isn't needed anymore.
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
	refcount_inc(&dlb->refcnt);

	list_add_tail(&dlb->list, &dl->bodies);

	return 0;
}

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head list is
 * put back with __vsp1_dl_list_put().
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
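
/*
 * Illustrative sketch only (assumption: not part of the original driver and
 * never called): building a two-partition chain. The head list and one
 * chained list come from the same manager; the whole chain is processed by
 * the hardware as a single operation when the head is committed.
 */
static inline void vsp1_dl_chain_usage_sketch(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *head;
	struct vsp1_dl_list *part;

	head = vsp1_dl_list_get(dlm);
	part = vsp1_dl_list_get(dlm);
	if (!head || !part) {
		vsp1_dl_list_put(head);
		vsp1_dl_list_put(part);
		return;
	}

	/* Each partition would fill its own body0 here. */
	vsp1_dl_list_add_chain(head, part);

	/* Committing the head also commits the chained partition. */
	vsp1_dl_list_commit(head, 0);
}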

static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
{
	cmd->cmds[0].opcode = cmd->opcode;
	cmd->cmds[0].flags = cmd->flags;
	cmd->cmds[0].address_set = cmd->data_dma;
	cmd->cmds[0].reserved = 0;
}

static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */
	hdr->num_bytes = dl->body0->num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->bodies, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;
	dl->header->flags = 0;

	/*
	 * Enable the interrupt for the end of each frame. In continuous mode
	 * chained lists are used with one list per frame, so enable the
	 * interrupt for each list. In single-shot mode chained lists are used
	 * to partition a single frame, so enable the interrupt for the last
	 * list only.
	 */
	if (!dlm->singleshot || is_last)
		dl->header->flags |= VSP1_DLH_INT_ENABLE;

	/*
	 * In continuous mode enable auto-start for all lists, as the VSP must
	 * loop on the same list until a new one is queued. In single-shot
	 * mode enable auto-start for all lists but the last to chain
	 * processing of partitions without software intervention.
	 */
	if (!dlm->singleshot || !is_last)
		dl->header->flags |= VSP1_DLH_AUTO_START;

	if (!is_last) {
		/*
		 * If this is not the last display list in the chain, queue
		 * the next item for automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the
		 * hardware should loop over the display list continuously
		 * until suspended.
		 */
		dl->header->next_header = dl->dma;
	}

	if (!dl->extension)
		return;

	dl->extension->flags = 0;

	if (dl->pre_cmd) {
		dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
		dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
	}

	if (dl->post_cmd) {
		dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
		dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
	}
}

static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the hardware has taken the update. The UPDHDR bit in
	 * the CMD register is cleared once the queued display list header has
	 * been fetched; while it is still set the update is pending.
	 */
	return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	/*
	 * Program the display list header address. In single-shot mode the
	 * hardware is idle and will start processing this list when
	 * triggered. In continuous mode a list may already be running, in
	 * which case the new address is taken into account for the next
	 * frame.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}

static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * then end up with a racy update of the hardware. Mark the new
	 * display list as pending instead, it will be queued to the hardware
	 * by the frame end interrupt handler.
	 *
	 * If a display list is already pending we simply drop it, as the new
	 * display list is assumed to contain a more recent configuration. It
	 * is an error if the already pending list has the
	 * VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
	 * waiting for that list to complete. This shouldn't happen as the
	 * waiting process should perform proper locking, but warn just in
	 * case.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		WARN_ON(dlm->pending &&
			(dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}

static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_next;
	unsigned long flags;

	/* Fill the header for the head and chained display lists. */
	vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

	list_for_each_entry(dl_next, &dl->chain, chain) {
		bool last = list_is_last(&dl_next->chain, &dl->chain);

		vsp1_dl_list_fill_header(dl_next, last);
	}

	dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
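
/*
 * Illustrative sketch only (assumption: not part of the original driver and
 * never called): the minimal get/fill/commit cycle a client would perform
 * for one frame. The register address and value are placeholders.
 */
static inline void vsp1_dl_commit_usage_sketch(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	struct vsp1_dl_body *dlb;

	dl = vsp1_dl_list_get(dlm);
	if (!dl)
		return;

	/* Configure the frame through the list's default body. */
	dlb = vsp1_dl_list_get_body0(dl);
	vsp1_dl_body_write(dlb, 0x0100, 0x00000001);	/* placeholder */

	/* Hand the list to the hardware; it is recycled on completion. */
	vsp1_dl_list_commit(dl, 0);
}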

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display
 * list has completed at frame end. If the flag is not returned, display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the
 * flag set in single-shot mode, as display list processing is then not
 * continuous and races never occur.
 *
 * The following flags are only supported in continuous mode.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that
 * has completed had been queued with the internal notification flag.
 *
 * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
 * display list had been queued with the writeback flag.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;
	u32 status = vsp1_read(vsp1, VI6_STATUS);
	unsigned int flags = 0;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, we don't have to do anything besides releasing
	 * the active list.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * Progressive streams report only TOP fields. If we have a BOTTOM
	 * field, we are interlaced, and expect the frame to complete on the
	 * next frame end interrupt.
	 */
	if (status & VI6_STATUS_FLD_STD(dlm->index))
		goto done;

	/*
	 * If the active display list has the writeback flag set, the frame
	 * completion marks the end of the writeback capture. Return the
	 * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
	 * writeback flag.
	 */
	if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
		flags |= VSP1_DL_FRAME_END_WRITEBACK;
		dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
	}

	/*
	 * The device starts processing the queued display list right after
	 * the frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
			flags |= VSP1_DL_FRAME_END_INTERNAL;
		dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;

		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
	}

	/*
	 * Now that the hardware has started processing the queued display
	 * list, we can queue the pending display list to the hardware if one
	 * has been prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return flags;
}

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	unsigned int i;
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;
	u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
		   | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		for (i = 0; i < vsp1->info->wpf_count; ++i)
			vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
	}

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
{
	return vsp1_dl_body_get(dlm->pool);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	size_t header_size;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);

	/*
	 * Initialize the display list body pool and allocate DMA memory for
	 * the bodies and the headers. Both are allocated together to avoid
	 * memory fragmentation, with the header located right after the body
	 * in memory. An extra body is allocated on top of the prealloc count
	 * to account for the body cached and reused by the pipeline.
	 */
	header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
			sizeof(struct vsp1_dl_header_extended) :
			sizeof(struct vsp1_dl_header);

	header_size = ALIGN(header_size, 8);

	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
					     VSP1_DL_NUM_ENTRIES, header_size);
	if (!dlm->pool)
		return NULL;

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}

		/* The extended header immediately follows the header. */
		if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
			dl->extension = (void *)dl->header
				      + sizeof(*dl->header);

		list_add_tail(&dl->list, &dlm->free);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
					VSP1_EXTCMD_AUTOFLD, prealloc);
		if (!dlm->cmdpool) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}
	}

	return dlm;
}
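
/*
 * Illustrative sketch only (assumption: not part of the original driver and
 * never called): creating and destroying a display list manager for WPF
 * index 0 with two preallocated display lists, as a client would do at
 * initialization and cleanup time.
 */
static inline int vsp1_dlm_lifetime_sketch(struct vsp1_device *vsp1)
{
	struct vsp1_dl_manager *dlm;

	dlm = vsp1_dlm_create(vsp1, 0, 2);
	if (!dlm)
		return -ENOMEM;

	/* ... use vsp1_dl_list_get()/vsp1_dl_list_commit() per frame ... */

	vsp1_dlm_destroy(dlm);

	return 0;
}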

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dl_body_pool_destroy(dlm->pool);
	vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}