#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "verbs.h"
#include "sdma_txreq.h"

/* Hardware limit */
#define MAX_DESC 64

/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

/* sdma_txreq completion status codes, passed to the callback */
#define SDMA_TXREQ_S_OK 0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED 2
#define SDMA_TXREQ_S_SHUTDOWN 3

/* sdma_txreq flag bits */
#define SDMA_TXREQ_F_URGENT 0x0001
#define SDMA_TXREQ_F_AHG_COPY 0x0002
#define SDMA_TXREQ_F_USE_AHG 0x0004

/* per-descriptor DMA mapping types */
#define SDMA_MAP_NONE 0
#define SDMA_MAP_SINGLE 1
#define SDMA_MAP_PAGE 2

/* AHG (automatic header generation) update word layout */
#define SDMA_AHG_VALUE_MASK 0xffff
#define SDMA_AHG_VALUE_SHIFT 0
#define SDMA_AHG_INDEX_MASK 0xf
#define SDMA_AHG_INDEX_SHIFT 16
#define SDMA_AHG_FIELD_LEN_MASK 0xf
#define SDMA_AHG_FIELD_LEN_SHIFT 20
#define SDMA_AHG_FIELD_START_MASK 0x1f
#define SDMA_AHG_FIELD_START_SHIFT 24
#define SDMA_AHG_UPDATE_ENABLE_MASK 0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
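
/*
 * An AHG update word therefore packs: bit 31 update-enable, bits
 * 28:24 field start bit, bits 23:20 field length, bits 19:16 header
 * dword index, bits 15:0 the value to write.  For example, writing
 * 0xb into a 4-bit field starting at bit 8 of header dword 3 would
 * encode as:
 *
 *	(1u << 31) | (8 << 24) | (4 << 20) | (3 << 16) | 0xb
 *
 * sdma_build_ahg_descriptor(), later in this header, builds these
 * words from the masks and shifts above.
 */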

/* AHG modes (selected via the DESC1 header mode field) */
#define SDMA_AHG_NO_AHG 0
#define SDMA_AHG_COPY 1
#define SDMA_AHG_APPLY_UPDATE1 2
#define SDMA_AHG_APPLY_UPDATE2 3
#define SDMA_AHG_APPLY_UPDATE3 4

/* hardware descriptor quadword layouts (see struct hw_sdma_desc) */
#define SDMA_DESC0_FIRST_DESC_FLAG BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
#define SDMA_DESC0_BYTE_COUNT_MASK \
	((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
	(SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT 0
#define SDMA_DESC0_PHY_ADDR_WIDTH 48
#define SDMA_DESC0_PHY_ADDR_MASK \
	((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
	(SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT 13
#define SDMA_DESC1_HEADER_MODE_WIDTH 3
#define SDMA_DESC1_HEADER_MODE_MASK \
	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
#define SDMA_DESC1_HEADER_INDEX_MASK \
	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT 4
#define SDMA_DESC1_HEADER_DWS_WIDTH 4
#define SDMA_DESC1_HEADER_DWS_MASK \
	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT 2
#define SDMA_DESC1_GENERATION_WIDTH 2
#define SDMA_DESC1_GENERATION_MASK \
	((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
	(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG BIT_ULL(0)

/* driver state machine states */
enum sdma_states {
	sdma_state_s00_hw_down,
	sdma_state_s10_hw_start_up_halt_wait,
	sdma_state_s15_hw_start_up_clean_wait,
	sdma_state_s20_idle,
	sdma_state_s30_sw_clean_up_wait,
	sdma_state_s40_hw_clean_up_wait,
	sdma_state_s50_hw_halt_wait,
	sdma_state_s60_idle_halt_wait,
	sdma_state_s80_hw_freeze,
	sdma_state_s82_freeze_sw_clean,
	sdma_state_s99_running,
};

/* events driving the state machine */
enum sdma_events {
	sdma_event_e00_go_hw_down,
	sdma_event_e10_go_hw_start,
	sdma_event_e15_hw_halt_done,
	sdma_event_e25_hw_clean_up_done,
	sdma_event_e30_go_running,
	sdma_event_e40_sw_cleaned,
	sdma_event_e50_hw_cleaned,
	sdma_event_e60_hw_halted,
	sdma_event_e70_go_idle,
	sdma_event_e80_hw_freeze,
	sdma_event_e81_hw_frozen,
	sdma_event_e82_hw_unfreeze,
	sdma_event_e85_link_down,
	sdma_event_e90_sw_halted,
};

/* actions to apply when entering a state */
struct sdma_set_state_action {
	unsigned op_enable:1;
	unsigned op_intenable:1;
	unsigned op_halt:1;
	unsigned op_cleanup:1;
	unsigned go_s99_running_tofalse:1;
	unsigned go_s99_running_totrue:1;
};

struct sdma_state {
	struct kref kref;
	struct completion comp;
	enum sdma_states current_state;
	unsigned current_op;
	unsigned go_s99_running;
	/* debugging/development */
	enum sdma_states previous_state;
	unsigned previous_op;
	enum sdma_events last_event;
};
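
/*
 * State naming sketch (the transition table itself is driven by the
 * sdma_events above, presumably in the driver's .c code): s00 is
 * fully down, the s10-s60 states are transitional waits, s80/s82
 * park the engine across a chip freeze, and s99 is the only state
 * in which new work is accepted (see __sdma_running() below).
 */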

/**
 * struct hw_sdma_desc - hardware SDMA descriptor
 * @qw: the two quadwords the engine consumes
 *
 * The field layout of qw[0] and qw[1] is given by the SDMA_DESC0_*
 * and SDMA_DESC1_* definitions above.
 */
struct hw_sdma_desc {
	/* private: don't use directly */
	__le64 qw[2];
};

/**
 * struct sdma_engine - per-engine SDMA state
 *
 * The producer (tail) side is protected by tail_lock and the
 * consumer (head) side by head_lock; each group sits on its own
 * cacheline to limit false sharing between the two paths.
 */
struct sdma_engine {
	/* read mostly */
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	/* mapped CSR for this engine's tail register */
	void __iomem *tail_csr;
	u64 imask;		/* clear interrupt mask */
	u64 idle_mask;
	u64 progress_mask;
	u64 int_mask;
	/* the chip DMAs its current head index here */
	volatile __le64 *head_dma;
	/* DMA address of *head_dma */
	dma_addr_t head_phys;
	/* descriptor queue */
	struct hw_sdma_desc *descq;
	/* number of times the descriptor queue was found full */
	unsigned descq_full_count;
	struct sdma_txreq **tx_ring;
	/* DMA address of descq */
	dma_addr_t descq_phys;

	u32 sdma_mask;
	/* state machine state, see enum sdma_states */
	struct sdma_state state;
	/* CPU this engine's work is targeted at */
	int cpu;

	u8 sdma_shift;
	/* zero-relative engine number */
	u8 this_idx;
	/* protects the p_senddmactrl shadow */
	spinlock_t senddmactrl_lock;
	/* shadow of this engine's SendDmaCtrl CSR */
	u64 p_senddmactrl;

	/* producer fields: read/write under tail_lock */
	spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	u64 tail_sn;
#endif
	u32 descq_tail;
	/* allocation bitmap for AHG entries */
	unsigned long ahg_bits;
	u16 desc_avail;
	u16 tx_tail;
	/* size of descq in entries */
	u16 descq_cnt;

	/* consumer fields: read/write under head_lock */
	seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	u64 head_sn;
#endif
	u32 descq_head;
	u16 tx_head;
	u64 last_status;
	/* error and interrupt statistics */
	u64 err_cnt;
	u64 sdma_int_cnt;
	u64 idle_int_cnt;
	u64 progress_int_cnt;

	/* iowaits waiting for descriptor space */
	struct list_head dmawait;

	struct tasklet_struct sdma_hw_clean_up_task
		____cacheline_aligned_in_smp;
	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;

	struct work_struct err_halt_worker;
	/* watchdog for stalled descriptor progress */
	struct timer_list err_progress_check_timer;
	u32 progress_check_head;
	/* worker for processing the flushlist */
	struct work_struct flush_worker;
	/* protects flushlist */
	spinlock_t flushlist_lock;
	struct list_head flushlist;
	struct cpumask cpu_mask;
	struct kobject kobj;
};
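
/*
 * Ring operation at a glance: software fills descq[] and advances
 * descq_tail under tail_lock (pushing the new tail to the chip via
 * tail_csr); the hardware consumes entries and DMAs its current
 * head index into *head_dma; software then retires completed
 * entries by advancing descq_head under head_lock.
 */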

int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);

/**
 * sdma_empty() - idle engine test
 * @sde: SDMA engine to test
 *
 * Return: 1 if the descriptor queue is empty, 0 otherwise.
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
	return sde->descq_tail == sde->descq_head;
}

/* number of free descriptor queue entries */
static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
	return sde->descq_cnt -
		(sde->descq_tail -
		 ACCESS_ONCE(sde->descq_head)) - 1;
}

/* number of descriptor queue entries in process */
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
	return sde->descq_cnt - sdma_descq_freecnt(sde);
}
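
/*
 * The free count reserves one slot so a full ring is distinguishable
 * from an empty one: with descq_cnt == 128 and head == tail,
 * sdma_descq_freecnt() is 127 and sdma_descq_inprocess() is 1.
 */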

/**
 * __sdma_running() - private engine state test
 * @engine: SDMA engine
 *
 * Returns non-zero if the engine is in the s99_running state.
 * Per the leading-underscore convention, the caller is expected
 * to hold an engine lock so the state cannot change underneath it.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
	return engine->state.current_state == sdma_state_s99_running;
}

/**
 * sdma_running() - engine state test
 * @engine: SDMA engine
 *
 * Takes tail_lock and samples the state, so the result is only a
 * hint: the engine may change state as soon as the lock is dropped.
 *
 * Return: 1 if the engine is running, 0 otherwise.
 */
static inline int sdma_running(struct sdma_engine *engine)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->tail_lock, flags);
	ret = __sdma_running(engine);
	spin_unlock_irqrestore(&engine->tail_lock, flags);
	return ret;
}

void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen);

/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions (SDMA_TXREQ_F_*)
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: AHG entry to use (0 - 31)
 * @num_ahg: number of AHG descriptor words in @ahg
 * @ahg: array of AHG update descriptor words
 * @ahg_hlen: number of header bytes supplied by the AHG entry
 * @cb: callback invoked on completion with one of the
 *	SDMA_TXREQ_S_* status codes
 *
 * The sdma_txreq itself is allocated by the caller; this routine
 * only initializes it and seeds the first descriptor, including
 * the AHG mode bits selected by @flags.
 *
 * Return: 0 on success, -ENODATA for a zero-length request, or
 * -EMSGSIZE if @tlen exceeds MAX_SDMA_PKT_SIZE.
 */
static inline int sdma_txinit_ahg(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	u8 ahg_entry,
	u8 num_ahg,
	u32 *ahg,
	u8 ahg_hlen,
	void (*cb)(struct sdma_txreq *, int))
{
	if (tlen == 0)
		return -ENODATA;
	if (tlen > MAX_SDMA_PKT_SIZE)
		return -EMSGSIZE;
	tx->desc_limit = ARRAY_SIZE(tx->descs);
	tx->descp = &tx->descs[0];
	INIT_LIST_HEAD(&tx->list);
	tx->num_desc = 0;
	tx->flags = flags;
	tx->complete = cb;
	tx->coalesce_buf = NULL;
	tx->wait = NULL;
	tx->packet_len = tlen;
	tx->tlen = tx->packet_len;
	tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
	tx->descs[0].qw[1] = 0;
	if (flags & SDMA_TXREQ_F_AHG_COPY)
		tx->descs[0].qw[1] |=
			(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
				<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
			(((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
				<< SDMA_DESC1_HEADER_MODE_SHIFT);
	else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
		_sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
	return 0;
}

/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions (SDMA_TXREQ_F_*)
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback invoked on completion with one of the
 *	SDMA_TXREQ_S_* status codes
 *
 * A thin wrapper around sdma_txinit_ahg() with AHG disabled; see
 * that routine for the return values.
 */
static inline int sdma_txinit(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	void (*cb)(struct sdma_txreq *, int))
{
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
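
/*
 * A minimal submission sketch (illustrative only; dd, sde, wait,
 * page, and the buffer names are assumed to exist in the caller):
 *
 *	static void done_cb(struct sdma_txreq *tx, int status)
 *	{
 *		// status is one of the SDMA_TXREQ_S_* codes
 *	}
 *
 *	ret = sdma_txinit(&tx, 0, hdr_len + data_len, done_cb);
 *	if (!ret)
 *		ret = sdma_txadd_kvaddr(dd, &tx, hdr, hdr_len);
 *	if (!ret)
 *		ret = sdma_txadd_page(dd, &tx, page, offset, data_len);
 *	if (!ret)
 *		ret = sdma_send_txreq(sde, wait, &tx);
 *
 * The sdma_txadd_*() helpers and sdma_send_txreq() are declared
 * below.
 */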

/*
 * Descriptor field helpers.  The DMA mapping type of each software
 * descriptor is stashed in the generation bits of the software copy,
 * which is why sdma_mapping_type() reads the GENERATION field.
 */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
	return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
		>> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
		>> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
		>> SDMA_DESC0_PHY_ADDR_SHIFT;
}

static inline void make_tx_sdma_desc(
	struct sdma_txreq *tx,
	int type,
	dma_addr_t addr,
	size_t len)
{
	struct sdma_desc *desc = &tx->descp[tx->num_desc];

	if (!tx->num_desc) {
		/* qw[0]/qw[1] were already seeded by sdma_txinit_ahg() */
		desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
			<< SDMA_DESC1_GENERATION_SHIFT;
	} else {
		desc->qw[0] = 0;
		desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
			<< SDMA_DESC1_GENERATION_SHIFT;
	}
	desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
			<< SDMA_DESC0_PHY_ADDR_SHIFT) |
		       (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
			<< SDMA_DESC0_BYTE_COUNT_SHIFT);
}

/* tx descriptor management helpers implemented out of line */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx);
void __sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx);

/* clean up a txreq's descriptors, if any were added */
static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	if (tx->num_desc)
		__sdma_txclean(dd, tx);
}

/* flag the last descriptor and apply the per-device close flags */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
				  struct sdma_txreq *tx)
{
	tx->descp[tx->num_desc].qw[0] |=
		SDMA_DESC0_LAST_DESC_FLAG;
	tx->descp[tx->num_desc].qw[1] |=
		dd->default_desc1;
	if (tx->flags & SDMA_TXREQ_F_URGENT)
		tx->descp[tx->num_desc].qw[1] |=
			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
			 SDMA_DESC1_INT_REQ_FLAG);
}

static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval = 0;

	make_tx_sdma_desc(
		tx,
		type,
		addr, len);
	WARN_ON(len > tx->tlen);
	tx->tlen -= len;
	/* special cases for the last descriptor */
	if (!tx->tlen) {
		if (tx->packet_len & (sizeof(u32) - 1)) {
			/* packet needs padding to a dword boundary */
			rval = _pad_sdma_tx_descs(dd, tx);
			if (rval)
				return rval;
		} else {
			_sdma_close_tx(dd, tx);
		}
	}
	tx->num_desc++;
	return rval;
}

/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * Adds a page/offset/length descriptor, DMA-mapping the fragment
 * with dma_map_page().
 *
 * Return: 0 on success, -ENOSPC if the DMA mapping fails (the
 * txreq is cleaned up in that case), or a negative error from
 * ext_coal_sdma_tx_descs().
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
					      NULL, page, offset, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_page(
		&dd->pcidev->dev,
		page,
		offset,
		len,
		DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
		dd, SDMA_MAP_PAGE, tx, addr, len);
}

/**
 * sdma_txadd_daddr() - add a DMA address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the buffer is added
 * @addr: DMA address mapped by the caller
 * @len: length in bytes
 *
 * The caller is responsible for the DMA mapping (and unmapping,
 * since SDMA_MAP_NONE is recorded for this descriptor).
 *
 * Return: 0 on success, or a negative error from
 * ext_coal_sdma_tx_descs().
 */
static inline int sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
					      NULL, NULL, 0, 0);
		if (rval <= 0)
			return rval;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}

/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the buffer is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * Adds a descriptor referencing @kvaddr/@len, DMA-mapping the
 * buffer with dma_map_single().
 *
 * Return: 0 on success, -ENOSPC if the DMA mapping fails (the
 * txreq is cleaned up in that case), or a negative error from
 * ext_coal_sdma_tx_descs().
 */
static inline int sdma_txadd_kvaddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	void *kvaddr,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
					      kvaddr, NULL, 0, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_single(
		&dd->pcidev->dev,
		kvaddr,
		len,
		DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
		dd, SDMA_MAP_SINGLE, tx, addr, len);
}

struct iowait;

int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx);
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list,
		     u32 *count);

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);

/**
 * sdma_build_ahg_descriptor() - build an AHG update descriptor word
 * @data: 16-bit value to write into the header
 * @dwindex: header dword index to update
 * @startbit: first bit of the field within the dword
 * @bits: field length in bits
 *
 * Build and return a 32-bit AHG update word with the update-enable
 * bit set, using the SDMA_AHG_* masks and shifts above.
 */
static inline u32 sdma_build_ahg_descriptor(
	u16 data,
	u8 dwindex,
	u8 startbit,
	u8 bits)
{
	return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
		     ((startbit & SDMA_AHG_FIELD_START_MASK) <<
		      SDMA_AHG_FIELD_START_SHIFT) |
		     ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
		      SDMA_AHG_FIELD_LEN_SHIFT) |
		     ((dwindex & SDMA_AHG_INDEX_MASK) <<
		      SDMA_AHG_INDEX_SHIFT) |
		     ((data & SDMA_AHG_VALUE_MASK) <<
		      SDMA_AHG_VALUE_SHIFT));
}
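
/*
 * Illustrative use (names hypothetical): update the 8-bit field at
 * bit 0 of header dword 4 on each packet via an AHG-enabled txreq:
 *
 *	u32 ahg[1] = { sdma_build_ahg_descriptor(val, 4, 0, 8) };
 *
 *	ret = sdma_txinit_ahg(&tx, SDMA_TXREQ_F_USE_AHG, tlen,
 *			      ahg_entry, 1, ahg, ahg_hlen, cb);
 */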

/**
 * sdma_progress() - use the head_lock seqcount to detect progress
 * @sde: sdma_engine to check
 * @seq: seqcount sampled earlier with read_seqbegin(&sde->head_lock)
 * @tx: txreq for which descriptor availability is checked
 *
 * If the seqcount has changed since @seq was taken, the engine head
 * has moved; recompute the free descriptor count and report whether
 * @tx now fits.
 *
 * Return: 1 if progress freed enough descriptors for @tx,
 * 0 otherwise.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
				     struct sdma_txreq *tx)
{
	if (read_seqretry(&sde->head_lock, seq)) {
		sde->desc_avail = sdma_descq_freecnt(sde);
		if (tx->num_desc > sde->desc_avail)
			return 0;
		return 1;
	}
	return 0;
}
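
/*
 * A sketch of the intended pattern (assumes a sleep/requeue path in
 * the caller; names are illustrative):
 *
 *	seq = read_seqbegin(&sde->head_lock);
 *	// ... decide to sleep because the ring was full ...
 *	if (sdma_progress(sde, seq, tx))
 *		// the ring drained in the meantime; retry the
 *		// submit instead of queuing the iowait
 */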

/**
 * sdma_iowait_schedule() - initiate iowait schedule
 * @sde: sdma_engine to schedule against
 * @wait: the iowait to schedule
 *
 * Queues the iowait's work on the port's workqueue, targeting the
 * CPU associated with this engine.
 */
static inline void sdma_iowait_schedule(
	struct sdma_engine *sde,
	struct iowait *wait)
{
	struct hfi1_pportdata *ppd = sde->dd->pport;

	iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
}

/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);

/**
 * struct sdma_map_elem - per-VL mapping of selector to engine
 * @mask: selector mask, ANDed with the selector to index @sde
 * @sde: array of engines serving this VL; sized to a power of two
 *	 so @mask can substitute for a modulo
 *
 * The basic unit used to map an SDMA selector to an engine.
 */
struct sdma_map_elem {
	u32 mask;
	struct sdma_engine *sde[0];
};

/**
 * struct sdma_vl_map - mapping from VL and selector to engine
 * @engine_to_vl: reverse map from engine index to VL
 * @list: RCU head, so a replaced map can be freed after readers
 * @mask: VL mask, ANDed with the VL to index @map
 * @actual_vls: number of VLs actually configured
 * @vls: @actual_vls rounded up to the next power of two
 * @map: array of sdma_map_elem entries, one per (rounded) VL
 *
 * The map is published and traversed under RCU, allowing it to be
 * swapped out without blocking senders.
 */
struct sdma_vl_map {
	s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct sdma_map_elem *map[0];
};
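
/*
 * A lookup sketch under the structures above (illustrative; it
 * assumes the device data publishes the map in an RCU pointer,
 * called sdma_map here, and the real lookup lives in the .c code):
 *
 *	rcu_read_lock();
 *	m = rcu_dereference(dd->sdma_map);
 *	e = m->map[vl & m->mask];
 *	sde = e->sde[selector & e->mask];
 *	rcu_read_unlock();
 */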

int sdma_map_init(
	struct hfi1_devdata *dd,
	u8 port,
	u8 num_vls,
	u8 *vl_engines);

/* slow path */
void _sdma_engine_progress_schedule(struct sdma_engine *sde);

/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress on
 *
 * This is the fast path; progress is only scheduled once at least
 * an eighth of the descriptor queue is in process, which throttles
 * how often the slow path runs.
 */
static inline void sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
		return;
	_sdma_engine_progress_schedule(sde);
}

struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl);

struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl);
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count);
int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde);
void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
				unsigned long cpuid);

#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *sde);
#endif

/* return the final component of a path, e.g. slashstrip("a/b/c") is "c" */
static inline char *slashstrip(char *s)
{
	char *r = s;

	while (*s)
		if (*s++ == '/')
			r = s;
	return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

#endif /* _HFI1_SDMA_H */