#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "verbs.h"
#include "sdma_txreq.h"
/* Hardware limit */
#define MAX_DESC 64
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

/* descriptor DMA mapping types */
#define SDMA_MAP_NONE 0
#define SDMA_MAP_SINGLE 1
#define SDMA_MAP_PAGE 2

/* AHG update descriptor field layout (see sdma_build_ahg_descriptor()) */
#define SDMA_AHG_VALUE_MASK 0xffff
#define SDMA_AHG_VALUE_SHIFT 0
#define SDMA_AHG_INDEX_MASK 0xf
#define SDMA_AHG_INDEX_SHIFT 16
#define SDMA_AHG_FIELD_LEN_MASK 0xf
#define SDMA_AHG_FIELD_LEN_SHIFT 20
#define SDMA_AHG_FIELD_START_MASK 0x1f
#define SDMA_AHG_FIELD_START_SHIFT 24
#define SDMA_AHG_UPDATE_ENABLE_MASK 0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31

/*
 * AHG (automatic header generation) modes, programmed into the
 * SDMA_DESC1_HEADER_MODE field of the first descriptor.
 */
#define SDMA_AHG_NO_AHG 0
#define SDMA_AHG_COPY 1
#define SDMA_AHG_APPLY_UPDATE1 2
#define SDMA_AHG_APPLY_UPDATE2 3
#define SDMA_AHG_APPLY_UPDATE3 4

/*
 * Bits defined in qw0 of the hardware SDMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
#define SDMA_DESC0_BYTE_COUNT_MASK \
        ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
        (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT 0
#define SDMA_DESC0_PHY_ADDR_WIDTH 48
#define SDMA_DESC0_PHY_ADDR_MASK \
        ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
        (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

/*
 * Bits defined in qw1 of the hardware SDMA descriptor.
 */
#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
        ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
        (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT 13
#define SDMA_DESC1_HEADER_MODE_WIDTH 3
#define SDMA_DESC1_HEADER_MODE_MASK \
        ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
        (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
#define SDMA_DESC1_HEADER_INDEX_MASK \
        ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
        (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT 4
#define SDMA_DESC1_HEADER_DWS_WIDTH 4
#define SDMA_DESC1_HEADER_DWS_MASK \
        ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
        (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT 2
#define SDMA_DESC1_GENERATION_WIDTH 2
#define SDMA_DESC1_GENERATION_MASK \
        ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
        (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG BIT_ULL(0)
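/* software states of an SDMA engine */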
enum sdma_states {
        sdma_state_s00_hw_down,
        sdma_state_s10_hw_start_up_halt_wait,
        sdma_state_s15_hw_start_up_clean_wait,
        sdma_state_s20_idle,
        sdma_state_s30_sw_clean_up_wait,
        sdma_state_s40_hw_clean_up_wait,
        sdma_state_s50_hw_halt_wait,
        sdma_state_s60_idle_halt_wait,
        sdma_state_s80_hw_freeze,
        sdma_state_s82_freeze_sw_clean,
        sdma_state_s99_running,
};
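/* events that drive the SDMA engine state machine */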
enum sdma_events {
        sdma_event_e00_go_hw_down,
        sdma_event_e10_go_hw_start,
        sdma_event_e15_hw_halt_done,
        sdma_event_e25_hw_clean_up_done,
        sdma_event_e30_go_running,
        sdma_event_e40_sw_cleaned,
        sdma_event_e50_hw_cleaned,
        sdma_event_e60_hw_halted,
        sdma_event_e70_go_idle,
        sdma_event_e80_hw_freeze,
        sdma_event_e81_hw_frozen,
        sdma_event_e82_hw_unfreeze,
        sdma_event_e85_link_down,
        sdma_event_e90_sw_halted,
};
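/*
 * Actions to carry out on a state transition: which hardware
 * operations to issue and how to update the go_s99_running flag.
 */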
struct sdma_set_state_action {
        unsigned op_enable:1;
        unsigned op_intenable:1;
        unsigned op_halt:1;
        unsigned op_cleanup:1;
        unsigned go_s99_running_tofalse:1;
        unsigned go_s99_running_totrue:1;
};
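/* state machine state of a single SDMA engine */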
struct sdma_state {
        struct kref kref;
        struct completion comp;
        enum sdma_states current_state;
        unsigned current_op;
        unsigned go_s99_running;
        /* debugging */
        enum sdma_states previous_state;
        unsigned previous_op;
        enum sdma_events last_event;
};
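/**
 * struct hw_sdma_desc - hardware sdma descriptor
 * @qw: the two 64-bit quad words
 *
 * This descriptor is the hardware (little-endian) view of an sdma
 * descriptor as placed in the descriptor queue.
 */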
struct hw_sdma_desc {
        /* private:  don't use directly */
        __le64 qw[2];
};
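/**
 * struct sdma_engine - data pertaining to each sdma engine
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 *
 * This structure has the state for each sdma_engine. Fields below
 * the tail_lock and head_lock markers are protected by those locks;
 * the remaining private members are subject to change and should
 * not be used outside of the sdma core.
 */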
struct sdma_engine {
        /* read mostly */
        struct hfi1_devdata *dd;
        struct hfi1_pportdata *ppd;
        /* CSR and interrupt state */
        void __iomem *tail_csr;
        u64 imask;                      /* interrupt mask */
        u64 idle_mask;                  /* mask for idle interrupt */
        u64 progress_mask;              /* mask for progress interrupt */
        u64 int_mask;

        volatile __le64 *head_dma;      /* DMA'ed by chip */
        dma_addr_t head_phys;

        struct hw_sdma_desc *descq;     /* descriptor queue */
        unsigned descq_full_count;
        struct sdma_txreq **tx_ring;
        dma_addr_t descq_phys;

        u32 sdma_mask;
        struct sdma_state state;
        int cpu;
        u8 sdma_shift;
        u8 this_idx;                    /* zero relative engine */
        /* protect changes to senddmactrl shadow */
        spinlock_t senddmactrl_lock;
        u64 p_senddmactrl;              /* shadow per-engine SendDmaCtrl */

        /* read/write using tail_lock */
        spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        u64 tail_sn;
#endif
        u32 descq_tail;
        unsigned long ahg_bits;
        u16 desc_avail;
        u16 tx_tail;
        u16 descq_cnt;

        /* read/write using head_lock */
        seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
        u64 head_sn;
#endif
        u32 descq_head;
        u16 tx_head;
        u64 last_status;
        u64 err_cnt;
        u64 sdma_int_cnt;
        u64 idle_int_cnt;
        u64 progress_int_cnt;

        /* the list of waiters, protected by waitlock */
        seqlock_t waitlock;
        struct list_head dmawait;

        struct tasklet_struct sdma_hw_clean_up_task
                ____cacheline_aligned_in_smp;

        struct tasklet_struct sdma_sw_clean_up_task
                ____cacheline_aligned_in_smp;

        struct work_struct err_halt_worker;

        struct timer_list err_progress_check_timer;
        u32 progress_check_head;

        struct work_struct flush_worker;
        /* protect flush list */
        spinlock_t flushlist_lock;
        struct list_head flushlist;
        struct cpumask cpu_mask;
        struct kobject kobj;
        u32 msix_intr;
};
411
412int sdma_init(struct hfi1_devdata *dd, u8 port);
413void sdma_start(struct hfi1_devdata *dd);
414void sdma_exit(struct hfi1_devdata *dd);
415void sdma_clean(struct hfi1_devdata *dd, size_t num_engines);
416void sdma_all_running(struct hfi1_devdata *dd);
417void sdma_all_idle(struct hfi1_devdata *dd);
418void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
419void sdma_freeze(struct hfi1_devdata *dd);
420void sdma_unfreeze(struct hfi1_devdata *dd);
421void sdma_wait(struct hfi1_devdata *dd);
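/**
 * sdma_empty() - idle engine test
 * @sde: sdma engine
 *
 * Returns non-zero when the descriptor queue is empty.
 */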
static inline int sdma_empty(struct sdma_engine *sde)
{
        return sde->descq_tail == sde->descq_head;
}

/* number of free descriptor queue entries, leaving one slot open */
static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
        return sde->descq_cnt -
                (sde->descq_tail -
                 READ_ONCE(sde->descq_head)) - 1;
}

/* number of descriptor queue entries currently in use */
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
        return sde->descq_cnt - sdma_descq_freecnt(sde);
}
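/*
 * Unlocked check that the engine state machine is running;
 * callers needing a stable answer use sdma_running() instead.
 */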
static inline int __sdma_running(struct sdma_engine *engine)
{
        return engine->state.current_state == sdma_state_s99_running;
}
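/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * Returns a boolean indicating whether the engine is in the
 * running state and can accept submissions; the state is
 * sampled under the tail_lock.
 */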
static inline int sdma_running(struct sdma_engine *engine)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->tail_lock, flags);
        ret = __sdma_running(engine);
        spin_unlock_irqrestore(&engine->tail_lock, flags);
        return ret;
}

void _sdma_txreq_ahgadd(
        struct sdma_txreq *tx,
        u8 num_ahg,
        u8 ahg_entry,
        u32 *ahg,
        u8 ahg_hlen);
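/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: number of AHG descriptors to apply
 * @ahg: array of AHG descriptors
 * @ahg_hlen: header length passed through to _sdma_txreq_ahgadd()
 * @cb: callback to run at completion
 *
 * Return:
 * 0 on success, -ENODATA for a zero-length request, -EMSGSIZE when
 * tlen exceeds MAX_SDMA_PKT_SIZE.
 */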
static inline int sdma_txinit_ahg(
        struct sdma_txreq *tx,
        u16 flags,
        u16 tlen,
        u8 ahg_entry,
        u8 num_ahg,
        u32 *ahg,
        u8 ahg_hlen,
        void (*cb)(struct sdma_txreq *, int))
{
        if (tlen == 0)
                return -ENODATA;        /* zero-length requests not supported */
        if (tlen > MAX_SDMA_PKT_SIZE)
                return -EMSGSIZE;       /* too big */
        tx->desc_limit = ARRAY_SIZE(tx->descs);
        tx->descp = &tx->descs[0];
        INIT_LIST_HEAD(&tx->list);
        tx->num_desc = 0;
        tx->flags = flags;
        tx->complete = cb;
        tx->coalesce_buf = NULL;
        tx->wait = NULL;
        tx->packet_len = tlen;
        tx->tlen = tx->packet_len;
        tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
        tx->descs[0].qw[1] = 0;
        if (flags & SDMA_TXREQ_F_AHG_COPY)
                tx->descs[0].qw[1] |=
                        (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
                                << SDMA_DESC1_HEADER_INDEX_SHIFT) |
                        (((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
                                << SDMA_DESC1_HEADER_MODE_SHIFT);
        else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
                _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
        return 0;
}
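/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback to run at completion
 *
 * Shorthand for sdma_txinit_ahg() with automatic header
 * generation disabled.
 */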
static inline int sdma_txinit(
        struct sdma_txreq *tx,
        u16 flags,
        u16 tlen,
        void (*cb)(struct sdma_txreq *, int))
{
        return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
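/*
 * Descriptor accessors. In the software copy of a descriptor the
 * DMA mapping type is carried in the generation bits of qw[1].
 */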
static inline int sdma_mapping_type(struct sdma_desc *d)
{
        return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
                >> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
        return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
                >> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
        return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
                >> SDMA_DESC0_PHY_ADDR_SHIFT;
}
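/* fill a tx descriptor with the mapping type, bus address, and length */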
static inline void make_tx_sdma_desc(
        struct sdma_txreq *tx,
        int type,
        dma_addr_t addr,
        size_t len)
{
        struct sdma_desc *desc = &tx->descp[tx->num_desc];

        if (!tx->num_desc) {
                /* qw[0] zero; qw[1] first, ahg mode already in from init */
                desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
                                << SDMA_DESC1_GENERATION_SHIFT;
        } else {
                desc->qw[0] = 0;
                desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
                                << SDMA_DESC1_GENERATION_SHIFT;
        }
        desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
                        << SDMA_DESC0_PHY_ADDR_SHIFT) |
                       (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
                        << SDMA_DESC0_BYTE_COUNT_SHIFT);
}

int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
                           int type, void *kvaddr, struct page *page,
                           unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
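/*
 * sdma_txclean() - clean tx of mappings and descriptor allocations;
 * fast path that only calls __sdma_txclean() when the tx actually
 * has descriptors to clean.
 */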
static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
        if (tx->num_desc)
                __sdma_txclean(dd, tx);
}
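/*
 * Finish the last descriptor: set the LAST flag and, for urgent
 * requests, the head-to-host and interrupt-request flags.
 */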
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
                                  struct sdma_txreq *tx)
{
        tx->descp[tx->num_desc].qw[0] |=
                SDMA_DESC0_LAST_DESC_FLAG;
        tx->descp[tx->num_desc].qw[1] |=
                dd->default_desc1;
        if (tx->flags & SDMA_TXREQ_F_URGENT)
                tx->descp[tx->num_desc].qw[1] |=
                        (SDMA_DESC1_HEAD_TO_HOST_FLAG |
                         SDMA_DESC1_INT_REQ_FLAG);
}
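/*
 * Add a mapped address to the tx; when the last byte is added the
 * packet is padded to a dword boundary or closed out.
 */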
static inline int _sdma_txadd_daddr(
        struct hfi1_devdata *dd,
        int type,
        struct sdma_txreq *tx,
        dma_addr_t addr,
        u16 len)
{
        int rval = 0;

        make_tx_sdma_desc(
                tx,
                type,
                addr, len);
        WARN_ON(len > tx->tlen);
        tx->tlen -= len;
        /* special cases for last */
        if (!tx->tlen) {
                if (tx->packet_len & (sizeof(u32) - 1)) {
                        rval = _pad_sdma_tx_descs(dd, tx);
                        if (rval)
                                return rval;
                } else {
                        _sdma_close_tx(dd, tx);
                }
        }
        tx->num_desc++;
        return rval;
}
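/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */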
static inline int sdma_txadd_page(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        struct page *page,
        unsigned long offset,
        u16 len)
{
        dma_addr_t addr;
        int rval;

        if (unlikely(tx->num_desc == tx->desc_limit)) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
                                              NULL, page, offset, len);
                if (rval <= 0)
                        return rval;
        }

        addr = dma_map_page(
                       &dd->pcidev->dev,
                       page,
                       offset,
                       len,
                       DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
                __sdma_txclean(dd, tx);
                return -ENOSPC;
        }

        return _sdma_txadd_daddr(
                        dd, SDMA_MAP_PAGE, tx, addr, len);
}
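/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the address is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma
 * mapped; there is no unmapping as part of the progress processing
 * for this memory location.
 */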
static inline int sdma_txadd_daddr(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        dma_addr_t addr,
        u16 len)
{
        int rval;

        if (unlikely(tx->num_desc == tx->desc_limit)) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
                                              NULL, NULL, 0, 0);
                if (rval <= 0)
                        return rval;
        }

        return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}
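/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the buffer is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated
 * kvaddr and len.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */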
static inline int sdma_txadd_kvaddr(
        struct hfi1_devdata *dd,
        struct sdma_txreq *tx,
        void *kvaddr,
        u16 len)
{
        dma_addr_t addr;
        int rval;

        if (unlikely(tx->num_desc == tx->desc_limit)) {
                rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
                                              kvaddr, NULL, 0, len);
                if (rval <= 0)
                        return rval;
        }

        addr = dma_map_single(
                       &dd->pcidev->dev,
                       kvaddr,
                       len,
                       DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
                __sdma_txclean(dd, tx);
                return -ENOSPC;
        }

        return _sdma_txadd_daddr(
                        dd, SDMA_MAP_SINGLE, tx, addr, len);
}

struct iowait_work;

int sdma_send_txreq(struct sdma_engine *sde,
                    struct iowait_work *wait,
                    struct sdma_txreq *tx,
                    bool pkts_sent);
int sdma_send_txlist(struct sdma_engine *sde,
                     struct iowait_work *wait,
                     struct list_head *tx_list,
                     u16 *count_out);

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
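/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: updated field value (16 bits)
 * @dwindex: dword index within the header (0 - 15)
 * @startbit: first bit of the field within the dword (0 - 31)
 * @bits: field width in bits
 *
 * Build and return a 32-bit AHG update descriptor.
 */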
static inline u32 sdma_build_ahg_descriptor(
        u16 data,
        u8 dwindex,
        u8 startbit,
        u8 bits)
{
        return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
                     ((startbit & SDMA_AHG_FIELD_START_MASK) <<
                      SDMA_AHG_FIELD_START_SHIFT) |
                     ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
                      SDMA_AHG_FIELD_LEN_SHIFT) |
                     ((dwindex & SDMA_AHG_INDEX_MASK) <<
                      SDMA_AHG_INDEX_SHIFT) |
                     ((data & SDMA_AHG_VALUE_MASK) <<
                      SDMA_AHG_VALUE_SHIFT));
}
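/**
 * sdma_progress() - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * Called in the sleep path to check for ring progress: if the head
 * seqlock has advanced since @seq was read, the free count is
 * refreshed and non-zero is returned when the txreq now fits.
 */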
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
                                     struct sdma_txreq *tx)
{
        if (read_seqretry(&sde->head_lock, seq)) {
                sde->desc_avail = sdma_descq_freecnt(sde);
                if (tx->num_desc > sde->desc_avail)
                        return 0;
                return 1;
        }
        return 0;
}

void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
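/**
 * struct sdma_map_elem - mapping for a vl
 * @mask: selector mask
 * @sde: array of engines for this vl
 *
 * The mask is used to "mod" the selector to produce an index into
 * the trailing array of sdes.
 */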
struct sdma_map_elem {
        u32 mask;
        struct sdma_engine *sde[];
};
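/**
 * struct sdma_vl_map - mapping of vls to engines
 * @engine_to_vl: map of an engine to a vl
 * @list: rcu head for free callback
 * @mask: vl mask to "mod" the vl to produce an index into @map
 * @actual_vls: number of vls
 * @vls: number of vls rounded to the next power of 2
 * @map: array of sdma_map_elem entries
 *
 * This is the parent mapping structure; its trailing members point
 * to sdma_map_elem entries, which in turn point to an array of
 * sde's for that vl.
 */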
struct sdma_vl_map {
        s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
        struct rcu_head list;
        u32 mask;
        u8 actual_vls;
        u8 vls;
        struct sdma_map_elem *map[];
};

int sdma_map_init(
        struct hfi1_devdata *dd,
        u8 port,
        u8 num_vls,
        u8 *vl_engines);

/* slow path */
void _sdma_engine_progress_schedule(struct sdma_engine *sde);
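/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * Fast-path check: only take the slow path when the descriptor
 * ring is more than 1/8 full.
 */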
static inline void sdma_engine_progress_schedule(
        struct sdma_engine *sde)
{
        if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
                return;
        _sdma_engine_progress_schedule(sde);
}

struct sdma_engine *sdma_select_engine_sc(
        struct hfi1_devdata *dd,
        u32 selector,
        u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
        struct hfi1_devdata *dd,
        u32 selector,
        u8 vl);

struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
                                            u32 selector, u8 vl);
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
                                size_t count);
int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);
void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd,
                                unsigned long cpuid);

#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *);
#endif
/* return the component of a path name following the last '/' */
static inline char *slashstrip(char *s)
{
        char *r = s;

        while (*s)
                if (*s++ == '/')
                        r = s;
        return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

#endif /* _HFI1_SDMA_H */