#include <linux/delay.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"

#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350

#define SC(name) SEND_CTXT_##name

static void sc_wait_for_packet_egress(struct send_context *sc, int pause);

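/*
 * Credit Manager reset: set the CM reset bit in SEND_CTRL and busy-wait
 * until the hardware clears it.  Called from pio_send_control() with the
 * sendctrl lock held.
 */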
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{
	write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
	while (1) {
		udelay(1);
		sendctrl = read_csr(dd, SEND_CTRL);
		if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
			break;
	}
}

#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
		<< SEND_CTRL_UNSUPPORTED_VL_SHIFT)
#endif

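/*
 * Global control of PIO send: update SEND_CTRL under the sendctrl lock to
 * enable/disable sending, adjust the supported-VL mask, toggle VL
 * arbitration, or reset the credit manager.
 */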
void pio_send_control(struct hfi1_devdata *dd, int op)
{
	u64 reg, mask;
	unsigned long flags;
	int write = 1;
	int flush = 0;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	reg = read_csr(dd, SEND_CTRL);
	switch (op) {
	case PSC_GLOBAL_ENABLE:
		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
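	/* Fall through */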
	case PSC_DATA_VL_ENABLE:
		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
				SEND_CTRL_UNSUPPORTED_VL_SHIFT;
		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
		break;
	case PSC_GLOBAL_DISABLE:
		reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_ENABLE:
		reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_DISABLE:
		reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_CM_RESET:
		__cm_reset(dd, reg);
		write = 0;
		break;
	case PSC_DATA_VL_DISABLE:
		reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
		flush = 1;
		break;
	default:
		dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
		break;
	}

	if (write) {
		write_csr(dd, SEND_CTRL, reg);
		if (flush)
			(void)read_csr(dd, SEND_CTRL);
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

#define NUM_SC_POOLS 2

#define SCS_POOL_0 -1
#define SCS_POOL_1 -2

#define SCC_PER_VL -1
#define SCC_PER_CPU -2

#define SCC_PER_KRCVQ -3
#define SCC_ACK_CREDITS 32

#define PIO_WAIT_BATCH_SIZE 5

static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
	[SC_KERNEL] = { .size = SCS_POOL_0,
			.count = SCC_PER_VL },
	[SC_ACK] = { .size = SCC_ACK_CREDITS,
		     .count = SCC_PER_KRCVQ },
	[SC_USER] = { .size = SCS_POOL_0,
		      .count = SCC_PER_CPU },
};

struct mem_pool_config {
	int centipercent;
	int absolute_blocks;
};

static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
	{ 10000, -1 },
	{ 0, -1 },
};

struct mem_pool_info {
	int centipercent;
	int count;
	int blocks;
	int size;
};

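/*
 * Convert a negative size/count "wildcard" into a memory pool index:
 * -1 maps to pool 0, -2 maps to pool 1.  A non-negative value is not a
 * wildcard and yields -1.
 */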
static int wildcard_to_pool(int wc)
{
	if (wc >= 0)
		return -1;
	return -wc - 1;
}

static const char *sc_type_names[SC_MAX] = {
	"kernel",
	"ack",
	"user"
};

static const char *sc_type_name(int index)
{
	if (index < 0 || index >= SC_MAX)
		return "unknown";
	return sc_type_names[index];
}

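/*
 * Read the send context memory pool configuration and the per-type context
 * counts/sizes, resolve all wildcards, and fill in dd->sc_sizes[].  Returns
 * the total number of send contexts to use, or a negative errno on a
 * configuration error.
 */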
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
	struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
	int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
	int total_contexts = 0;
	int fixed_blocks;
	int pool_blocks;
	int used_blocks;
	int cp_total;
	int ab_total;
	int extra;
	int i;

	cp_total = 0;
	ab_total = 0;
	for (i = 0; i < NUM_SC_POOLS; i++) {
		int cp = sc_mem_pool_config[i].centipercent;
		int ab = sc_mem_pool_config[i].absolute_blocks;

		if (cp >= 0) {
			cp_total += cp;
		} else if (ab >= 0) {
			ab_total += ab;
		} else {
			dd_dev_err(
				dd,
				"Send context memory pool %d: both the block count and centipercent are invalid\n",
				i);
			return -EINVAL;
		}

		mem_pool_info[i].centipercent = cp;
		mem_pool_info[i].blocks = ab;
	}

	if (cp_total != 0 && ab_total != 0) {
		dd_dev_err(
			dd,
			"All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
		return -EINVAL;
	}

	if (cp_total != 0 && cp_total != 10000) {
		dd_dev_err(
			dd,
			"Send context memory pool centipercent is %d, expecting 10000\n",
			cp_total);
		return -EINVAL;
	}

	if (ab_total > total_blocks) {
		dd_dev_err(
			dd,
			"Send context memory pool absolute block count %d is larger than the memory size %d\n",
			ab_total, total_blocks);
		return -EINVAL;
	}

	fixed_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		int count = sc_config_sizes[i].count;
		int size = sc_config_sizes[i].size;
		int pool;

		if (i == SC_ACK) {
			count = dd->n_krcv_queues;
		} else if (i == SC_KERNEL) {
			count = num_vls + 1;
		} else if (count == SCC_PER_CPU) {
			count = dd->num_rcv_contexts - dd->n_krcv_queues;
		} else if (count < 0) {
			dd_dev_err(
				dd,
				"%s send context invalid count wildcard %d\n",
				sc_type_name(i), count);
			return -EINVAL;
		}
		if (total_contexts + count > dd->chip_send_contexts)
			count = dd->chip_send_contexts - total_contexts;

		total_contexts += count;

		pool = wildcard_to_pool(size);
		if (pool == -1) {
			fixed_blocks += size * count;
		} else if (pool < NUM_SC_POOLS) {
			mem_pool_info[pool].count += count;
		} else {
			dd_dev_err(
				dd,
				"%s send context invalid pool wildcard %d\n",
				sc_type_name(i), size);
			return -EINVAL;
		}

		dd->sc_sizes[i].count = count;
		dd->sc_sizes[i].size = size;
	}
	if (fixed_blocks > total_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed block count, %u, larger than total block count %u\n",
			fixed_blocks, total_blocks);
		return -EINVAL;
	}

	pool_blocks = total_blocks - fixed_blocks;
	if (ab_total > pool_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed pool sizes, %u, larger than pool block count %u\n",
			ab_total, pool_blocks);
		return -EINVAL;
	}

	pool_blocks -= ab_total;

	for (i = 0; i < NUM_SC_POOLS; i++) {
		struct mem_pool_info *pi = &mem_pool_info[i];

		if (pi->centipercent >= 0)
			pi->blocks = (pool_blocks * pi->centipercent) / 10000;

		if (pi->blocks == 0 && pi->count != 0) {
			dd_dev_err(
				dd,
				"Send context memory pool %d has %u contexts, but no blocks\n",
				i, pi->count);
			return -EINVAL;
		}
		if (pi->count == 0) {
			if (pi->blocks != 0)
				dd_dev_err(
					dd,
					"Send context memory pool %d has %u blocks, but zero contexts\n",
					i, pi->blocks);
			pi->size = 0;
		} else {
			pi->size = pi->blocks / pi->count;
		}
	}

	used_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		if (dd->sc_sizes[i].size < 0) {
			unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);

			WARN_ON_ONCE(pool >= NUM_SC_POOLS);
			dd->sc_sizes[i].size = mem_pool_info[pool].size;
		}

#define PIO_MAX_BLOCKS 1024
		if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
			dd->sc_sizes[i].size = PIO_MAX_BLOCKS;

		used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
	}
	extra = total_blocks - used_blocks;
	if (extra != 0)
		dd_dev_info(dd, "unused send context blocks: %d\n", extra);

	return total_contexts;
}

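/*
 * Allocate the software send context table and carve the PIO send memory
 * into per-context base/credit assignments according to dd->sc_sizes[].
 */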
int init_send_contexts(struct hfi1_devdata *dd)
{
	u16 base;
	int ret, i, j, context;

	ret = init_credit_return(dd);
	if (ret)
		return ret;

	dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
				     GFP_KERNEL);
	dd->send_contexts = kcalloc(dd->num_send_contexts,
				    sizeof(struct send_context_info),
				    GFP_KERNEL);
	if (!dd->send_contexts || !dd->hw_to_sw) {
		kfree(dd->hw_to_sw);
		kfree(dd->send_contexts);
		free_credit_return(dd);
		return -ENOMEM;
	}

	for (i = 0; i < TXE_NUM_CONTEXTS; i++)
		dd->hw_to_sw[i] = INVALID_SCI;

	context = 0;
	base = 1;
	for (i = 0; i < SC_MAX; i++) {
		struct sc_config_sizes *scs = &dd->sc_sizes[i];

		for (j = 0; j < scs->count; j++) {
			struct send_context_info *sci =
						&dd->send_contexts[context];
			sci->type = i;
			sci->base = base;
			sci->credits = scs->size;

			context++;
			base += scs->size;
		}
	}

	return 0;
}

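/*
 * Allocate a software send context of the given type and reserve a hardware
 * context for it.  Hardware contexts are handed out from the top of the
 * chip's range downward.  Returns 0 on success, -ENOSPC if none are free.
 * Called with dd->sc_lock held (see sc_alloc()).
 */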
static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
		       u32 *hw_context)
{
	struct send_context_info *sci;
	u32 index;
	u32 context;

	for (index = 0, sci = &dd->send_contexts[0];
	     index < dd->num_send_contexts; index++, sci++) {
		if (sci->type == type && sci->allocated == 0) {
			sci->allocated = 1;
			context = dd->chip_send_contexts - index - 1;
			dd->hw_to_sw[context] = index;
			*sw_index = index;
			*hw_context = context;
			return 0;
		}
	}
	dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
	return -ENOSPC;
}

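/*
 * Free the send context given by its software index and invalidate the
 * hw_to_sw entry for its hardware context.  Called with dd->sc_lock held.
 */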
static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
{
	struct send_context_info *sci;

	sci = &dd->send_contexts[sw_index];
	if (!sci->allocated) {
		dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
			   __func__, sw_index, hw_context);
	}
	sci->allocated = 0;
	dd->hw_to_sw[hw_context] = INVALID_SCI;
}

static inline u32 group_context(u32 context, u32 group)
{
	return (context >> group) << group;
}

static inline u32 group_size(u32 group)
{
	return 1 << group;
}

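/*
 * Obtain the credit return addresses for the given send context: hw_free
 * points at this context's slot within its credit return group (kernel
 * virtual address), while *pa receives the DMA address of the start of the
 * group, which is what the hardware credit return register is programmed
 * with.
 */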
static void cr_group_addresses(struct send_context *sc, dma_addr_t *pa)
{
	u32 gc = group_context(sc->hw_context, sc->group);
	u32 index = sc->hw_context & 0x7;

	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
	*pa = (unsigned long)
	      &((struct credit_return *)sc->dd->cr_base[sc->node].pa)[gc];
}

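/*
 * Work item that restarts a halted send context; scheduled via
 * sc->halt_work.
 */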
static void sc_halted(struct work_struct *work)
{
	struct send_context *sc;

	sc = container_of(work, struct send_context, halt_work);
	sc_restart(sc);
}

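/*
 * Calculate a credit return threshold for this send context from the given
 * MTU: convert the MTU plus the header queue entry size into PIO blocks and
 * leave at least that many credits of headroom below the context's credit
 * count, with a floor of 1.
 */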
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
	u32 release_credits;
	u32 threshold;

	mtu += hdrqentsize << 2;
	release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

	if (sc->credits <= release_credits)
		threshold = 1;
	else
		threshold = sc->credits - release_credits;

	return threshold;
}

static u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
	return (sc->credits * percent) / 100;
}

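/*
 * Set the credit return threshold for this send context and, if it changed,
 * force a credit return so the new threshold is noticed.
 */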
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
	unsigned long flags;
	u32 old_threshold;
	int force_return = 0;

	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

	old_threshold = (sc->credit_ctrl >>
				SC(CREDIT_CTRL_THRESHOLD_SHIFT))
			& SC(CREDIT_CTRL_THRESHOLD_MASK);

	if (new_threshold != old_threshold) {
		sc->credit_ctrl =
			(sc->credit_ctrl
				& ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
			| ((new_threshold
				& SC(CREDIT_CTRL_THRESHOLD_MASK))
			   << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);

		force_return = 1;
	}

	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

	if (force_return)
		sc_return_credits(sc);
}

void set_pio_integrity(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg = 0;
	u32 hw_context = sc->hw_context;
	int type = sc->type;

	if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
	    dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
		reg = hfi1_pkt_default_send_ctxt_mask(dd, type);

	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
}

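/*
 * Allocate and initialize a send context of the given type on the given
 * NUMA node.  The context is programmed with its base/credits, integrity
 * checks, partition key, opcode check, credit return address and credit
 * control, but it is not enabled here.  Returns NULL on any failure.
 */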
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
			      uint hdrqentsize, int numa)
{
	struct send_context_info *sci;
	struct send_context *sc;
	dma_addr_t pa;
	unsigned long flags;
	u64 reg;
	u32 thresh;
	u32 sw_index;
	u32 hw_context;
	int ret;
	u8 opval, opmask;

	if (dd->flags & HFI1_FROZEN)
		return NULL;

	sc = kzalloc_node(sizeof(struct send_context), GFP_KERNEL, numa);
	if (!sc)
		return NULL;

	spin_lock_irqsave(&dd->sc_lock, flags);
	ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
	if (ret) {
		spin_unlock_irqrestore(&dd->sc_lock, flags);
		kfree(sc);
		return NULL;
	}

	sci = &dd->send_contexts[sw_index];
	sci->sc = sc;

	sc->dd = dd;
	sc->node = numa;
	sc->type = type;
	spin_lock_init(&sc->alloc_lock);
	spin_lock_init(&sc->release_lock);
	spin_lock_init(&sc->credit_ctrl_lock);
	INIT_LIST_HEAD(&sc->piowait);
	INIT_WORK(&sc->halt_work, sc_halted);
	atomic_set(&sc->buffers_allocated, 0);
	init_waitqueue_head(&sc->halt_wait);

	sc->group = 0;

	sc->sw_index = sw_index;
	sc->hw_context = hw_context;
	cr_group_addresses(sc, &pa);
	sc->credits = sci->credits;

#define PIO_ADDR_CONTEXT_MASK 0xfful
#define PIO_ADDR_CONTEXT_SHIFT 16
	sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
					<< PIO_ADDR_CONTEXT_SHIFT);

	reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
		<< SC(CTRL_CTXT_DEPTH_SHIFT))
	      | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
		<< SC(CTRL_CTXT_BASE_SHIFT));
	write_kctxt_csr(dd, hw_context, SC(CTRL), reg);

	set_pio_integrity(sc);

	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);

	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
			(DEFAULT_PKEY &
			 SC(CHECK_PARTITION_KEY_VALUE_MASK))
			<< SC(CHECK_PARTITION_KEY_VALUE_SHIFT));

	if (type == SC_USER) {
		opval = USER_OPCODE_CHECK_VAL;
		opmask = USER_OPCODE_CHECK_MASK;
	} else {
		opval = OPCODE_CHECK_VAL_DISABLED;
		opmask = OPCODE_CHECK_MASK_DISABLED;
	}

	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
			((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
			((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));

	reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);

	if (type == SC_ACK) {
		thresh = sc_percent_to_threshold(sc, 50);
	} else if (type == SC_USER) {
		thresh = sc_percent_to_threshold(sc,
						 user_credit_return_threshold);
	} else {
		thresh = sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize);
	}
	reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);

	if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
	else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN))
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);

	sc->credit_ctrl = reg;
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);

	if (type == SC_USER) {
		reg = 1ULL << 15;
		write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
	}

	spin_unlock_irqrestore(&dd->sc_lock, flags);

	if (type != SC_USER) {
		sc->sr_size = sci->credits + 1;
		sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
				      sc->sr_size, GFP_KERNEL, numa);
		if (!sc->sr) {
			sc_free(sc);
			return NULL;
		}
	}

	dd_dev_info(dd,
		    "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
		    sw_index,
		    hw_context,
		    sc_type_name(type),
		    sc->group,
		    sc->credits,
		    sc->credit_ctrl,
		    thresh);

	return sc;
}

void sc_free(struct send_context *sc)
{
	struct hfi1_devdata *dd;
	unsigned long flags;
	u32 sw_index;
	u32 hw_context;

	if (!sc)
		return;

	sc->flags |= SCF_IN_FREE;
	dd = sc->dd;
	if (!list_empty(&sc->piowait))
		dd_dev_err(dd, "piowait list not empty!\n");
	sw_index = sc->sw_index;
	hw_context = sc->hw_context;
	sc_disable(sc);
	flush_work(&sc->halt_work);

	spin_lock_irqsave(&dd->sc_lock, flags);
	dd->send_contexts[sw_index].sc = NULL;

	write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);

	sc_hw_free(dd, sw_index, hw_context);
	spin_unlock_irqrestore(&dd->sc_lock, flags);

	kfree(sc->sr);
	kfree(sc);
}

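/*
 * Disable the send context: mark it disabled, wait for packet egress, clear
 * the hardware enable bit, and then invoke the release callback of every
 * in-flight buffer left in the shadow ring with PRC_SC_DISABLE.
 */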
void sc_disable(struct send_context *sc)
{
	u64 reg;
	unsigned long flags;
	struct pio_buf *pbuf;

	if (!sc)
		return;

	spin_lock_irqsave(&sc->alloc_lock, flags);
	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
	reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
	sc->flags &= ~SCF_ENABLED;
	sc_wait_for_packet_egress(sc, 1);
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	udelay(1);
	spin_lock_irqsave(&sc->release_lock, flags);
	if (sc->sr) {
		while (sc->sr_tail != sc->sr_head) {
			pbuf = &sc->sr[sc->sr_tail].pbuf;
			if (pbuf->cb)
				(*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
			sc->sr_tail++;
			if (sc->sr_tail >= sc->sr_size)
				sc->sr_tail = 0;
		}
	}
	spin_unlock_irqrestore(&sc->release_lock, flags);
}

#define packet_occupancy(r) \
	(((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK) \
	>> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)

#define egress_halted(r) \
	((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)

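/*
 * Wait for the context's egress packet occupancy to drain to zero or for
 * egress to halt.  If the occupancy stops making progress for too long, log
 * an error and queue a link bounce.  Optionally pause afterwards so credit
 * returns can complete.
 */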
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg = 0;
	u64 reg_prev;
	u32 loop = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, sc->hw_context * 8 +
			       SEND_EGRESS_CTXT_STATUS);
		if (egress_halted(reg))
			break;
		reg = packet_occupancy(reg);
		if (reg == 0)
			break;
		if (reg != reg_prev)
			loop = 0;
		if (loop > 500) {
			dd_dev_err(dd,
				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sc->sw_index,
				   sc->hw_context, (u32)reg);
			queue_work(dd->pport->hfi1_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		loop++;
		udelay(1);
	}

	if (pause)
		pause_for_credit_return(dd);
}

void sc_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		struct send_context *sc = dd->send_contexts[i].sc;

		if (!sc)
			continue;
		sc_wait_for_packet_egress(sc, 0);
	}
}

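/*
 * Restart a halted send context: wait for the hardware to report the halt,
 * wait for outstanding PIO buffers to drain (skipped for user contexts),
 * then disable and re-enable the context.  Returns 0 on success or a
 * negative errno.
 */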
int sc_restart(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg;
	u32 loop;
	int count;

	if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
		return -EINVAL;

	dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
		    sc->hw_context);

	loop = 0;
	while (1) {
		reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
		if (reg & SC(STATUS_CTXT_HALTED_SMASK))
			break;
		if (loop > 100) {
			dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
				   __func__, sc->sw_index, sc->hw_context);
			return -ETIME;
		}
		loop++;
		udelay(1);
	}

	if (sc->type != SC_USER) {
		loop = 0;
		while (1) {
			count = atomic_read(&sc->buffers_allocated);
			if (count == 0)
				break;
			if (loop > 100) {
				dd_dev_err(dd,
					   "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
					   __func__, sc->sw_index,
					   sc->hw_context, count);
			}
			loop++;
			udelay(1);
		}
	}

	sc_disable(sc);

	return sc_enable(sc);
}

void pio_freeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		sc_disable(sc);
	}
}

void pio_kernel_unfreeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		sc_enable(sc);
	}
}

static int pio_init_wait_progress(struct hfi1_devdata *dd)
{
	u64 reg;
	int max, count = 0;

	max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
	while (1) {
		reg = read_csr(dd, SEND_PIO_INIT_CTXT);
		if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
			break;
		if (count >= max)
			return -ETIMEDOUT;
		udelay(5);
		count++;
	}

	return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
}

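/*
 * Reset all of the chip's PIO send blocks: clear any stale init error, then
 * kick off the all-context PIO init and wait for it to finish.
 */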
void pio_reset_all(struct hfi1_devdata *dd)
{
	int ret;

	ret = pio_init_wait_progress(dd);
	if (ret == -EIO) {
		write_csr(dd, SEND_PIO_ERR_CLEAR,
			  SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
	}

	write_csr(dd, SEND_PIO_INIT_CTXT,
		  SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
	udelay(2);
	ret = pio_init_wait_progress(dd);
	if (ret < 0) {
		dd_dev_err(dd,
			   "PIO send context init %s while initializing all PIO blocks\n",
			   ret == -ETIMEDOUT ? "is stuck" : "had an error");
	}
}

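/*
 * Enable the send context: reset the software ring state, clear any pending
 * per-context errors, run the hardware PIO init for this context, and only
 * then set the enable bit.  All of this is done under the context's
 * alloc_lock.
 */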
int sc_enable(struct send_context *sc)
{
	u64 sc_ctrl, reg, pio;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int ret = 0;

	if (!sc)
		return -EINVAL;
	dd = sc->dd;

	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
		goto unlock;

	*sc->hw_free = 0;
	sc->free = 0;
	sc->alloc_free = 0;
	sc->fill = 0;
	sc->sr_head = 0;
	sc->sr_tail = 0;
	sc->flags = 0;
	atomic_set(&sc->buffers_allocated, 0);

	reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
	if (reg)
		write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);

	spin_lock(&dd->sc_init_lock);

	pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
	       SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
		SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
	write_csr(dd, SEND_PIO_INIT_CTXT, pio);

	udelay(2);
	ret = pio_init_wait_progress(dd);
	spin_unlock(&dd->sc_init_lock);
	if (ret) {
		dd_dev_err(dd,
			   "sctxt%u(%u): Context not enabled due to init failure %d\n",
			   sc->sw_index, sc->hw_context, ret);
		goto unlock;
	}

	sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
	write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);

	read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	sc->flags |= SCF_ENABLED;

unlock:
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	return ret;
}

void sc_return_credits(struct send_context *sc)
{
	if (!sc)
		return;

	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
			SC(CREDIT_FORCE_FORCE_RETURN_SMASK));

	read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));

	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}

void sc_flush(struct send_context *sc)
{
	if (!sc)
		return;

	sc_wait_for_packet_egress(sc, 1);
}

void sc_drop(struct send_context *sc)
{
	if (!sc)
		return;

	dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
		    __func__, sc->sw_index, sc->hw_context);
}

void sc_stop(struct send_context *sc, int flag)
{
	unsigned long flags;

	sc->flags |= flag;

	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc->flags &= ~SCF_ENABLED;
	spin_unlock_irqrestore(&sc->alloc_lock, flags);
	wake_up(&sc->halt_wait);
}

#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)

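/*
 * Allocate a PIO send buffer of dw_len dwords from the context.  The
 * allocation is tracked in the shadow ring along with the optional release
 * callback, which is invoked when the corresponding credits are returned.
 * Returns NULL if the context is disabled or if no credits are available
 * after one forced release update.
 */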
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
				pio_release_cb cb, void *arg)
{
	struct pio_buf *pbuf = NULL;
	unsigned long flags;
	unsigned long avail;
	unsigned long blocks = dwords_to_blocks(dw_len);
	unsigned long start_fill;
	int trycount = 0;
	u32 head, next;

	spin_lock_irqsave(&sc->alloc_lock, flags);
	if (!(sc->flags & SCF_ENABLED)) {
		spin_unlock_irqrestore(&sc->alloc_lock, flags);
		goto done;
	}

retry:
	avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
	if (blocks > avail) {
		if (unlikely(trycount)) {
			spin_unlock_irqrestore(&sc->alloc_lock, flags);
			goto done;
		}
		sc->alloc_free = ACCESS_ONCE(sc->free);
		avail = (unsigned long)sc->credits -
			(sc->fill - sc->alloc_free);
		if (blocks > avail) {
			spin_unlock_irqrestore(&sc->alloc_lock, flags);
			sc_release_update(sc);
			spin_lock_irqsave(&sc->alloc_lock, flags);
			sc->alloc_free = ACCESS_ONCE(sc->free);
			trycount++;
			goto retry;
		}
	}

	atomic_inc(&sc->buffers_allocated);

	head = sc->sr_head;

	start_fill = sc->fill;
	sc->fill += blocks;

	pbuf = &sc->sr[head].pbuf;
	pbuf->sent_at = sc->fill;
	pbuf->cb = cb;
	pbuf->arg = arg;
	pbuf->sc = sc;

	next = head + 1;
	if (next >= sc->sr_size)
		next = 0;

	smp_wmb();
	sc->sr_head = next;
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	pbuf->start = sc->base_addr + ((start_fill % sc->credits)
			* PIO_BLOCK_SIZE);
	pbuf->size = sc->credits * PIO_BLOCK_SIZE;
	pbuf->end = sc->base_addr + pbuf->size;
	pbuf->block_count = blocks;
	pbuf->qw_written = 0;
	pbuf->carry_bytes = 0;
	pbuf->carry.val64 = 0;
done:
	return pbuf;
}

void sc_add_credit_return_intr(struct send_context *sc)
{
	unsigned long flags;

	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
	if (sc->credit_intr_count == 0) {
		sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
	}
	sc->credit_intr_count++;
	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}

void sc_del_credit_return_intr(struct send_context *sc)
{
	unsigned long flags;

	WARN_ON(sc->credit_intr_count == 0);

	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
	sc->credit_intr_count--;
	if (sc->credit_intr_count == 0) {
		sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
	}
	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}

void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
{
	if (needint)
		sc_add_credit_return_intr(sc);
	else
		sc_del_credit_return_intr(sc);
	trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
	if (needint) {
		mmiowb();
		sc_return_credits(sc);
	}
}

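/*
 * Walk the context's piowait list and wake up a batch of QPs that were
 * blocked waiting for PIO buffers.  The credit return interrupt request is
 * dropped once the list has been fully drained.
 */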
static void sc_piobufavail(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct hfi1_qp *qps[PIO_WAIT_BATCH_SIZE];
	struct hfi1_qp *qp;
	unsigned long flags;
	unsigned i, n = 0;

	if (dd->send_contexts[sc->sw_index].type != SC_KERNEL)
		return;
	list = &sc->piowait;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	while (!list_empty(list)) {
		struct iowait *wait;

		if (n == ARRAY_SIZE(qps))
			goto full;
		wait = list_first_entry(list, struct iowait, list);
		qp = container_of(wait, struct hfi1_qp, s_iowait);
		list_del_init(&qp->s_iowait.list);
		qps[n++] = qp;
	}

	if (n)
		hfi1_sc_wantpiobuf_intr(sc, 0);
full:
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	for (i = 0; i < n; i++)
		hfi1_qp_wakeup(qps[i], HFI1_S_WAIT_PIO);
}

static inline int fill_code(u64 hw_free)
{
	int code = 0;

	if (hw_free & CR_STATUS_SMASK)
		code |= PRC_STATUS_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
		code |= PRC_PBC;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
		code |= PRC_THRESHOLD;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
		code |= PRC_FILL_ERR;
	if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
		code |= PRC_SC_DISABLE;
	return code;
}

#define sent_before(a, b) time_before(a, b)

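/*
 * Credit return processing: fold the latest hardware free counter into
 * sc->free, invoke the release callback for every shadow ring entry whose
 * blocks have now been returned, and finally wake any waiters via
 * sc_piobufavail().
 */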
void sc_release_update(struct send_context *sc)
{
	struct pio_buf *pbuf;
	u64 hw_free;
	u32 head, tail;
	unsigned long old_free;
	unsigned long extra;
	unsigned long flags;
	int code;

	if (!sc)
		return;

	spin_lock_irqsave(&sc->release_lock, flags);

	hw_free = le64_to_cpu(*sc->hw_free);
	old_free = sc->free;
	extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
			- (old_free & CR_COUNTER_MASK))
		& CR_COUNTER_MASK;
	sc->free = old_free + extra;
	trace_hfi1_piofree(sc, extra);

	code = -1;
	head = ACCESS_ONCE(sc->sr_head);
	tail = sc->sr_tail;
	while (head != tail) {
		pbuf = &sc->sr[tail].pbuf;

		if (sent_before(sc->free, pbuf->sent_at))
			break;
		if (pbuf->cb) {
			if (code < 0)
				code = fill_code(hw_free);
			(*pbuf->cb)(pbuf->arg, code);
		}

		tail++;
		if (tail >= sc->sr_size)
			tail = 0;
	}

	sc->sr_tail = tail;
	spin_unlock_irqrestore(&sc->release_lock, flags);
	sc_piobufavail(sc);
}

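/*
 * Credit return for a send context group: map the given hardware context to
 * its group and run sc_release_update() on every context in that group.
 */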
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
{
	struct send_context *sc;
	u32 sw_index;
	u32 gc, gc_end;

	spin_lock(&dd->sc_lock);
	sw_index = dd->hw_to_sw[hw_context];
	if (unlikely(sw_index >= dd->num_send_contexts)) {
		dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
			   __func__, hw_context, sw_index);
		goto done;
	}
	sc = dd->send_contexts[sw_index].sc;
	if (unlikely(!sc))
		goto done;

	gc = group_context(hw_context, sc->group);
	gc_end = gc + group_size(sc->group);
	for (; gc < gc_end; gc++) {
		sw_index = dd->hw_to_sw[gc];
		if (unlikely(sw_index >= dd->num_send_contexts)) {
			dd_dev_err(dd,
				   "%s: invalid hw (%u) to sw (%u) mapping\n",
				   __func__, hw_context, sw_index);
			continue;
		}
		sc_release_update(dd->send_contexts[sw_index].sc);
	}
done:
	spin_unlock(&dd->sc_lock);
}

int init_pervl_scs(struct hfi1_devdata *dd)
{
	int i;
	u64 mask, all_vl_mask = (u64)0x80ff;
	u32 ctxt;

	dd->vld[15].sc = sc_alloc(dd, SC_KERNEL,
				  dd->rcd[0]->rcvhdrqentsize, dd->node);
	if (!dd->vld[15].sc)
		goto nomem;
	hfi1_init_ctxt(dd->vld[15].sc);
	dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
	for (i = 0; i < num_vls; i++) {
		dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
					 dd->rcd[0]->rcvhdrqentsize, dd->node);
		if (!dd->vld[i].sc)
			goto nomem;

		hfi1_init_ctxt(dd->vld[i].sc);

		dd->vld[i].mtu = hfi1_max_mtu;
	}
	sc_enable(dd->vld[15].sc);
	ctxt = dd->vld[15].sc->hw_context;
	mask = all_vl_mask & ~(1LL << 15);
	write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	dd_dev_info(dd,
		    "Using send context %u(%u) for VL15\n",
		    dd->vld[15].sc->sw_index, ctxt);
	for (i = 0; i < num_vls; i++) {
		sc_enable(dd->vld[i].sc);
		ctxt = dd->vld[i].sc->hw_context;
		mask = all_vl_mask & ~(1LL << i);
		write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
	}
	return 0;
nomem:
	sc_free(dd->vld[15].sc);
	for (i = 0; i < num_vls; i++)
		sc_free(dd->vld[i].sc);
	return -ENOMEM;
}

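/*
 * Allocate one credit return area per online NUMA node, each large enough
 * for every hardware send context, and record both the kernel virtual and
 * DMA addresses in dd->cr_base[].
 */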
int init_credit_return(struct hfi1_devdata *dd)
{
	int ret;
	int num_numa;
	int i;

	num_numa = num_online_nodes();

	for (i = 0; i < num_numa; i++) {
		if (!node_online(i)) {
			dd_dev_err(dd, "NUMA nodes are not compact\n");
			ret = -EINVAL;
			goto done;
		}
	}

	dd->cr_base = kcalloc(
		num_numa,
		sizeof(struct credit_return_base),
		GFP_KERNEL);
	if (!dd->cr_base) {
		dd_dev_err(dd, "Unable to allocate credit return base\n");
		ret = -ENOMEM;
		goto done;
	}
	for (i = 0; i < num_numa; i++) {
		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);

		set_dev_node(&dd->pcidev->dev, i);
		dd->cr_base[i].va = dma_zalloc_coherent(
					&dd->pcidev->dev,
					bytes,
					&dd->cr_base[i].pa,
					GFP_KERNEL);
		if (dd->cr_base[i].va == NULL) {
			set_dev_node(&dd->pcidev->dev, dd->node);
			dd_dev_err(dd,
				   "Unable to allocate credit return DMA range for NUMA %d\n",
				   i);
			ret = -ENOMEM;
			goto done;
		}
	}
	set_dev_node(&dd->pcidev->dev, dd->node);

	ret = 0;
done:
	return ret;
}

void free_credit_return(struct hfi1_devdata *dd)
{
	int num_numa;
	int i;

	if (!dd->cr_base)
		return;

	num_numa = num_online_nodes();
	for (i = 0; i < num_numa; i++) {
		if (dd->cr_base[i].va) {
			dma_free_coherent(&dd->pcidev->dev,
					  TXE_NUM_CONTEXTS
					  * sizeof(struct credit_return),
					  dd->cr_base[i].va,
					  dd->cr_base[i].pa);
		}
	}
	kfree(dd->cr_base);
	dd->cr_base = NULL;
}