// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * (This file carries the driver's dual BSD/GPLv2 license header.)
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/xarray.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024)
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024)

#define NUM_IB_PORTS 1

/*
 * Number of user receive contexts we are configured to use (to allow for
 * more pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16 size);

DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return: reflects kref_get_unless_zero(), which returns non-zero on
 * increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	return kref_get_unless_zero(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index - grab a reference to the indexed rcd
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more index, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		if (!hfi1_rcd_get(rcd))
			rcd = NULL;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}
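
/*
 * Typical lookup pattern (an illustrative sketch, not called anywhere):
 * a successful hfi1_rcd_get_by_index() must be paired with a
 * hfi1_rcd_put() once the caller is done with the context:
 *
 *	rcd = hfi1_rcd_get_by_index(dd, ctxt);
 *	if (rcd) {
 *		... use rcd ...
 *		hfi1_rcd_put(rcd);
 *	}
 */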

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(rcd);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;

		mutex_init(&rcd->exp_mutex);
		spin_lock_init(&rcd->exp_lock);
		INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
		INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous context would have
		 * taken up (we can't allocate them from the middle of the
		 * RcvArray).
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;
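
		/*
		 * Worked example (hypothetical numbers, for illustration
		 * only): with ngroups = 2, group_size = 8 and
		 * kctxt_ngroups = 1, kernel context 0 gets three groups at
		 * base 0 (RcvArray entries 0-23), and kernel context 1 gets
		 * two groups at base 1 + 1 * 2 = 3 (entries 24-39).
		 */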

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		rcd->rhf_offset =
			rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);

		/*
		 * Simple Eager buffer allocation: we have already pre-allocated
		 * the number of RcvArray entry groups. Each ctxtdata structure
		 * holds the number of groups for that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %u\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;

			/* Initialize TID flow generations for the context */
			hfi1_kern_init_ctxt_generations(rcd);
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt - free a context after use
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size (measured in DWORDs) to the
 * encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0;
}
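
/*
 * For reference (a worked sketch, not exercised by the driver itself):
 * header queue entry sizes are given in DWORDs, so the default
 * hdrq_entsize of 32 corresponds to 128B entries, and
 * encode_rcv_header_entry_size(32) returns 4 - the value programmed
 * into RCV_HDR_ENT_SIZE by hfi1_create_rcvhdrq() below.
 */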

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
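
/*
 * Worked example for the CCT decode above (hypothetical entry value):
 * cce = 0x8005 gives shift = (0x8005 & 0xc000) >> 14 = 2 and
 * mult = 0x8005 & 0x3fff = 5, so the programmed reload value becomes
 * (max_pkt_time >> 2) * 5, i.e. 1.25x the maximum packet egress time.
 */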

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */
	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
			   !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit activity remains (explicitly,
 * in case the reset failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for nicer debugging.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		if (HFI1_CAP_IS_KSET(TID_RDMA))
			rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
					"hfi%d_%d",
					WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
					WQ_MEM_RECLAIM,
					HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
					dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
					"hfi_link_%d_%d",
					WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
					1, /* max_active */
					dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 *
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
	set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
	set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
	set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
	set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
	set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because
 * these routines can be called multiple times and must be callable
 * multiple times, and must not free things that were allocated the
 * first time, unless they are also cleaned up and allocated again.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
							 sizeof(u64),
							 &dd->rcvhdrtail_dummy_dma,
							 GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (!lastfail)
			lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}

		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that Linux will know the driver loaded.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		enable_general_intr(dd);
		init_qsfp_int(dd);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that Linux will know the driver loaded.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	return xa_load(&hfi1_dev_table, unit);
}
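
/*
 * Note on locking (informational): xa_load() performs its own
 * RCU-protected lookup, so hfi1_lookup() needs no extra locking here.
 * Writers use xa_alloc_irq() and __xa_erase() under the table's lock;
 * see hfi1_alloc_devdata() and hfi1_clean_devdata().
 */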

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.   It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts */
	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
	msix_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);

		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_dev_table lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

/**
 * hfi1_clean_devdata - cleans up per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up and deallocates all the per-unit data structures
 * created by hfi1_alloc_devdata().
 */
static void hfi1_clean_devdata(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	unsigned long flags;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	__xa_erase(&hfi1_dev_table, dd->unit);
	ad = release_asic_data(dd);
	xa_unlock_irqrestore(&hfi1_dev_table, flags);

	finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	dd->int_counter = NULL;
	dd->rcv_limit = NULL;
	dd->send_schedule = NULL;
	dd->tx_opstats = NULL;
	kfree(dd->comp_vect);
	dd->comp_vect = NULL;
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);

	hfi1_clean_devdata(dd);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}

/**
 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
 * @pdev: Valid Linux PCI device pointer
 * @extra: How many bytes to alloc past the default
 *
 * Must be done via verbs allocator, because the verbs cleanup process
 * both does cleanup and free of the data structure.
 * "extra" is for chip-specific data.
 */
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
					       size_t extra)
{
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);
	dd->node = NUMA_NO_NODE;

	ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
			   GFP_KERNEL);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);
	spin_lock_init(&dd->irq_src_lock);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
	if (!dd->comp_vect) {
		ret = -ENOMEM;
		goto bail;
	}

	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	hfi1_clean_devdata(dd);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * Sanitize receive interrupt count and timeout; further
	 * validation of the combination follows below.
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;

	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	ret = opfn_init();
	if (ret < 0) {
		pr_err("Failed to allocate opfn_wq");
		goto bail_dev;
	}

	hfi1_compute_tid_rdma_flow_wt();
	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	hfi1_dbg_init();
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_dbg_exit();
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	opfn_exit();
	node_affinity_destroy_all();
	hfi1_dbg_exit();

	WARN_ON(!xa_empty(&hfi1_dev_table));
	dispose_firmware();	/* asynchronous firmware loading */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_free_ctxt_rcv_groups(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);
	hfi1_comp_vectors_clean_up(dd);
	hfi1_dev_affinity_clean_up(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		dd_dev_err(dd, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		dd_dev_err(dd,
			   "Receive header queue count cannot be greater than %u\n",
			   HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
			   thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Allocate the dd so we can get to work */
	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
			   hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to do an additional check.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		dd_dev_info(dd, "Eager buffer size %u\n",
			    eager_buffer_size);
	} else {
		dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(dd);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	ret = hfi1_init_dd(dd);
	if (ret)
		goto clean_bail; /* error already printed */

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		msix_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;	/* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Remove the device init value and complete the device if there is
	 * no clients or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	shutdown_device(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contained in a function that gets called once for each
 * receive context, so the header queue can be (re)allocated whenever the
 * context is (re)created.
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		gfp_t gfp_flags;

		amt = rcvhdrq_size(rcd);

		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
						  &rcd->rcvhdrq_dma,
						  gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
								    PAGE_SIZE,
								    &rcd->rcvhdrqtailaddr_dma,
								    gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
		}
	}

	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
 * contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0;
	gfp_t gfp_flags;
	u16 order, idx = 0;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value. Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffer sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_alloc_coherent(&dd->pcidev->dev,
					   rcd->egrbufs.rcvtid_size,
					   &rcd->egrbufs.buffers[idx].dma,
					   gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this implies
			 *     that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with a lower attempt.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
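
	/*
	 * Illustrative sketch of the fallback above (hypothetical sizes):
	 * if a 2MB chunk allocation fails after one success, rcvtid_size
	 * drops to 1MB and the 2MB buffer already allocated is re-described
	 * as two 1MB rcvtids (buffers[0].dma + 0 and buffers[0].dma + 1MB)
	 * before allocation continues at the smaller size.
	 */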
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base. This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}