#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * Minimum number of buffers we want to have per user context, after driver.
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.).  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, S_IRUGO);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (2 << 20); /* 2MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;
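
/*
 * Create the receive context array and one or more kernel contexts
 * (the control context first), including their send contexts.
 */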
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
	unsigned i;
	int ret;

	/* Control context has to be the first one */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		goto nomem;

	/* create one or more kernel contexts */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct hfi1_pportdata *ppd;
		struct hfi1_ctxtdata *rcd;

		ppd = dd->pport + (i % dd->num_pports);

		/* dd->rcd[i] gets assigned inside the callee */
		rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
		if (!rcd) {
			dd_dev_err(dd,
				   "Unable to allocate kernel receive context, failing\n");
			goto nomem;
		}
		/*
		 * Set up the kernel context flags here and now because they
		 * use default values for all receive side memories.  User
		 * contexts will be handled as they are created.
		 */
		rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
			HFI1_CAP_KGET(NODROP_RHQ_FULL) |
			HFI1_CAP_KGET(NODROP_EGR_FULL) |
			HFI1_CAP_KGET(DMA_RTAIL);

		/* Control context must use DMA_RTAIL */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			rcd->flags |= HFI1_CAP_DMA_RTAIL;
		rcd->seq_cnt = 1;

		rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
		if (!rcd->sc) {
			dd_dev_err(dd,
				   "Unable to allocate kernel send context, failing\n");
			goto nomem;
		}

		ret = hfi1_init_ctxt(rcd->sc);
		if (ret < 0) {
			dd_dev_err(dd,
				   "Failed to setup kernel receive context, failing\n");
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
	aspm_init(dd);

	return 0;
nomem:
	ret = -ENOMEM;
bail:
	if (dd->rcd) {
		for (i = 0; i < dd->num_rcv_contexts; ++i)
			hfi1_free_ctxtdata(dd, dd->rcd[i]);
	}
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}
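
/*
 * Common code for user and kernel context setup.
 */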
struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
					   int numa)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_user_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_user_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;

		hfi1_cdbg(PROC, "setting up context %u\n", ctxt);

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this on the base (non-split) so that it never
		 * changes for a context.  Kernel contexts that get extra
		 * groups are given the extra entries first.
		 */
		if (ctxt < dd->first_user_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
		} else {
			u16 ct = ctxt - dd->first_user_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups for
		 * that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples
		 * of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kzalloc_node(
			rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
			GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kzalloc_node(
				rcd->egrbufs.count *
				sizeof(*rcd->egrbufs.rcvtids),
				GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;
		}
	}
	return rcd;
bail:
	dd->rcd[ctxt] = NULL;
	kfree(rcd->egrbufs.rcvtids);
	kfree(rcd->egrbufs.buffers);
	kfree(rcd);
	return NULL;
}
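
/*
 * Convert a receive header entry size (in DWs) to the encoding used in
 * the CSR RcvHdrEntSize field.  Returns 0 if the given size is invalid.
 */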
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}
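
/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */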
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits/sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrease ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */
	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}
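
/*
 * Common code for initializing the physical port structure.
 */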
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}
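
/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit activity will occur while we
 * re-initialize.
 */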
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for cases where we are re-initializing due to an error or
	 * hardware reset.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, i);
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u32 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, i);
		sc_enable(dd->rcd[i]->sc);
	}
}
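
/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */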
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				    dd->num_sdma,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	return -ENOMEM;
}
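
/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done when called
 * from the PCI probe routine, or when the chip needs to be reset
 * (re-enable after reset).  But the reset code that re-allocates and
 * re-maps memory needs to be separated out since the chip does full
 * memory init during the reset, not just a cancel of the allocations.
 */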
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned i, len;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
		kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
		kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
		process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
		process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
		process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_dma,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that Linux knows the chip is here.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that Linux knows the chip is here.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}
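
/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
 */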
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask interrupts, but not errors */
	set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++)
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);

		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	sdma_exit(dd);
}
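
/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * Free up any allocated data for a context.
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */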
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);

	sc_free(rcd->sc);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);
	kfree(rcd);
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link to the asic data */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	if (ad)
		finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}
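
/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */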
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is the size of the per-port structures times num ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}

	/*
	 * Initialize all locks for the device.  This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu rcv_limit\n");
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu send_schedule\n");
		goto bail;
	}

	if (!hfi1_cpulist_count) {
		u32 count = num_online_cpus();

		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				       GFP_KERNEL);
		if (hfi1_cpulist)
			hfi1_cpulist_count = count;
		else
			hfi1_early_err(
			&pdev->dev,
			"Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}
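
/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */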
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();

	/* sanitize receive interrupt count and timeout */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	node_affinity_destroy();
	hfi1_wss_exit();
	hfi1_dbg_exit();
	hfi1_cpulist_count = 0;
	kfree(hfi1_cpulist);

	idr_destroy(&hfi1_unit_table);
	dispose_firmware();	/* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;
	struct hfi1_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxtdata(dd, rcd);
		}
	}
	kfree(tmp);
	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
			       thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
		goto clean_bail;
	}

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	dd = hfi1_init_dd(pdev, ent);

	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto clean_bail;
	}

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;	/* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Remove the device init value and complete the device if there is
	 * no clients or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}
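
/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */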
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t dma_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
		 */
		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
				 sizeof(u32));

		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
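
/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
 *			  contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */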
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffers sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_zalloc_coherent(&dd->pcidev->dev,
					    rcd->egrbufs.rcvtid_size,
					    &rcd->egrbufs.buffers[idx].dma,
					    gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this
			 *     implies that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed,
			 * don't fail everything but continue with a lower
			 * buffer size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the contexts rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}
	goto bail;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}
bail:
	return ret;
}