/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses: the GNU General Public License, Version 2, available from
 * the file COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"
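
/* Walk every ingress queue in a ULD's RX queue set: the ordinary offload
 * RX queues come first in uldrxq[], followed by the concentrator (CIQ)
 * queues.
 */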
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
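
/* Claim a free MSI-X index from the ULD bitmap under its spinlock;
 * returns -ENOSPC once every slot is taken.
 */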
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Processes an ingress offload packet and delivers it to the offload
 *	modules.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
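
/* Allocate a ULD's response queues and free lists, spreading them evenly
 * across the adapter's ports. With MSI-X, each queue takes a dedicated
 * vector from the ULD bitmap; otherwise the shared forwarded-interrupt
 * queue is used.
 */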
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	struct sge *s = &adap->sge;
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	int bmap_idx = 0;
	unsigned int per_chan;
	int i, err, msi_idx, que_idx = 0;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			if (bmap_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}

static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

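	/* The double negation collapses any error from alloc_uld_rxqs() to 1,
	 * so callers learn only that setup failed, not why.
	 */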
	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}
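
/* Size a ULD's RX queue set: queue counts are capped by the per-ULD MSI-X
 * budget (or the number of online CPUs) and rounded to a multiple of the
 * port count; concentrator queues, when requested, get at least one queue
 * per port.
 */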
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		gmb();
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		gmb();
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq;
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}
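
/* Request one IRQ per ULD RX queue; on failure, release in reverse order
 * every vector handed out so far.
 */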
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}
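
/* Allocate the ULD TX queues, spread evenly across the adapter's ports;
 * their completion/CIDX updates are directed to the firmware event queue.
 */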
static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}
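
/* OFLD TX queue sets are shared between ULDs: they are allocated once and
 * reference counted, so later registrants only take an extra reference.
 */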
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;

	i = min_t(int, uld_info->ntxq, num_online_cpus());
	txq_info->ntxq = roundup(i, adap->params.nports);

	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}
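
/* Allocate the per-adapter ULD bookkeeping: the ULD descriptor table and
 * the arrays of per-ULD RX/TX queue-info pointers.
 */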
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kzalloc(CXGB4_TX_MAX *
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}
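
/* Fill in a cxgb4_lld_info snapshot of the adapter resources and
 * parameters that a ULD needs at attach time.
 */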
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
}

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	int ret = 0;
	unsigned int adap_idx = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		uld_attach(adap, type);
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
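	/* On failure, roll back every adapter that was already set up above. */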
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);