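/*
 * lpfc_mem.c -- memory and buffer-pool management routines for the Emulex
 * LightPulse Fibre Channel (lpfc) host bus adapter driver.
 */
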
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"

#define LPFC_MBUF_POOL_SIZE		64	/* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE		64	/* max elements in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE	64	/* max elements in device data pool */
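
/**
 * lpfc_mem_alloc_active_rrq_pool_s4 - Create memory pool for active RRQ bitmaps
 * @phba: HBA to allocate pool for
 *
 * Description: Creates a mempool of XRI bitmaps sized to track the adapter's
 * max_xri exchanges, based on an initial read of the configuration.  SLI4
 * only.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (max_xri not configured or pool creation fails)
 **/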
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
{
	size_t bytes;
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (max_xri <= 0)
		return -ENOMEM;
	/* Round the bitmap size up to a whole number of unsigned longs. */
	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
		sizeof(unsigned long);
	phba->cfg_rrq_xri_bitmap_sz = bytes;
	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							    bytes);
	if (!phba->active_rrq_pool)
		return -ENOMEM;

	return 0;
}
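
/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment requirement for the DMA buffer pools
 *
 * Description: Creates and allocates the DMA pools lpfc_sg_dma_buf_pool and
 * lpfc_mbuf_pool plus, depending on the SLI revision, the hrb/drb or hbq
 * pools.  Creates kmalloc-backed mempools for LPFC_MBOXQ_t, lpfc_nodelist
 * and, on SLI4, lpfc_node_rrq objects, and an optional device-data pool
 * when XLane (OAS) is enabled.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
 * allocation fails, frees all successfully allocated memory before
 * returning.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (if any memory allocations fail)
 **/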
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Calculate alignment */
		if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
			i = phba->cfg_sg_dma_buf_size;
		else
			i = SLI4_PAGE_SIZE;

		phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
		if (!phba->lpfc_sg_dma_buf_pool)
			goto fail;
	} else {
		phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					align, 0);
		if (!phba->lpfc_sg_dma_buf_pool)
			goto fail;
	}

	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool",
					       &phba->pcidev->dev,
					       LPFC_BPL_SIZE,
					       align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail_free_dma_buf_pool;

	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
						GFP_KERNEL,
						&pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
					LPFC_DEVICE_DATA_POOL_SIZE,
					sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
fail_free_mbuf_pool:
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);
fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
fail:
	return -ENOMEM;
}
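
/**
 * lpfc_nvmet_mem_alloc - create the NVME Target data buffer DMA pool
 * @phba: HBA to allocate the pool for
 *
 * Returns:
 *   0 on success
 *   -ENOMEM if the DMA pool cannot be created
 **/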
int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
	phba->lpfc_nvmet_drb_pool =
		dma_pool_create("lpfc_nvmet_drb_pool",
				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
				SGL_ALIGN_SZ, 0);
	if (!phba->lpfc_nvmet_drb_pool) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6024 Can't enable NVME Target - no memory\n");
		return -ENOMEM;
	}
	return 0;
}
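
/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Free the memory allocated by the lpfc_mem_alloc routine.
 * This routine is the counterpart of lpfc_mem_alloc.
 *
 * Returns: None
 **/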
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	/* Free HBQ pools (dma_pool_destroy/mempool_destroy handle NULL) */
	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;

	dma_pool_destroy(phba->txrdy_payload_pool);
	phba->txrdy_payload_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	/* Free NLP memory pool */
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	/* Free mbox memory pool */
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	/* Free MBUF memory pool */
	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	/* Free DMA buffer memory pool */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	/* Free Device Data memory pool */
	if (phba->device_data_mem_pool) {
		/* Return any outstanding LUN objects before destruction */
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data, phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
}
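
/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Returns outstanding mailbox commands (queued, completed and
 * active) to the mailbox memory pool, then destroys all memory pools via
 * lpfc_mem_free and frees the iocb lookup array.
 *
 * Returns: None
 **/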
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf *mp;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;
}
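
/**
 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool DMA pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
 * @handle: used to return the DMA-mapped address of the mbuf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool DMA
 * pool.  Allocates via dma_pool_alloc first; if that fails and MEM_PRI is
 * set in mem_flags, the allocation falls back to the safety pool.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.  Takes
 * phba->hbalock.
 *
 * Returns:
 *   pointer to the allocated mbuf on success
 *   NULL on failure
 **/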
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	unsigned long iflags;
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
		pool->current_count--;
		ret = pool->elements[pool->current_count].virt;
		*handle = pool->elements[pool->current_count].phys;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
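
/**
 * __lpfc_mbuf_free - Free an mbuf (locked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the mbuf to be freed
 *
 * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if that pool is
 * below its max_count, and frees it back to the lpfc_mbuf_pool DMA pool
 * otherwise.
 *
 * Notes: Must be called with phba->hbalock held to synchronize access to
 * lpfc_mbuf_safety_pool.
 *
 * Returns: None
 **/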
void
__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

	if (pool->current_count < pool->max_count) {
		pool->elements[pool->current_count].virt = virt;
		pool->elements[pool->current_count].phys = dma;
		pool->current_count++;
	} else {
		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
	}
}
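
/**
 * lpfc_mbuf_free - Free an mbuf
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the mbuf to be freed
 *
 * Description: Wrapper around __lpfc_mbuf_free that takes phba->hbalock.
 *
 * Returns: None
 **/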
void
lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbuf_free(phba, virt, dma);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
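
/**
 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the lpfc_sg_dma_buf_pool
 * DMA pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: NVME buffer flags (currently unused)
 * @handle: used to return the DMA-mapped address of the nvmet_buf
 *
 * Returns:
 *   pointer to the allocated nvmet_buf on success
 *   NULL on failure
 **/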
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	return dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
}
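
/**
 * lpfc_nvmet_buf_free - Free an nvmet_buf back to the lpfc_sg_dma_buf_pool
 * @phba: HBA which owns the pool to return to
 * @virt: nvmet_buf to free
 * @dma: the DMA-mapped address of the nvmet_buf to be freed
 *
 * Returns: None
 **/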
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}
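
/**
 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
 * @phba: HBA to allocate an HBQ buffer for
 *
 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool DMA
 * pool along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.
 *
 * Returns:
 *   pointer to the HBQ buffer on success
 *   NULL on failure
 **/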
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *hbqbp;

	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!hbqbp)
		return NULL;

	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
					  &hbqbp->dbuf.phys);
	if (!hbqbp->dbuf.virt) {
		kfree(hbqbp);
		return NULL;
	}
	hbqbp->total_size = LPFC_BPL_SIZE;
	return hbqbp;
}
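
/**
 * lpfc_els_hbq_free - Free an HBQ buffer allocated by lpfc_els_hbq_alloc
 * @phba: HBA the HBQ buffer was allocated for
 * @hbqbp: HBQ buffer to free
 *
 * Returns: None
 **/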
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
	kfree(hbqbp);
}
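
/**
 * lpfc_sli4_rb_alloc - Allocate an SLI4 receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * and lpfc_drb_pool DMA pools along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.
 *
 * Returns:
 *   pointer to the receive buffer on success
 *   NULL on failure
 **/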
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
	return dma_buf;
}
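
/**
 * lpfc_sli4_rb_free - Free an SLI4 receive buffer allocated by
 * lpfc_sli4_rb_alloc
 * @phba: HBA the receive buffer was allocated for
 * @dmab: receive buffer to free
 *
 * Returns: None
 **/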
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
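
/**
 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 receive buffer for NVME Target
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * and lpfc_nvmet_drb_pool DMA pools along with a non-DMA-mapped container
 * for it.
 *
 * Returns:
 *   pointer to the receive buffer on success
 *   NULL on failure
 **/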
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
	struct rqb_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
					    GFP_KERNEL, &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
	return dma_buf;
}
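
/**
 * lpfc_sli4_nvmet_free - Free an SLI4 NVMET receive buffer allocated by
 * lpfc_sli4_nvmet_alloc
 * @phba: HBA the receive buffer was allocated for
 * @dmab: receive buffer to free
 *
 * Returns: None
 **/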
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_nvmet_drb_pool,
		      dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
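
/**
 * lpfc_in_buf_free - Free a DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way given
 * whether the HBA is running in SLI3 mode with HBQs enabled.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/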
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		if (hbq_entry->tag == -1) {
			/* Untagged buffer: free via the ELS HBQ handler */
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}
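
/**
 * lpfc_rq_buf_free - Free a receive-queue DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Attempts to repost the buffer to its SLI4 receive queue; if
 * the repost fails, the buffer is freed via the queue's registered
 * rqb_free_buffer handler.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/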
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
	} else {
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}