/*
 * Memory and DMA pool management for the Emulex LPFC Fibre Channel
 * Host Bus Adapter driver (SCSI and NVMe over FC).
 */
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"

#define LPFC_MBUF_POOL_SIZE		64
#define LPFC_MEM_POOL_SIZE		64
#define LPFC_DEVICE_DATA_POOL_SIZE	64

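/**
 * lpfc_mem_alloc_active_rrq_pool_s4 - Create the SLI4 active RRQ mempool
 * @phba: HBA to allocate the pool for
 *
 * Description: Sizes an XRI bitmap from the configured max_xri and creates
 * a kmalloc-backed mempool of LPFC_MEM_POOL_SIZE such bitmaps, used to
 * track active RRQ resources per node.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure
 **/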
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
{
	size_t bytes;
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (max_xri <= 0)
		return -ENOMEM;
	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
		sizeof(unsigned long);
	phba->cfg_rrq_xri_bitmap_sz = bytes;
	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							    bytes);
	if (!phba->active_rrq_pool)
		return -ENOMEM;

	return 0;
}

/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment requirement for blocks, must be a power of two
 *
 * Description: Creates and allocates the PCI pools lpfc_sg_dma_buf_pool and
 * lpfc_mbuf_pool, the MBUF safety pool, kmalloc-backed mempools for mailbox
 * commands and node list entries, and the SLI-rev-specific header/data or
 * HBQ buffer pools.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
 * allocation fails, frees all successfully allocated memory before returning.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Calculate alignment */
		if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
			i = phba->cfg_sg_dma_buf_size;
		else
			i = SLI4_PAGE_SIZE;

		phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
		if (!phba->lpfc_sg_dma_buf_pool)
			goto fail;

	} else {
		phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					align, 0);

		if (!phba->lpfc_sg_dma_buf_pool)
			goto fail;
	}

	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool",
					       &phba->pcidev->dev,
					       LPFC_BPL_SIZE,
					       align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail_free_dma_buf_pool;

	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
						GFP_KERNEL,
						&pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
				&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
					LPFC_DEVICE_DATA_POOL_SIZE,
					sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
 fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
 fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
 fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
 fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
 fail:
	return -ENOMEM;
}

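/**
 * lpfc_nvmet_mem_alloc - create the NVMET data buffer pool
 * @phba: HBA to allocate the pool for
 *
 * Description: Creates the lpfc_nvmet_drb_pool PCI pool of
 * LPFC_NVMET_DATA_BUF_SIZE receive buffers, aligned to SGL_ALIGN_SZ, used
 * by the NVME target to post receive-queue data buffers.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure
 **/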
int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
	phba->lpfc_nvmet_drb_pool =
		dma_pool_create("lpfc_nvmet_drb_pool",
				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
				SGL_ALIGN_SZ, 0);
	if (!phba->lpfc_nvmet_drb_pool) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6024 Can't enable NVME Target - no memory\n");
		return -ENOMEM;
	}
	return 0;
}

/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Free memory from PCI and driver memory pools and all allocated
 * lpfc_dmabuf, deleting the HBQ buffers, destroying the mempools and dma
 * pools as appropriate.
 *
 * Returns: None
 **/
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	/* Free HBQ pools; the destroy helpers below are all NULL-safe */
	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
	dma_pool_destroy(phba->txrdy_payload_pool);
	phba->txrdy_payload_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	/* Free NLP memory pool */
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	/* Free mbox memory pool */
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	/* Free MBUF memory pool */
	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	/* Free DMA buffer memory pool */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	/* Free Device Data memory pool */
	if (phba->device_data_mem_pool) {
		/* Ensure all objects have been returned to the pool */
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data, phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
}

/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Free memory from PCI and driver memory pools and all allocated
 * lpfc_dmabuf, deleting the HBQ buffers, freeing the mailbox commands in the
 * mailbox queues, and freeing the iocb lookup array.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf *mp;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		mp = (struct lpfc_dmabuf *)(mbox->context1);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *)(mbox->context1);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *)(mbox->context1);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;
}

/**
 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
 * @handle: used to return the DMA-mapped address of the mbuf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI
 * pool.  Allocates from the generic dma_pool_alloc function first; if that
 * fails and mem_flags has MEM_PRI set (the priority bit), allocation is
 * attempted from the safety pool.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns:
 *   pointer to the allocated mbuf on success
 *   NULL on failure
 **/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	unsigned long iflags;
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
		pool->current_count--;
		ret = pool->elements[pool->current_count].virt;
		*handle = pool->elements[pool->current_count].phys;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an mbuf to the safety pool if there is room in it,
 * otherwise returns it to the PCI pool.
 *
 * Notes: Must be called with phba->hbalock held to synchronize access to
 * the safety pool.
 *
 * Returns: None
 **/
void
__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

	if (pool->current_count < pool->max_count) {
		pool->elements[pool->current_count].virt = virt;
		pool->elements[pool->current_count].phys = dma;
		pool->current_count++;
	} else {
		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
	}
}

/**
 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
 * @phba: HBA which owns the pool to return to
 * @virt: mbuf to free
 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
 *
 * Description: Returns an mbuf to the lpfc_mbuf_safety_pool if it is below
 * its max_count, frees the mbuf otherwise.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbuf_free(phba, virt, dma);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
 * lpfc_sg_dma_buf_pool PCI pool
 * @phba: HBA which owns the pool to allocate from
 * @mem_flags: NVMET memory flags (currently unused)
 * @handle: used to return the DMA-mapped address of the nvmet_buf
 *
 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
 * PCI pool.
 *
 * Returns:
 *   pointer to the allocated nvmet_buf on success
 *   NULL on failure
 **/
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	return dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
}

/**
 * lpfc_nvmet_buf_free - Free an nvmet_buf to the lpfc_sg_dma_buf_pool
 * PCI pool
 * @phba: HBA which owns the pool to return to
 * @virt: nvmet_buf to free
 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
 *
 * Returns: None
 **/
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}

/**
 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
 * @phba: HBA to allocate HBQ buffer for
 *
 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
 * pool along with a non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.
 *
 * Returns:
 *   pointer to HBQ on success
 *   NULL on failure
 **/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *hbqbp;

	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!hbqbp)
		return NULL;

	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
					  &hbqbp->dbuf.phys);
	if (!hbqbp->dbuf.virt) {
		kfree(hbqbp);
		return NULL;
	}
	hbqbp->total_size = LPFC_BPL_SIZE;
	return hbqbp;
}

/**
 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
 * @phba: HBA buffer was allocated for
 * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffer returned
 * by lpfc_els_hbq_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
	kfree(hbqbp);
}

/**
 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * (header) and lpfc_drb_pool (data) PCI pools along with a non-DMA-mapped
 * container for it.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.
 *
 * Returns:
 *   pointer to the container on success
 *   NULL on failure
 **/
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
	return dma_buf;
}

/**
 * lpfc_sli4_rb_free - Frees a receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned
 * by lpfc_sli4_rb_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}

/**
 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer for NVMET
 * @phba: HBA to allocate a receive buffer for
 *
 * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool
 * (header) and lpfc_nvmet_drb_pool (data) PCI pools along with a
 * non-DMA-mapped container for it.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.
 *
 * Returns:
 *   pointer to the container on success
 *   NULL on failure
 **/
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
	struct rqb_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
					    GFP_KERNEL, &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
	return dma_buf;
}

/**
 * lpfc_sli4_nvmet_free - Frees an NVMET receive buffer
 * @phba: HBA buffer was allocated for
 * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
 *
 * Description: Frees both the container and the DMA-mapped buffers returned
 * by lpfc_sli4_nvmet_alloc.
 *
 * Notes: Can be called with or without locks held.
 *
 * Returns: None
 **/
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_nvmet_drb_pool,
		      dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}

/**
 * lpfc_in_buf_free - Free a DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way, depending
 * on whether the HBA is running in SLI3 mode with HBQs enabled.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}

/**
 * lpfc_rq_buf_free - Free a RQ DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Reposts the given DMA buffer to its receive queue so it can
 * be reused; if the repost fails, frees the buffer through the queue's
 * rqb_free_buffer handler.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
	} else {
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}