#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"

#define LPFC_MBUF_POOL_SIZE		64
#define LPFC_MEM_POOL_SIZE		64
#define LPFC_DEVICE_DATA_POOL_SIZE	64

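/**
 * lpfc_mem_alloc_active_rrq_pool_s4 - create the active RRQ bitmap mempool
 * @phba: HBA to allocate the pool for
 *
 * Description: Sizes an XRI bitmap from the adapter's max_xri configuration,
 * stores the size in cfg_rrq_xri_bitmap_sz, and creates the active_rrq_pool
 * mempool used to allocate those bitmaps.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM if max_xri is not positive or pool creation fails
 **/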
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
{
	size_t bytes;
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (max_xri <= 0)
		return -ENOMEM;
	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
		sizeof(unsigned long);
	phba->cfg_rrq_xri_bitmap_sz = bytes;
	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							    bytes);
	if (!phba->active_rrq_pool)
		return -ENOMEM;
	else
		return 0;
}
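/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment requirement for the DMA pool blocks
 *
 * Description: Creates the lpfc_mbuf_pool DMA pool and preallocates the
 * lpfc_mbuf_safety_pool from it, then creates kmalloc-backed mempools for
 * mailbox commands and node list entries. On SLI-4 adapters it also creates
 * the RRQ mempool and the header/data receive buffer DMA pools; on SLI-3 it
 * creates the HBQ DMA pool instead. A device data mempool is created when
 * XLane is enabled.
 *
 * Returns 0 on success; on failure all pools allocated so far are released
 * and -ENOMEM is returned.
 **/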
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool",
					       &phba->pcidev->dev,
					       LPFC_BPL_SIZE, align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail;

	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
						GFP_KERNEL,
						&pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
				&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
					LPFC_DEVICE_DATA_POOL_SIZE,
					sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
 fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
 fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
 fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
 fail:
	return -ENOMEM;
}

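/**
 * lpfc_nvmet_mem_alloc - create the NVMET receive data buffer pool
 * @phba: HBA to allocate the pool for
 *
 * Description: Creates the lpfc_nvmet_drb_pool DMA pool of
 * LPFC_NVMET_DATA_BUF_SIZE buffers used to post NVME target receive queues.
 *
 * Returns 0 on success, -ENOMEM if the pool cannot be created.
 **/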
int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
	phba->lpfc_nvmet_drb_pool =
		dma_pool_create("lpfc_nvmet_drb_pool",
				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
				SGL_ALIGN_SZ, 0);
	if (!phba->lpfc_nvmet_drb_pool) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6024 Can't enable NVME Target - no memory\n");
		return -ENOMEM;
	}
	return 0;
}
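/**
 * lpfc_mem_free - free memory allocated by lpfc_mem_alloc
 * @phba: HBA whose pools are being destroyed
 *
 * Description: Frees all outstanding HBQ buffers, destroys the DMA pools and
 * mempools created by lpfc_mem_alloc and lpfc_mem_alloc_active_rrq_pool_s4,
 * releases the mbuf safety pool elements, and frees any device data entries
 * still linked on phba->luns before destroying the device data mempool.
 *
 * Returns: None
 **/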
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	if (phba->device_data_mem_pool) {
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data, phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
	return;
}
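/**
 * lpfc_mem_free_all - free all PCI and driver memory
 * @phba: HBA being torn down
 *
 * Description: Flushes the pending, completed, and active mailbox commands
 * (freeing any attached mbuf payloads), then calls lpfc_mem_free to destroy
 * the pools, and finally releases the scatter-gather and command/response
 * DMA buffer pools and the iocbq lookup array.
 *
 * Returns: None
 **/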
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf *mp;

	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}

	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	lpfc_mem_free(phba);

	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;

	return;
}
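/**
 * lpfc_mbuf_alloc - allocate an mbuf from the lpfc_mbuf_pool DMA pool
 * @phba: HBA which owns the pool
 * @mem_flags: set MEM_PRI to allow fallback to the mbuf safety pool
 * @handle: used to return the DMA address of the allocated buffer
 *
 * Description: Allocates a DMA-mapped buffer from lpfc_mbuf_pool. If that
 * allocation fails and MEM_PRI is set, a preallocated element is taken from
 * the lpfc_mbuf_safety_pool under the hbalock.
 *
 * Returns the virtual address of the buffer, or NULL if none is available.
 **/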
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	unsigned long iflags;
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
		pool->current_count--;
		ret = pool->elements[pool->current_count].virt;
		*handle = pool->elements[pool->current_count].phys;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
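/**
 * __lpfc_mbuf_free - free an mbuf (locked version)
 * @phba: HBA which owns the buffer
 * @virt: mbuf virtual address to free
 * @dma: mbuf DMA address to free
 *
 * Description: Returns the buffer to the lpfc_mbuf_safety_pool if it is not
 * yet full, otherwise frees it back to the lpfc_mbuf_pool DMA pool. The
 * hbalock is assumed to be held by the caller.
 *
 * Returns: None
 **/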
void
__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

	if (pool->current_count < pool->max_count) {
		pool->elements[pool->current_count].virt = virt;
		pool->elements[pool->current_count].phys = dma;
		pool->current_count++;
	} else {
		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
	}
	return;
}
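/**
 * lpfc_mbuf_free - free an mbuf (unlocked version)
 * @phba: HBA which owns the buffer
 * @virt: mbuf virtual address to free
 * @dma: mbuf DMA address to free
 *
 * Description: Takes phba->hbalock and calls __lpfc_mbuf_free, so the buffer
 * is either returned to the safety pool or released to the DMA pool.
 *
 * Returns: None
 **/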
void
lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbuf_free(phba, virt, dma);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
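/**
 * lpfc_nvmet_buf_alloc - allocate an NVMET scatter-gather DMA buffer
 * @phba: HBA which owns the pool
 * @mem_flags: currently unused
 * @handle: used to return the DMA address of the allocated buffer
 *
 * Description: Allocates a buffer from the lpfc_sg_dma_buf_pool DMA pool.
 *
 * Returns the virtual address of the buffer, or NULL on failure.
 **/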
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
	return ret;
}
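/**
 * lpfc_nvmet_buf_free - free an NVMET scatter-gather DMA buffer
 * @phba: HBA which owns the pool
 * @virt: buffer virtual address to free
 * @dma: buffer DMA address to free
 *
 * Description: Returns the buffer to the lpfc_sg_dma_buf_pool DMA pool.
 *
 * Returns: None
 **/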
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}
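/**
 * lpfc_els_hbq_alloc - allocate an SLI-3 ELS HBQ buffer
 * @phba: HBA to allocate the HBQ buffer for
 *
 * Description: Allocates an hbq_dmabuf descriptor with kzalloc and backs it
 * with an LPFC_BPL_SIZE buffer from the lpfc_hbq_pool DMA pool.
 *
 * Returns a pointer to the HBQ buffer, or NULL if either allocation fails.
 **/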
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *hbqbp;

	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!hbqbp)
		return NULL;

	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
					  &hbqbp->dbuf.phys);
	if (!hbqbp->dbuf.virt) {
		kfree(hbqbp);
		return NULL;
	}
	hbqbp->total_size = LPFC_BPL_SIZE;
	return hbqbp;
}
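/**
 * lpfc_els_hbq_free - free an SLI-3 ELS HBQ buffer
 * @phba: HBA the buffer was allocated from
 * @hbqbp: HBQ buffer to free
 *
 * Description: Frees the DMA buffer back to the lpfc_hbq_pool and releases
 * the hbq_dmabuf descriptor.
 *
 * Returns: None
 **/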
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
	kfree(hbqbp);
	return;
}
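/**
 * lpfc_sli4_rb_alloc - allocate an SLI-4 receive buffer pair
 * @phba: HBA to allocate the receive buffer for
 *
 * Description: Allocates an hbq_dmabuf descriptor and backs it with a header
 * buffer from lpfc_hrb_pool and a data buffer from lpfc_drb_pool.
 *
 * Returns a pointer to the receive buffer, or NULL if any allocation fails
 * (anything already allocated is freed before returning).
 **/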
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
	return dma_buf;
}
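/**
 * lpfc_sli4_rb_free - free an SLI-4 receive buffer pair
 * @phba: HBA the buffer was allocated from
 * @dmab: receive buffer to free
 *
 * Description: Frees the header and data buffers back to their DMA pools and
 * releases the hbq_dmabuf descriptor.
 *
 * Returns: None
 **/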
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
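/**
 * lpfc_sli4_nvmet_alloc - allocate an SLI-4 NVMET receive buffer pair
 * @phba: HBA to allocate the receive buffer for
 *
 * Description: Allocates an rqb_dmabuf descriptor and backs it with a header
 * buffer from lpfc_hrb_pool and a data buffer from lpfc_nvmet_drb_pool.
 *
 * Returns a pointer to the receive buffer, or NULL if any allocation fails
 * (anything already allocated is freed before returning).
 **/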
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
	struct rqb_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
					    GFP_KERNEL, &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
	return dma_buf;
}
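/**
 * lpfc_sli4_nvmet_free - free an SLI-4 NVMET receive buffer pair
 * @phba: HBA the buffer was allocated from
 * @dmab: receive buffer to free
 *
 * Description: Frees the header buffer back to lpfc_hrb_pool and the data
 * buffer back to lpfc_nvmet_drb_pool, then releases the rqb_dmabuf
 * descriptor.
 *
 * Returns: None
 **/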
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_nvmet_drb_pool,
		      dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
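/**
 * lpfc_in_buf_free - free an unsolicited receive buffer
 * @phba: HBA the buffer was posted to
 * @mp: buffer to free
 *
 * Description: When HBQs are enabled, the buffer is removed from its list
 * under the hbalock and, if untagged (tag == -1), handed back to the ELS
 * HBQ's free routine, otherwise returned with lpfc_sli_free_hbq. When HBQs
 * are not enabled the buffer is a plain mbuf and is freed with
 * lpfc_mbuf_free.
 *
 * Returns: None
 **/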
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);

		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return;
}
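/**
 * lpfc_rq_buf_free - repost or free an SLI-4 receive queue buffer
 * @phba: HBA the buffer belongs to
 * @mp: header buffer of the rqb_dmabuf to repost
 *
 * Description: Removes the buffer from its list and attempts to repost its
 * header and data addresses to the header/data receive queues with
 * lpfc_sli4_rq_put. On success the buffer is put back on the RQ's buffer
 * list; on failure an error is logged and the buffer is released through the
 * RQ's rqb_free_buffer routine.
 *
 * Returns: None
 **/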
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
	} else {
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}