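/*
 * Memory pool and DMA buffer management routines for the Emulex
 * LightPulse Fibre Channel (lpfc) host bus adapter driver.
 */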
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"

#define LPFC_MBUF_POOL_SIZE		64
#define LPFC_MEM_POOL_SIZE		64
#define LPFC_DEVICE_DATA_POOL_SIZE	64
#define LPFC_RRQ_POOL_SIZE		256
#define LPFC_MBX_POOL_SIZE		256
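
/**
 * lpfc_mem_alloc_active_rrq_pool_s4 - Create the SLI4 active RRQ mempool
 * @phba: HBA to allocate the pool for.
 *
 * Computes the size, in bytes, of an XRI bitmap covering max_xri bits and
 * creates a kmalloc-backed mempool of LPFC_MEM_POOL_SIZE such bitmaps for
 * tracking active RRQs.
 *
 * Returns 0 on success or -ENOMEM on failure.
 */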
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba)
{
	size_t bytes;
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (max_xri <= 0)
		return -ENOMEM;
	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
		sizeof(unsigned long);
	phba->cfg_rrq_xri_bitmap_sz = bytes;
	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							    bytes);
	if (!phba->active_rrq_pool)
		return -ENOMEM;

	return 0;
}
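
/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for.
 * @align: alignment, in bytes, for the DMA buffer pools.
 *
 * Creates the MBUF DMA pool and its pre-allocated safety pool, the mailbox
 * and nodelist mempools, and the SLI-rev specific receive buffer pools
 * (HBQ for SLI3, separate header/data pools for SLI4).  When XLane is
 * enabled, a device-data mempool is created as well.
 *
 * Returns 0 on success or -ENOMEM on failure; on failure every pool
 * allocated so far is released.
 */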
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	/* DMA pool of BPL-sized mbufs, backed by a pre-allocated safety pool */
	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool",
					       &phba->pcidev->dev,
					       LPFC_BPL_SIZE, align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail;

	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
					GFP_KERNEL, &pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI4 receive buffers use separate header and data pools */
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_RRQ_POOL_SIZE,
						sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		/* SLI3 uses a single BPL-sized HBQ buffer pool */
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
						LPFC_DEVICE_DATA_POOL_SIZE,
						sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
 fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
 fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
 fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
 fail:
	return -ENOMEM;
}
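
/**
 * lpfc_nvmet_mem_alloc - create the NVMET data receive buffer pool
 * @phba: HBA to allocate the pool for.
 *
 * Creates the DMA pool of LPFC_NVMET_DATA_BUF_SIZE receive buffers used
 * when the port runs as an NVME target.
 *
 * Returns 0 on success or -ENOMEM on failure.
 */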
int
lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
{
	phba->lpfc_nvmet_drb_pool =
		dma_pool_create("lpfc_nvmet_drb_pool",
				&phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
				SGL_ALIGN_SZ, 0);
	if (!phba->lpfc_nvmet_drb_pool) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6024 Can't enable NVME Target - no memory\n");
		return -ENOMEM;
	}
	return 0;
}
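
/**
 * lpfc_mem_free - free the memory pools created by lpfc_mem_alloc
 * @phba: HBA whose pools are being released.
 *
 * Drains the MBUF safety pool and destroys every DMA pool and mempool
 * created by lpfc_mem_alloc and lpfc_nvmet_mem_alloc, including any
 * device-data objects still queued on phba->luns.
 */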
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	/* Free HBQ pools */
	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	/* Free NLP memory pool */
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* mempool_destroy() is NULL-safe, no extra check needed */
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	/* Free mbox memory pool */
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	/* Free MBUF memory pool */
	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	/* Free Device Data memory pool */
	if (phba->device_data_mem_pool) {
		/* Ensure all objects have been returned to the pool */
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data,
				     phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
}
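
/**
 * lpfc_mem_free_all - free all PCI and driver memory
 * @phba: HBA being torn down.
 *
 * Returns every queued, completed, and active mailbox command (and any
 * attached mbuf) to its pool, then calls lpfc_mem_free and releases the
 * scatter-gather, command/response, congestion-info, RX table, and iocbq
 * lookup allocations.
 */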
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf *mp;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free DMA buffer memory pools */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	/* Free Congestion Info buffer */
	if (phba->cgn_i) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(struct lpfc_cgn_info),
				  phba->cgn_i->virt, phba->cgn_i->phys);
		kfree(phba->cgn_i);
		phba->cgn_i = NULL;
	}

	/* Free RX table */
	kfree(phba->rxtable);
	phba->rxtable = NULL;

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;
}
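
/**
 * lpfc_mbuf_alloc - allocate an mbuf from the lpfc_mbuf_pool DMA pool
 * @phba: HBA to allocate from.
 * @mem_flags: MEM_PRI to allow use of the safety pool on failure.
 * @handle: used to return the DMA-mapped address of the mbuf.
 *
 * Allocates from the MBUF DMA pool, falling back to the pre-allocated
 * safety pool when the pool is exhausted and MEM_PRI is set.  Takes
 * phba->hbalock only for the safety-pool fallback.
 *
 * Returns the virtual address of the buffer, or NULL on failure.
 */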
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	unsigned long iflags;
	void *ret;

	ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);

	/* Fall back to the safety pool for MEM_PRI requests */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
		pool->current_count--;
		ret = pool->elements[pool->current_count].virt;
		*handle = pool->elements[pool->current_count].phys;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
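
/**
 * __lpfc_mbuf_free - free an mbuf, lock-free variant
 * @phba: HBA the mbuf belongs to.
 * @virt: virtual address of the mbuf.
 * @dma: DMA-mapped address of the mbuf.
 *
 * Tops up the MBUF safety pool before returning the buffer to the DMA
 * pool.  The caller must already hold phba->hbalock.
 */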
void
__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;

	/* Refill the safety pool first, then return to the DMA pool */
	if (pool->current_count < pool->max_count) {
		pool->elements[pool->current_count].virt = virt;
		pool->elements[pool->current_count].phys = dma;
		pool->current_count++;
	} else {
		dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
	}
}
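
/**
 * lpfc_mbuf_free - free an mbuf, taking phba->hbalock
 * @phba: HBA the mbuf belongs to.
 * @virt: virtual address of the mbuf.
 * @dma: DMA-mapped address of the mbuf.
 *
 * Locked wrapper around __lpfc_mbuf_free for callers that do not hold
 * phba->hbalock.
 */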
void
lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbuf_free(phba, virt, dma);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
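
/**
 * lpfc_nvmet_buf_alloc - allocate an NVMET scatter-gather DMA buffer
 * @phba: HBA to allocate from.
 * @mem_flags: unused here; kept for symmetry with lpfc_mbuf_alloc.
 * @handle: used to return the DMA-mapped address of the buffer.
 *
 * Returns the virtual address of the buffer, or NULL on failure.
 */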
void *
lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
	return dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
}
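
/**
 * lpfc_nvmet_buf_free - free an NVMET scatter-gather DMA buffer
 * @phba: HBA the buffer belongs to.
 * @virt: virtual address of the buffer.
 * @dma: DMA-mapped address of the buffer.
 */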
void
lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
{
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
}
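
/**
 * lpfc_els_hbq_alloc - allocate an HBQ buffer (SLI3)
 * @phba: HBA to allocate the buffer for.
 *
 * Allocates an hbq_dmabuf and a BPL-sized buffer from lpfc_hbq_pool to
 * back it.
 *
 * Returns the hbq_dmabuf, or NULL on failure.
 */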
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *hbqbp;

	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!hbqbp)
		return NULL;

	hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
					  &hbqbp->dbuf.phys);
	if (!hbqbp->dbuf.virt) {
		kfree(hbqbp);
		return NULL;
	}
	hbqbp->total_size = LPFC_BPL_SIZE;
	return hbqbp;
}
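
/**
 * lpfc_els_hbq_free - free an HBQ buffer allocated by lpfc_els_hbq_alloc
 * @phba: HBA the buffer belongs to.
 * @hbqbp: buffer to free.
 */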
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
	dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
	kfree(hbqbp);
}
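
/**
 * lpfc_sli4_rb_alloc - allocate an SLI4 receive buffer
 * @phba: HBA to allocate the buffer for.
 *
 * Allocates an hbq_dmabuf with a header buffer from lpfc_hrb_pool and a
 * data buffer from lpfc_drb_pool; on any failure everything already
 * allocated is freed.
 *
 * Returns the hbq_dmabuf, or NULL on failure.
 */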
struct hbq_dmabuf *
lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_DATA_BUF_SIZE;
	return dma_buf;
}
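
/**
 * lpfc_sli4_rb_free - free an SLI4 receive buffer
 * @phba: HBA the buffer belongs to.
 * @dmab: buffer to free, including its header and data DMA buffers.
 */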
void
lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
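
/**
 * lpfc_sli4_nvmet_alloc - allocate an SLI4 NVMET receive buffer
 * @phba: HBA to allocate the buffer for.
 *
 * Allocates an rqb_dmabuf with a header buffer from lpfc_hrb_pool and a
 * data buffer from lpfc_nvmet_drb_pool; on any failure everything already
 * allocated is freed.
 *
 * Returns the rqb_dmabuf, or NULL on failure.
 */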
struct rqb_dmabuf *
lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
{
	struct rqb_dmabuf *dma_buf;

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

	dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
					    &dma_buf->hbuf.phys);
	if (!dma_buf->hbuf.virt) {
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
					    GFP_KERNEL, &dma_buf->dbuf.phys);
	if (!dma_buf->dbuf.virt) {
		dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
			      dma_buf->hbuf.phys);
		kfree(dma_buf);
		return NULL;
	}
	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
	return dma_buf;
}
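
/**
 * lpfc_sli4_nvmet_free - free an SLI4 NVMET receive buffer
 * @phba: HBA the buffer belongs to.
 * @dmab: buffer to free, including its header and data DMA buffers.
 */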
void
lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
{
	dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
	dma_pool_free(phba->lpfc_nvmet_drb_pool,
		      dmab->dbuf.virt, dmab->dbuf.phys);
	kfree(dmab);
}
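
/**
 * lpfc_in_buf_free - free an input buffer back to the driver's pools
 * @phba: HBA the buffer belongs to.
 * @mp: buffer to free.
 *
 * With HBQs enabled the buffer is removed from its list and returned via
 * the HBQ free-buffer handler (or lpfc_sli_free_hbq when it carries a
 * valid tag); otherwise it is freed as a plain mbuf.
 */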
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		if (hbq_entry->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
}
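
/**
 * lpfc_rq_buf_free - repost or free an SLI4 receive queue buffer
 * @phba: HBA the buffer belongs to.
 * @mp: buffer to repost.
 *
 * Attempts to repost the header/data buffer pair to its receive queue
 * pair; if the post fails, the failure is logged and the buffer is freed
 * back to its pool.
 */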
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
	} else {
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}