// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) IBM Corporation
 */
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
34
/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from the AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu;
	struct cxlflash_cfg *cfg;
	struct device *dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	afu = cmd->parent;
	cfg = afu->parent;
	dev = &cfg->dev->dev;
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}
69
70 dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
71 "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
72 ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
73 ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
74
75 if (ioasa->rc.scsi_rc) {
76
77 if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
78 memcpy(scp->sense_buffer, ioasa->sense_data,
79 SISL_SENSE_DATA_LEN);
80 scp->result = ioasa->rc.scsi_rc;
81 } else
82 scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
83 }
84
85
86
87
88
89 if (ioasa->rc.fc_rc) {
90
91 switch (ioasa->rc.fc_rc) {
92 case SISL_FC_RC_LINKDOWN:
93 scp->result = (DID_REQUEUE << 16);
94 break;
95 case SISL_FC_RC_RESID:
96
97 if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
98
99
100
101
102
103 scp->result = (DID_ERROR << 16);
104 }
105 break;
106 case SISL_FC_RC_RESIDERR:
107
108 case SISL_FC_RC_TGTABORT:
109 case SISL_FC_RC_ABORTOK:
110 case SISL_FC_RC_ABORTFAIL:
111 case SISL_FC_RC_NOLOGI:
112 case SISL_FC_RC_ABORTPEND:
113 case SISL_FC_RC_WRABORTPEND:
114 case SISL_FC_RC_NOEXP:
115 case SISL_FC_RC_INUSE:
116 scp->result = (DID_ERROR << 16);
117 break;
118 }
119 }
120
121 if (ioasa->rc.afu_rc) {
122
123 switch (ioasa->rc.afu_rc) {
124 case SISL_AFU_RC_NO_CHANNELS:
125 scp->result = (DID_NO_CONNECT << 16);
126 break;
127 case SISL_AFU_RC_DATA_DMA_ERR:
128 switch (ioasa->afu_extra) {
129 case SISL_AFU_DMA_ERR_PAGE_IN:
130
131 scp->result = (DID_IMM_RETRY << 16);
132 break;
133 case SISL_AFU_DMA_ERR_INVALID_EA:
134 default:
135 scp->result = (DID_ERROR << 16);
136 }
137 break;
138 case SISL_AFU_RC_OUT_OF_DATA_BUFS:
139
140 scp->result = (DID_ALLOC_FAILURE << 16);
141 break;
142 default:
143 scp->result = (DID_ERROR << 16);
144 }
145 }
146}
147
148
149
150
151
152
153
154
155
156
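/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */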
157static void cmd_complete(struct afu_cmd *cmd)
158{
159 struct scsi_cmnd *scp;
160 ulong lock_flags;
161 struct afu *afu = cmd->parent;
162 struct cxlflash_cfg *cfg = afu->parent;
163 struct device *dev = &cfg->dev->dev;
164 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
165
166 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
167 list_del(&cmd->list);
168 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
169
170 if (cmd->scp) {
171 scp = cmd->scp;
172 if (unlikely(cmd->sa.ioasc))
173 process_cmd_err(cmd, scp);
174 else
175 scp->result = (DID_OK << 16);
176
177 dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
178 __func__, scp, scp->result, cmd->sa.ioasc);
179 scp->scsi_done(scp);
180 } else if (cmd->cmd_tmf) {
181 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
182 cfg->tmf_active = false;
183 wake_up_all_locked(&cfg->tmf_waitq);
184 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
185 } else
186 complete(&cmd->cevent);
187}
188
189
190
191
192
193
194
195
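/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */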
196static void flush_pending_cmds(struct hwq *hwq)
197{
198 struct cxlflash_cfg *cfg = hwq->afu->parent;
199 struct afu_cmd *cmd, *tmp;
200 struct scsi_cmnd *scp;
201 ulong lock_flags;
202
203 list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle it */
205 if (!list_empty(&cmd->queue))
206 continue;
207
208 list_del(&cmd->list);
209
210 if (cmd->scp) {
211 scp = cmd->scp;
212 scp->result = (DID_IMM_RETRY << 16);
213 scp->scsi_done(scp);
214 } else {
215 cmd->cmd_aborted = true;
216
217 if (cmd->cmd_tmf) {
218 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
219 cfg->tmf_active = false;
220 wake_up_all_locked(&cfg->tmf_waitq);
221 spin_unlock_irqrestore(&cfg->tmf_slock,
222 lock_flags);
223 } else
224 complete(&cmd->cevent);
225 }
226 }
227}
228
229
230
231
232
233
234
235
236
237
238
239
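/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform the reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */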
240static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
241{
242 struct cxlflash_cfg *cfg = hwq->afu->parent;
243 struct device *dev = &cfg->dev->dev;
244 int rc = -ETIMEDOUT;
245 int nretry = 0;
246 u64 val = 0x1;
247 ulong lock_flags;
248
249 dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
250
251 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
252
253 writeq_be(val, reset_reg);
254 do {
255 val = readq_be(reset_reg);
256 if ((val & 0x1) == 0x0) {
257 rc = 0;
258 break;
259 }
260
261
262 udelay(1 << nretry);
263 } while (nretry++ < MC_ROOM_RETRY_CNT);
264
265 if (!rc)
266 flush_pending_cmds(hwq);
267
268 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
269
270 dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
271 __func__, rc, val, nretry);
272 return rc;
273}
274
275
276
277
278
279
280
281static int context_reset_ioarrin(struct hwq *hwq)
282{
283 return context_reset(hwq, &hwq->host_map->ioarrin);
284}
285
286
287
288
289
290
291
292static int context_reset_sq(struct hwq *hwq)
293{
294 return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
295}
296
297
298
299
300
301
302
303
304
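/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */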
305static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
306{
307 struct cxlflash_cfg *cfg = afu->parent;
308 struct device *dev = &cfg->dev->dev;
309 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
310 int rc = 0;
311 s64 room;
312 ulong lock_flags;
313
	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
318 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
319 if (--hwq->room < 0) {
320 room = readq_be(&hwq->host_map->cmd_room);
321 if (room <= 0) {
322 dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
323 "0x%02X, room=0x%016llX\n",
324 __func__, cmd->rcb.cdb[0], room);
325 hwq->room = 0;
326 rc = SCSI_MLQUEUE_HOST_BUSY;
327 goto out;
328 }
329 hwq->room = room - 1;
330 }
331
332 list_add(&cmd->list, &hwq->pending_cmds);
333 writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
334out:
335 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
336 dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
337 __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
338 return rc;
339}
340
341
342
343
344
345
346
347
348
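/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */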
349static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
350{
351 struct cxlflash_cfg *cfg = afu->parent;
352 struct device *dev = &cfg->dev->dev;
353 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
354 int rc = 0;
355 int newval;
356 ulong lock_flags;
357
358 newval = atomic_dec_if_positive(&hwq->hsq_credits);
359 if (newval <= 0) {
360 rc = SCSI_MLQUEUE_HOST_BUSY;
361 goto out;
362 }
363
364 cmd->rcb.ioasa = &cmd->sa;
365
366 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
367
368 *hwq->hsq_curr = cmd->rcb;
369 if (hwq->hsq_curr < hwq->hsq_end)
370 hwq->hsq_curr++;
371 else
372 hwq->hsq_curr = hwq->hsq_start;
373
374 list_add(&cmd->list, &hwq->pending_cmds);
375 writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
376
377 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
378out:
379 dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
380 "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
381 cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
382 readq_be(&hwq->host_map->sq_head),
383 readq_be(&hwq->host_map->sq_tail));
384 return rc;
385}
386
387
388
389
390
391
392
393
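/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */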
394static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
395{
396 struct cxlflash_cfg *cfg = afu->parent;
397 struct device *dev = &cfg->dev->dev;
398 int rc = 0;
399 ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
400
401 timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
402 if (!timeout)
403 rc = -ETIMEDOUT;
404
405 if (cmd->cmd_aborted)
406 rc = -EAGAIN;
407
408 if (unlikely(cmd->sa.ioasc != 0)) {
409 dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
410 __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
411 rc = -EIO;
412 }
413
414 return rc;
415}
416
417
418
419
420
421
422
423
424
425
426
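/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */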
427static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
428 struct afu *afu)
429{
430 u32 tag;
431 u32 hwq = 0;
432
433 if (afu->num_hwqs == 1)
434 return 0;
435
436 switch (afu->hwq_mode) {
437 case HWQ_MODE_RR:
438 hwq = afu->hwq_rr_count++ % afu->num_hwqs;
439 break;
440 case HWQ_MODE_TAG:
441 tag = blk_mq_unique_tag(scp->request);
442 hwq = blk_mq_unique_tag_to_hwq(tag);
443 break;
444 case HWQ_MODE_CPU:
445 hwq = smp_processor_id() % afu->num_hwqs;
446 break;
447 default:
448 WARN_ON_ONCE(1);
449 }
450
451 return hwq;
452}
453
454
455
456
457
458
459
460
461
462
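/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:	Internal structure associated with the host.
 * @sdev:	SCSI device destined for TMF.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */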
463static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
464 u64 tmfcmd)
465{
466 struct afu *afu = cfg->afu;
467 struct afu_cmd *cmd = NULL;
468 struct device *dev = &cfg->dev->dev;
469 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
470 bool needs_deletion = false;
471 char *buf = NULL;
472 ulong lock_flags;
473 int rc = 0;
474 ulong to;
475
476 buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
477 if (unlikely(!buf)) {
478 dev_err(dev, "%s: no memory for command\n", __func__);
479 rc = -ENOMEM;
480 goto out;
481 }
482
483 cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
484 INIT_LIST_HEAD(&cmd->queue);
485
486
487 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
488 if (cfg->tmf_active)
489 wait_event_interruptible_lock_irq(cfg->tmf_waitq,
490 !cfg->tmf_active,
491 cfg->tmf_slock);
492 cfg->tmf_active = true;
493 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
494
495 cmd->parent = afu;
496 cmd->cmd_tmf = true;
497 cmd->hwq_index = hwq->index;
498
499 cmd->rcb.ctx_id = hwq->ctx_hndl;
500 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
501 cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
502 cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
503 cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
504 SISL_REQ_FLAGS_SUP_UNDERRUN |
505 SISL_REQ_FLAGS_TMF_CMD);
506 memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
507
508 rc = afu->send_cmd(afu, cmd);
509 if (unlikely(rc)) {
510 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
511 cfg->tmf_active = false;
512 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
513 goto out;
514 }
515
516 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
517 to = msecs_to_jiffies(5000);
518 to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
519 !cfg->tmf_active,
520 cfg->tmf_slock,
521 to);
522 if (!to) {
523 dev_err(dev, "%s: TMF timed out\n", __func__);
524 rc = -ETIMEDOUT;
525 needs_deletion = true;
526 } else if (cmd->cmd_aborted) {
527 dev_err(dev, "%s: TMF aborted\n", __func__);
528 rc = -EAGAIN;
529 } else if (cmd->sa.ioasc) {
530 dev_err(dev, "%s: TMF failed ioasc=%08x\n",
531 __func__, cmd->sa.ioasc);
532 rc = -EIO;
533 }
534 cfg->tmf_active = false;
535 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
536
537 if (needs_deletion) {
538 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
539 list_del(&cmd->list);
540 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
541 }
542out:
543 kfree(buf);
544 return rc;
545}
546
547
548
549
550
551
552
553static const char *cxlflash_driver_info(struct Scsi_Host *host)
554{
555 return CXLFLASH_ADAPTER_NAME;
556}
557
558
559
560
561
562
563
564
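/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */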
565static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
566{
567 struct cxlflash_cfg *cfg = shost_priv(host);
568 struct afu *afu = cfg->afu;
569 struct device *dev = &cfg->dev->dev;
570 struct afu_cmd *cmd = sc_to_afuci(scp);
571 struct scatterlist *sg = scsi_sglist(scp);
572 int hwq_index = cmd_to_target_hwq(host, scp, afu);
573 struct hwq *hwq = get_hwq(afu, hwq_index);
574 u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
575 ulong lock_flags;
576 int rc = 0;
577
578 dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
579 "cdb=(%08x-%08x-%08x-%08x)\n",
580 __func__, scp, host->host_no, scp->device->channel,
581 scp->device->id, scp->device->lun,
582 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
583 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
584 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
585 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
586
	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
591 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
592 if (cfg->tmf_active) {
593 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
594 rc = SCSI_MLQUEUE_HOST_BUSY;
595 goto out;
596 }
597 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
598
599 switch (cfg->state) {
600 case STATE_PROBING:
601 case STATE_PROBED:
602 case STATE_RESET:
603 dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
604 rc = SCSI_MLQUEUE_HOST_BUSY;
605 goto out;
606 case STATE_FAILTERM:
607 dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
608 scp->result = (DID_NO_CONNECT << 16);
609 scp->scsi_done(scp);
610 rc = 0;
611 goto out;
612 default:
613 atomic_inc(&afu->cmds_active);
614 break;
615 }
616
617 if (likely(sg)) {
618 cmd->rcb.data_len = sg->length;
619 cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
620 }
621
622 cmd->scp = scp;
623 cmd->parent = afu;
624 cmd->hwq_index = hwq_index;
625
626 cmd->sa.ioasc = 0;
627 cmd->rcb.ctx_id = hwq->ctx_hndl;
628 cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
629 cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
630 cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
631
632 if (scp->sc_data_direction == DMA_TO_DEVICE)
633 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
634
635 cmd->rcb.req_flags = req_flags;
636 memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
637
638 rc = afu->send_cmd(afu, cmd);
639 atomic_dec(&afu->cmds_active);
640out:
641 return rc;
642}
643
644
645
646
647
648static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
649{
650 struct pci_dev *pdev = cfg->dev;
651
652 if (pci_channel_offline(pdev))
653 wait_event_timeout(cfg->reset_waitq,
654 !pci_channel_offline(pdev),
655 CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
656}
657
658
659
660
661
662static void free_mem(struct cxlflash_cfg *cfg)
663{
664 struct afu *afu = cfg->afu;
665
666 if (cfg->afu) {
667 free_pages((ulong)afu, get_order(sizeof(struct afu)));
668 cfg->afu = NULL;
669 }
670}
671
672
673
674
675
676static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
677{
678 if (cfg->async_reset_cookie == 0)
679 return;
680
681
682 async_synchronize_cookie(cfg->async_reset_cookie + 1);
683 cfg->async_reset_cookie = 0;
684}
685
686
687
688
689
690
691
692
693
694
695static void stop_afu(struct cxlflash_cfg *cfg)
696{
697 struct afu *afu = cfg->afu;
698 struct hwq *hwq;
699 int i;
700
701 cancel_work_sync(&cfg->work_q);
702 if (!current_is_async())
703 cxlflash_reset_sync(cfg);
704
705 if (likely(afu)) {
706 while (atomic_read(&afu->cmds_active))
707 ssleep(1);
708
709 if (afu_is_irqpoll_enabled(afu)) {
710 for (i = 0; i < afu->num_hwqs; i++) {
711 hwq = get_hwq(afu, i);
712
713 irq_poll_disable(&hwq->irqpoll);
714 }
715 }
716
717 if (likely(afu->afu_map)) {
718 cfg->ops->psa_unmap(afu->afu_map);
719 afu->afu_map = NULL;
720 }
721 }
722}
723
724
725
726
727
728
729
730
731
732static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
733 u32 index)
734{
735 struct afu *afu = cfg->afu;
736 struct device *dev = &cfg->dev->dev;
737 struct hwq *hwq;
738
739 if (!afu) {
740 dev_err(dev, "%s: returning with NULL afu\n", __func__);
741 return;
742 }
743
744 hwq = get_hwq(afu, index);
745
746 if (!hwq->ctx_cookie) {
747 dev_err(dev, "%s: returning with NULL MC\n", __func__);
748 return;
749 }
750
	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		/* fall through */
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		/* fall through */
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		/* fall through */
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
770}
771
772
773
774
775
776
777
778
779static void term_mc(struct cxlflash_cfg *cfg, u32 index)
780{
781 struct afu *afu = cfg->afu;
782 struct device *dev = &cfg->dev->dev;
783 struct hwq *hwq;
784 ulong lock_flags;
785
786 if (!afu) {
787 dev_err(dev, "%s: returning with NULL afu\n", __func__);
788 return;
789 }
790
791 hwq = get_hwq(afu, index);
792
793 if (!hwq->ctx_cookie) {
794 dev_err(dev, "%s: returning with NULL MC\n", __func__);
795 return;
796 }
797
798 WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
799 if (index != PRIMARY_HWQ)
800 WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
801 hwq->ctx_cookie = NULL;
802
803 spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
804 hwq->hrrq_online = false;
805 spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
806
807 spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
808 flush_pending_cmds(hwq);
809 spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
810}
811
812
813
814
815
816
817
818static void term_afu(struct cxlflash_cfg *cfg)
819{
820 struct device *dev = &cfg->dev->dev;
821 int k;
822
823
824
825
826
827
828
829
830
831
832 for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
833 term_intr(cfg, UNMAP_THREE, k);
834
835 stop_afu(cfg);
836
837 for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
838 term_mc(cfg, k);
839
840 dev_dbg(dev, "%s: returning\n", __func__);
841}
842
843
844
845
846
847
848
849
850
851
852
853static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
854{
855 struct afu *afu = cfg->afu;
856 struct device *dev = &cfg->dev->dev;
857 struct dev_dependent_vals *ddv;
858 __be64 __iomem *fc_port_regs;
859 u64 reg, status;
860 int i, retry_cnt = 0;
861
862 ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
863 if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
864 return;
865
866 if (!afu || !afu->afu_map) {
867 dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
868 return;
869 }
870
871
872 for (i = 0; i < cfg->num_fc_ports; i++) {
873 fc_port_regs = get_fc_port_regs(cfg, i);
874
875 reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
876 reg |= SISL_FC_SHUTDOWN_NORMAL;
877 writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
878 }
879
880 if (!wait)
881 return;
882
883
884 for (i = 0; i < cfg->num_fc_ports; i++) {
885 fc_port_regs = get_fc_port_regs(cfg, i);
886 retry_cnt = 0;
887
888 while (true) {
889 status = readq_be(&fc_port_regs[FC_STATUS / 8]);
890 if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
891 break;
892 if (++retry_cnt >= MC_RETRY_CNT) {
893 dev_dbg(dev, "%s: port %d shutdown processing "
894 "not yet completed\n", __func__, i);
895 break;
896 }
897 msleep(100 * retry_cnt);
898 }
899 }
900}
901
902
903
904
905
906
907static int cxlflash_get_minor(void)
908{
909 int minor;
910 long bit;
911
912 bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
913 if (bit >= CXLFLASH_MAX_ADAPTERS)
914 return -1;
915
916 minor = bit & MINORMASK;
917 set_bit(minor, cxlflash_minor);
918 return minor;
919}
920
921
922
923
924
925static void cxlflash_put_minor(int minor)
926{
927 clear_bit(minor, cxlflash_minor);
928}
929
930
931
932
933
934static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
935{
936 device_unregister(cfg->chardev);
937 cfg->chardev = NULL;
938 cdev_del(&cfg->cdev);
939 cxlflash_put_minor(MINOR(cfg->cdev.dev));
940}
941
942
943
944
945
946
947
948
949static void cxlflash_remove(struct pci_dev *pdev)
950{
951 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
952 struct device *dev = &pdev->dev;
953 ulong lock_flags;
954
955 if (!pci_is_enabled(pdev)) {
956 dev_dbg(dev, "%s: Device is disabled\n", __func__);
957 return;
958 }
959
960
961 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
962 cfg->state != STATE_PROBING);
963 spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
964 if (cfg->tmf_active)
965 wait_event_interruptible_lock_irq(cfg->tmf_waitq,
966 !cfg->tmf_active,
967 cfg->tmf_slock);
968 spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
969
970
971 notify_shutdown(cfg, true);
972
973 cfg->state = STATE_FAILTERM;
974 cxlflash_stop_term_user_contexts(cfg);
975
	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		/* fall through */
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}
996
997 dev_dbg(dev, "%s: returning\n", __func__);
998}
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010static int alloc_mem(struct cxlflash_cfg *cfg)
1011{
1012 int rc = 0;
1013 struct device *dev = &cfg->dev->dev;
1014
1015
1016 cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1017 get_order(sizeof(struct afu)));
1018 if (unlikely(!cfg->afu)) {
1019 dev_err(dev, "%s: cannot get %d free pages\n",
1020 __func__, get_order(sizeof(struct afu)));
1021 rc = -ENOMEM;
1022 goto out;
1023 }
1024 cfg->afu->parent = cfg;
1025 cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
1026 cfg->afu->afu_map = NULL;
1027out:
1028 return rc;
1029}
1030
1031
1032
1033
1034
1035
1036
1037static int init_pci(struct cxlflash_cfg *cfg)
1038{
1039 struct pci_dev *pdev = cfg->dev;
1040 struct device *dev = &cfg->dev->dev;
1041 int rc = 0;
1042
1043 rc = pci_enable_device(pdev);
1044 if (rc || pci_channel_offline(pdev)) {
1045 if (pci_channel_offline(pdev)) {
1046 cxlflash_wait_for_pci_err_recovery(cfg);
1047 rc = pci_enable_device(pdev);
1048 }
1049
1050 if (rc) {
1051 dev_err(dev, "%s: Cannot enable adapter\n", __func__);
1052 cxlflash_wait_for_pci_err_recovery(cfg);
1053 goto out;
1054 }
1055 }
1056
1057out:
1058 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1059 return rc;
1060}
1061
1062
1063
1064
1065
1066
1067
1068static int init_scsi(struct cxlflash_cfg *cfg)
1069{
1070 struct pci_dev *pdev = cfg->dev;
1071 struct device *dev = &cfg->dev->dev;
1072 int rc = 0;
1073
1074 rc = scsi_add_host(cfg->host, &pdev->dev);
1075 if (rc) {
1076 dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
1077 goto out;
1078 }
1079
1080 scsi_scan_host(cfg->host);
1081
1082out:
1083 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1084 return rc;
1085}
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095static void set_port_online(__be64 __iomem *fc_regs)
1096{
1097 u64 cmdcfg;
1098
1099 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1100 cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);
1101 cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);
1102 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1103}
1104
1105
1106
1107
1108
1109
1110
1111static void set_port_offline(__be64 __iomem *fc_regs)
1112{
1113 u64 cmdcfg;
1114
1115 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1116 cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);
1117 cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);
1118 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1119}
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1135{
1136 u64 status;
1137
1138 WARN_ON(delay_us < 1000);
1139
1140 do {
1141 msleep(delay_us / 1000);
1142 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1143 if (status == U64_MAX)
1144 nretry /= 2;
1145 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1146 nretry--);
1147
1148 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
1149}
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1164{
1165 u64 status;
1166
1167 WARN_ON(delay_us < 1000);
1168
1169 do {
1170 msleep(delay_us / 1000);
1171 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1172 if (status == U64_MAX)
1173 nretry /= 2;
1174 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1175 nretry--);
1176
1177 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1178}
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
1194 u64 wwpn)
1195{
1196 struct cxlflash_cfg *cfg = afu->parent;
1197 struct device *dev = &cfg->dev->dev;
1198
1199 set_port_offline(fc_regs);
1200 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1201 FC_PORT_STATUS_RETRY_CNT)) {
1202 dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
1203 __func__, port);
1204 }
1205
1206 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1207
1208 set_port_online(fc_regs);
1209 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1210 FC_PORT_STATUS_RETRY_CNT)) {
1211 dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
1212 __func__, port);
1213 }
1214}
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1230{
1231 struct cxlflash_cfg *cfg = afu->parent;
1232 struct device *dev = &cfg->dev->dev;
1233 u64 port_sel;
1234
1235
1236 port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1237 port_sel &= ~(1ULL << port);
1238 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1239 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1240
1241 set_port_offline(fc_regs);
1242 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1243 FC_PORT_STATUS_RETRY_CNT))
1244 dev_err(dev, "%s: wait on port %d to go offline timed out\n",
1245 __func__, port);
1246
1247 set_port_online(fc_regs);
1248 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1249 FC_PORT_STATUS_RETRY_CNT))
1250 dev_err(dev, "%s: wait on port %d to go online timed out\n",
1251 __func__, port);
1252
1253
1254 port_sel |= (1ULL << port);
1255 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1256 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1257
1258 dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
1259}
1260
1261
1262
1263
1264
1265static void afu_err_intr_init(struct afu *afu)
1266{
1267 struct cxlflash_cfg *cfg = afu->parent;
1268 __be64 __iomem *fc_port_regs;
1269 int i;
1270 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1271 u64 reg;
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1282
1283 reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1284
1285 if (afu->internal_lun)
1286 reg |= 1;
1287 writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1288
1289 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1290
1291
1292 writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1293
1294
1295 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1296
1297
1298 fc_port_regs = get_fc_port_regs(cfg, 0);
1299 reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
1300 reg &= SISL_FC_INTERNAL_MASK;
1301 if (afu->internal_lun)
1302 reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1303 writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
1304
1305
1306 for (i = 0; i < cfg->num_fc_ports; i++) {
1307 fc_port_regs = get_fc_port_regs(cfg, i);
1308
1309 writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
1310 writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1311 }
1312
1313
1314
1315
1316
1317
1318
1319 for (i = 0; i < afu->num_hwqs; i++) {
1320 hwq = get_hwq(afu, i);
1321
1322 reg = readq_be(&hwq->host_map->ctx_ctrl);
1323 WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
1324 reg |= SISL_MSI_SYNC_ERROR;
1325 writeq_be(reg, &hwq->host_map->ctx_ctrl);
1326 writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
1327 }
1328}
1329
1330
1331
1332
1333
1334
1335
1336
1337static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1338{
1339 struct hwq *hwq = (struct hwq *)data;
1340 struct cxlflash_cfg *cfg = hwq->afu->parent;
1341 struct device *dev = &cfg->dev->dev;
1342 u64 reg;
1343 u64 reg_unmasked;
1344
1345 reg = readq_be(&hwq->host_map->intr_status);
1346 reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1347
1348 if (reg_unmasked == 0UL) {
1349 dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
1350 __func__, reg);
1351 goto cxlflash_sync_err_irq_exit;
1352 }
1353
1354 dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
1355 __func__, reg);
1356
1357 writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
1358
1359cxlflash_sync_err_irq_exit:
1360 return IRQ_HANDLED;
1361}
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
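/**
 * process_hrrq() - process the read-response queue
 * @hwq:	HWQ associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */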
1373static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
1374{
1375 struct afu *afu = hwq->afu;
1376 struct afu_cmd *cmd;
1377 struct sisl_ioasa *ioasa;
1378 struct sisl_ioarcb *ioarcb;
1379 bool toggle = hwq->toggle;
1380 int num_hrrq = 0;
1381 u64 entry,
1382 *hrrq_start = hwq->hrrq_start,
1383 *hrrq_end = hwq->hrrq_end,
1384 *hrrq_curr = hwq->hrrq_curr;
1385
1386
1387 while (true) {
1388 entry = *hrrq_curr;
1389
1390 if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1391 break;
1392
1393 entry &= ~SISL_RESP_HANDLE_T_BIT;
1394
1395 if (afu_is_sq_cmd_mode(afu)) {
1396 ioasa = (struct sisl_ioasa *)entry;
1397 cmd = container_of(ioasa, struct afu_cmd, sa);
1398 } else {
1399 ioarcb = (struct sisl_ioarcb *)entry;
1400 cmd = container_of(ioarcb, struct afu_cmd, rcb);
1401 }
1402
1403 list_add_tail(&cmd->queue, doneq);
1404
1405
1406 if (hrrq_curr < hrrq_end)
1407 hrrq_curr++;
1408 else {
1409 hrrq_curr = hrrq_start;
1410 toggle ^= SISL_RESP_HANDLE_T_BIT;
1411 }
1412
1413 atomic_inc(&hwq->hsq_credits);
1414 num_hrrq++;
1415
1416 if (budget > 0 && num_hrrq >= budget)
1417 break;
1418 }
1419
1420 hwq->hrrq_curr = hrrq_curr;
1421 hwq->toggle = toggle;
1422
1423 return num_hrrq;
1424}
1425
1426
1427
1428
1429
1430
1431
1432static void process_cmd_doneq(struct list_head *doneq)
1433{
1434 struct afu_cmd *cmd, *tmp;
1435
1436 WARN_ON(list_empty(doneq));
1437
1438 list_for_each_entry_safe(cmd, tmp, doneq, queue)
1439 cmd_complete(cmd);
1440}
1441
1442
1443
1444
1445
1446
1447
1448
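/**
 * cxlflash_irqpoll() - process a pending queue of commands in polled mode
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */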
1449static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
1450{
1451 struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
1452 unsigned long hrrq_flags;
1453 LIST_HEAD(doneq);
1454 int num_entries = 0;
1455
1456 spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1457
1458 num_entries = process_hrrq(hwq, &doneq, budget);
1459 if (num_entries < budget)
1460 irq_poll_complete(irqpoll);
1461
1462 spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1463
1464 process_cmd_doneq(&doneq);
1465 return num_entries;
1466}
1467
1468
1469
1470
1471
1472
1473
1474
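/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the HWQ.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */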
1475static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1476{
1477 struct hwq *hwq = (struct hwq *)data;
1478 struct afu *afu = hwq->afu;
1479 unsigned long hrrq_flags;
1480 LIST_HEAD(doneq);
1481 int num_entries = 0;
1482
1483 spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1484
1485
1486 if (!hwq->hrrq_online) {
1487 spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1488 return IRQ_HANDLED;
1489 }
1490
1491 if (afu_is_irqpoll_enabled(afu)) {
1492 irq_poll_sched(&hwq->irqpoll);
1493 spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1494 return IRQ_HANDLED;
1495 }
1496
1497 num_entries = process_hrrq(hwq, &doneq, -1);
1498 spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1499
1500 if (num_entries == 0)
1501 return IRQ_NONE;
1502
1503 process_cmd_doneq(&doneq);
1504 return IRQ_HANDLED;
1505}
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516#define ASTATUS_FC(_a, _b, _c, _d) \
1517 { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1518
1519#define BUILD_SISL_ASTATUS_FC_PORT(_a) \
1520 ASTATUS_FC(_a, LINK_UP, "link up", 0), \
1521 ASTATUS_FC(_a, LINK_DN, "link down", 0), \
1522 ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
1523 ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
1524 ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1525 ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
1526 ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
1527 ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1528
1529static const struct asyc_intr_info ainfo[] = {
1530 BUILD_SISL_ASTATUS_FC_PORT(1),
1531 BUILD_SISL_ASTATUS_FC_PORT(0),
1532 BUILD_SISL_ASTATUS_FC_PORT(3),
1533 BUILD_SISL_ASTATUS_FC_PORT(2)
1534};
1535
1536
1537
1538
1539
1540
1541
1542
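/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the HWQ.
 *
 * Return: Always return IRQ_HANDLED.
 */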
1543static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1544{
1545 struct hwq *hwq = (struct hwq *)data;
1546 struct afu *afu = hwq->afu;
1547 struct cxlflash_cfg *cfg = afu->parent;
1548 struct device *dev = &cfg->dev->dev;
1549 const struct asyc_intr_info *info;
1550 struct sisl_global_map __iomem *global = &afu->afu_map->global;
1551 __be64 __iomem *fc_port_regs;
1552 u64 reg_unmasked;
1553 u64 reg;
1554 u64 bit;
1555 u8 port;
1556
1557 reg = readq_be(&global->regs.aintr_status);
1558 reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1559
1560 if (unlikely(reg_unmasked == 0)) {
1561 dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1562 __func__, reg);
1563 goto out;
1564 }
1565
1566
1567 writeq_be(reg_unmasked, &global->regs.aintr_clear);
1568
1569
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1571 if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1572 WARN_ON_ONCE(1);
1573 continue;
1574 }
1575
1576 info = &ainfo[bit];
1577 if (unlikely(info->status != 1ULL << bit)) {
1578 WARN_ON_ONCE(1);
1579 continue;
1580 }
1581
1582 port = info->port;
1583 fc_port_regs = get_fc_port_regs(cfg, port);
1584
1585 dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1586 __func__, port, info->desc,
1587 readq_be(&fc_port_regs[FC_STATUS / 8]));
1588
1589
1590
1591
1592
1593 if (info->action & LINK_RESET) {
1594 dev_err(dev, "%s: FC Port %d: resetting link\n",
1595 __func__, port);
1596 cfg->lr_state = LINK_RESET_REQUIRED;
1597 cfg->lr_port = port;
1598 schedule_work(&cfg->work_q);
1599 }
1600
1601 if (info->action & CLR_FC_ERROR) {
1602 reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1603
1604
1605
1606
1607
1608
1609 dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1610 __func__, port, reg);
1611
1612 writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1613 writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1614 }
1615
1616 if (info->action & SCAN_HOST) {
1617 atomic_inc(&cfg->scan_host_needed);
1618 schedule_work(&cfg->work_q);
1619 }
1620 }
1621
1622out:
1623 return IRQ_HANDLED;
1624}
1625
1626
1627
1628
1629
1630
1631
1632
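/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */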
1633static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1634{
1635 struct device *dev = &cfg->dev->dev;
1636 struct pci_dev *pdev = cfg->dev;
1637 int rc = 0;
1638 int ro_start, ro_size, i, j, k;
1639 ssize_t vpd_size;
1640 char vpd_data[CXLFLASH_VPD_LEN];
1641 char tmp_buf[WWPN_BUF_LEN] = { 0 };
1642 const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1643 cfg->dev_id->driver_data;
1644 const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1645 const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1646
1647
1648 vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1649 if (unlikely(vpd_size <= 0)) {
1650 dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1651 __func__, vpd_size);
1652 rc = -ENODEV;
1653 goto out;
1654 }
1655
1656
1657 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1658 PCI_VPD_LRDT_RO_DATA);
1659 if (unlikely(ro_start < 0)) {
1660 dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1661 rc = -ENODEV;
1662 goto out;
1663 }
1664
1665
1666 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1667 j = ro_size;
1668 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1669 if (unlikely((i + j) > vpd_size)) {
1670 dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1671 __func__, (i + j), vpd_size);
1672 ro_size = vpd_size - i;
1673 }
1674
	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 *
	 * Allow for WWPN not being found for all devices, setting
	 * the returned WWPN to zero when not found. Notify with a
	 * log error for cards that should have had WWPN keywords
	 * in the VPD - cards requiring WWPN will not have their
	 * ports programmed and operate in an undefined state.
	 */
1689 for (k = 0; k < cfg->num_fc_ports; k++) {
1690 j = ro_size;
1691 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1692
1693 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1694 if (i < 0) {
1695 if (wwpn_vpd_required)
1696 dev_err(dev, "%s: Port %d WWPN not found\n",
1697 __func__, k);
1698 wwpn[k] = 0ULL;
1699 continue;
1700 }
1701
1702 j = pci_vpd_info_field_size(&vpd_data[i]);
1703 i += PCI_VPD_INFO_FLD_HDR_SIZE;
1704 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1705 dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1706 __func__, k);
1707 rc = -ENODEV;
1708 goto out;
1709 }
1710
1711 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1712 rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1713 if (unlikely(rc)) {
1714 dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1715 __func__, k);
1716 rc = -ENODEV;
1717 goto out;
1718 }
1719
1720 dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1721 }
1722
1723out:
1724 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1725 return rc;
1726}
1727
1728
1729
1730
1731
1732
1733
1734
1735static void init_pcr(struct cxlflash_cfg *cfg)
1736{
1737 struct afu *afu = cfg->afu;
1738 struct sisl_ctrl_map __iomem *ctrl_map;
1739 struct hwq *hwq;
1740 void *cookie;
1741 int i;
1742
1743 for (i = 0; i < MAX_CONTEXT; i++) {
1744 ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1745
1746
1747 writeq_be(0, &ctrl_map->rht_start);
1748 writeq_be(0, &ctrl_map->rht_cnt_id);
1749 writeq_be(0, &ctrl_map->ctx_cap);
1750 }
1751
1752
1753 for (i = 0; i < afu->num_hwqs; i++) {
1754 hwq = get_hwq(afu, i);
1755 cookie = hwq->ctx_cookie;
1756
1757 hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1758 hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1759 hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1760
1761
1762 writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1763 }
1764}
1765
1766
1767
1768
1769
1770static int init_global(struct cxlflash_cfg *cfg)
1771{
1772 struct afu *afu = cfg->afu;
1773 struct device *dev = &cfg->dev->dev;
1774 struct hwq *hwq;
1775 struct sisl_host_map __iomem *hmap;
1776 __be64 __iomem *fc_port_regs;
1777 u64 wwpn[MAX_FC_PORTS];
1778 int i = 0, num_ports = 0;
1779 int rc = 0;
1780 int j;
1781 void *ctx;
1782 u64 reg;
1783
1784 rc = read_vpd(cfg, &wwpn[0]);
1785 if (rc) {
1786 dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1787 goto out;
1788 }
1789
1790
1791 for (i = 0; i < afu->num_hwqs; i++) {
1792 hwq = get_hwq(afu, i);
1793 hmap = hwq->host_map;
1794
1795 writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1796 writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1797 hwq->hrrq_online = true;
1798
1799 if (afu_is_sq_cmd_mode(afu)) {
1800 writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1801 writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1802 }
1803 }
1804
1805
1806 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1807 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1808
1809
1810
1811
1812 writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1813
1814
1815 if (afu->internal_lun) {
1816
1817 writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1818 num_ports = 0;
1819 } else {
1820 writeq_be(PORT_MASK(cfg->num_fc_ports),
1821 &afu->afu_map->global.regs.afu_port_sel);
1822 num_ports = cfg->num_fc_ports;
1823 }
1824
1825 for (i = 0; i < num_ports; i++) {
1826 fc_port_regs = get_fc_port_regs(cfg, i);
1827
1828
1829 writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1830
1831 (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1832 writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1833
1834
1835 if (wwpn[i] != 0)
1836 afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1837
1838
1839
1840 msleep(100);
1841 }
1842
1843 if (afu_is_ocxl_lisn(afu)) {
1844
1845 for (i = 0; i < afu->num_hwqs; i++) {
1846 hwq = get_hwq(afu, i);
1847 ctx = hwq->ctx_cookie;
1848
1849 for (j = 0; j < hwq->num_irqs; j++) {
1850 reg = cfg->ops->get_irq_objhndl(ctx, j);
1851 writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
1852 }
1853
1854 reg = hwq->ctx_hndl;
1855 writeq_be(SISL_LISN_PASID(reg, reg),
1856 &hwq->ctrl_map->lisn_pasid[0]);
1857 writeq_be(SISL_LISN_PASID(0UL, reg),
1858 &hwq->ctrl_map->lisn_pasid[1]);
1859 }
1860 }
1861
1862
1863
1864
1865 for (i = 0; i < afu->num_hwqs; i++) {
1866 hwq = get_hwq(afu, i);
1867
1868 (void)readq_be(&hwq->ctrl_map->mbox_r);
1869 writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1870 SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1871 SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1872 &hwq->ctrl_map->ctx_cap);
1873 }
1874
1875
1876
1877
1878
1879
1880
1881 hwq = get_hwq(afu, PRIMARY_HWQ);
1882 reg = readq_be(&hwq->host_map->ctx_ctrl);
1883 if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1884 cfg->ws_unmap = true;
1885
1886
1887 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1888out:
1889 return rc;
1890}
1891
1892
1893
1894
1895
1896static int start_afu(struct cxlflash_cfg *cfg)
1897{
1898 struct afu *afu = cfg->afu;
1899 struct device *dev = &cfg->dev->dev;
1900 struct hwq *hwq;
1901 int rc = 0;
1902 int i;
1903
1904 init_pcr(cfg);
1905
1906
1907 for (i = 0; i < afu->num_hwqs; i++) {
1908 hwq = get_hwq(afu, i);
1909
1910
1911 memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1912
1913
1914 hwq->hrrq_start = &hwq->rrq_entry[0];
1915 hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1916 hwq->hrrq_curr = hwq->hrrq_start;
1917 hwq->toggle = 1;
1918
1919
1920 spin_lock_init(&hwq->hrrq_slock);
1921 spin_lock_init(&hwq->hsq_slock);
1922
1923
1924 if (afu_is_sq_cmd_mode(afu)) {
1925 memset(&hwq->sq, 0, sizeof(hwq->sq));
1926 hwq->hsq_start = &hwq->sq[0];
1927 hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1928 hwq->hsq_curr = hwq->hsq_start;
1929
1930 atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1931 }
1932
1933
1934 if (afu_is_irqpoll_enabled(afu))
1935 irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1936 cxlflash_irqpoll);
1937
1938 }
1939
1940 rc = init_global(cfg);
1941
1942 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1943 return rc;
1944}
1945
1946
1947
1948
1949
1950
1951
1952
1953static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1954 struct hwq *hwq)
1955{
1956 struct device *dev = &cfg->dev->dev;
1957 void *ctx = hwq->ctx_cookie;
1958 int rc = 0;
1959 enum undo_level level = UNDO_NOOP;
1960 bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1961 int num_irqs = hwq->num_irqs;
1962
1963 rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1964 if (unlikely(rc)) {
1965 dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1966 __func__, rc);
1967 level = UNDO_NOOP;
1968 goto out;
1969 }
1970
1971 rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1972 "SISL_MSI_SYNC_ERROR");
1973 if (unlikely(rc <= 0)) {
1974 dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1975 level = FREE_IRQ;
1976 goto out;
1977 }
1978
1979 rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1980 "SISL_MSI_RRQ_UPDATED");
1981 if (unlikely(rc <= 0)) {
1982 dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1983 level = UNMAP_ONE;
1984 goto out;
1985 }
1986
1987
1988 if (!is_primary_hwq)
1989 goto out;
1990
1991 rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1992 "SISL_MSI_ASYNC_ERROR");
1993 if (unlikely(rc <= 0)) {
1994 dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1995 level = UNMAP_TWO;
1996 goto out;
1997 }
1998out:
1999 return level;
2000}
2001
2002
2003
2004
2005
2006
2007
2008
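/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	HWQ Index of the master context.
 *
 * Return: 0 on success, -errno on failure
 */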
2009static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2010{
2011 void *ctx;
2012 struct device *dev = &cfg->dev->dev;
2013 struct hwq *hwq = get_hwq(cfg->afu, index);
2014 int rc = 0;
2015 int num_irqs;
2016 enum undo_level level;
2017
2018 hwq->afu = cfg->afu;
2019 hwq->index = index;
2020 INIT_LIST_HEAD(&hwq->pending_cmds);
2021
2022 if (index == PRIMARY_HWQ) {
2023 ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
2024 num_irqs = 3;
2025 } else {
2026 ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
2027 num_irqs = 2;
2028 }
2029 if (IS_ERR_OR_NULL(ctx)) {
2030 rc = -ENOMEM;
2031 goto err1;
2032 }
2033
2034 WARN_ON(hwq->ctx_cookie);
2035 hwq->ctx_cookie = ctx;
2036 hwq->num_irqs = num_irqs;
2037
2038
2039 cfg->ops->set_master(ctx);
2040
2041
2042 if (index == PRIMARY_HWQ) {
2043 rc = cfg->ops->afu_reset(ctx);
2044 if (unlikely(rc)) {
2045 dev_err(dev, "%s: AFU reset failed rc=%d\n",
2046 __func__, rc);
2047 goto err1;
2048 }
2049 }
2050
2051 level = init_intr(cfg, hwq);
2052 if (unlikely(level)) {
2053 dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
2054 goto err2;
2055 }
2056
2057
2058 rc = cfg->ops->start_context(hwq->ctx_cookie);
2059 if (unlikely(rc)) {
2060 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2061 level = UNMAP_THREE;
2062 goto err2;
2063 }
2064
2065out:
2066 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2067 return rc;
2068err2:
2069 term_intr(cfg, level, index);
2070 if (index != PRIMARY_HWQ)
2071 cfg->ops->release_context(ctx);
2072err1:
2073 hwq->ctx_cookie = NULL;
2074 goto out;
2075}
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2093{
2094 struct afu *afu = cfg->afu;
2095 struct device *dev = &cfg->dev->dev;
2096 u64 port_mask;
2097 int num_fc_ports = LEGACY_FC_PORTS;
2098
2099 port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2100 if (port_mask != 0ULL)
2101 num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2102
2103 dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2104 __func__, port_mask, num_fc_ports);
2105
2106 cfg->num_fc_ports = num_fc_ports;
2107 cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2108}
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
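/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */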
2119static int init_afu(struct cxlflash_cfg *cfg)
2120{
2121 u64 reg;
2122 int rc = 0;
2123 struct afu *afu = cfg->afu;
2124 struct device *dev = &cfg->dev->dev;
2125 struct hwq *hwq;
2126 int i;
2127
2128 cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2129
2130 mutex_init(&afu->sync_active);
2131 afu->num_hwqs = afu->desired_hwqs;
2132 for (i = 0; i < afu->num_hwqs; i++) {
2133 rc = init_mc(cfg, i);
2134 if (rc) {
2135 dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2136 __func__, rc, i);
2137 goto err1;
2138 }
2139 }
2140
2141
2142 hwq = get_hwq(afu, PRIMARY_HWQ);
2143 afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2144 if (!afu->afu_map) {
2145 dev_err(dev, "%s: psa_map failed\n", __func__);
2146 rc = -ENOMEM;
2147 goto err1;
2148 }
2149
	/* No byte reverse on reading afu_version or the string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
2153 afu->interface_version =
2154 readq_be(&afu->afu_map->global.regs.interface_version);
2155 if ((afu->interface_version + 1) == 0) {
2156 dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2157 "interface version %016llx\n", afu->version,
2158 afu->interface_version);
2159 rc = -EINVAL;
2160 goto err1;
2161 }
2162
2163 if (afu_is_sq_cmd_mode(afu)) {
2164 afu->send_cmd = send_cmd_sq;
2165 afu->context_reset = context_reset_sq;
2166 } else {
2167 afu->send_cmd = send_cmd_ioarrin;
2168 afu->context_reset = context_reset_ioarrin;
2169 }
2170
2171 dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2172 afu->version, afu->interface_version);
2173
2174 get_num_afu_ports(cfg);
2175
2176 rc = start_afu(cfg);
2177 if (rc) {
2178 dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2179 goto err1;
2180 }
2181
2182 afu_err_intr_init(cfg->afu);
2183 for (i = 0; i < afu->num_hwqs; i++) {
2184 hwq = get_hwq(afu, i);
2185
2186 hwq->room = readq_be(&hwq->host_map->cmd_room);
2187 }
2188
2189
2190 cxlflash_restore_luntable(cfg);
2191out:
2192 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2193 return rc;
2194
2195err1:
2196 for (i = afu->num_hwqs - 1; i >= 0; i--) {
2197 term_intr(cfg, UNMAP_THREE, i);
2198 term_mc(cfg, i);
2199 }
2200 goto out;
2201}
2202
2203
2204
2205
2206
2207
2208
2209static int afu_reset(struct cxlflash_cfg *cfg)
2210{
2211 struct device *dev = &cfg->dev->dev;
2212 int rc = 0;
2213
2214
2215
2216
2217 term_afu(cfg);
2218
2219 rc = init_afu(cfg);
2220
2221 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2222 return rc;
2223}
2224
2225
2226
2227
2228
2229
2230
2231
2232static void drain_ioctls(struct cxlflash_cfg *cfg)
2233{
2234 down_write(&cfg->ioctl_rwsem);
2235 up_write(&cfg->ioctl_rwsem);
2236}
2237
2238
2239
2240
2241
2242
2243static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2244{
2245 struct cxlflash_cfg *cfg = data;
2246 struct device *dev = &cfg->dev->dev;
2247 int rc = 0;
2248
2249 if (cfg->state != STATE_RESET) {
2250 dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2251 __func__, cfg->state);
2252 goto out;
2253 }
2254
2255 drain_ioctls(cfg);
2256 cxlflash_mark_contexts_error(cfg);
2257 rc = afu_reset(cfg);
2258 if (rc)
2259 cfg->state = STATE_FAILTERM;
2260 else
2261 cfg->state = STATE_NORMAL;
2262 wake_up_all(&cfg->reset_waitq);
2263
2264out:
2265 scsi_unblock_requests(cfg->host);
2266}
2267
2268
2269
2270
2271
2272static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2273{
2274 struct device *dev = &cfg->dev->dev;
2275
2276 if (cfg->state != STATE_NORMAL) {
2277 dev_dbg(dev, "%s: Not performing reset state=%d\n",
2278 __func__, cfg->state);
2279 return;
2280 }
2281
2282 cfg->state = STATE_RESET;
2283 scsi_block_requests(cfg->host);
2284 cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2285 cfg);
2286}
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
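/**
 * send_afu_cmd() - builds and sends an internal AFU command
 * @afu:	AFU associated with the host.
 * @rcb:	Pre-populated IOARCB describing command to send.
 *
 * The AFU can only take one internal AFU command at a time. This limitation is
 * enforced by using a mutex to provide exclusive access to the AFU during the
 * operation, so calling threads must not be in interrupt context.
 *
 * The command status is optionally passed back to the caller when the caller
 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
 *
 * Return:
 *	0 on success, -errno on failure
 */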
2304static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2305{
2306 struct cxlflash_cfg *cfg = afu->parent;
2307 struct device *dev = &cfg->dev->dev;
2308 struct afu_cmd *cmd = NULL;
2309 struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2310 ulong lock_flags;
2311 char *buf = NULL;
2312 int rc = 0;
2313 int nretry = 0;
2314
2315 if (cfg->state != STATE_NORMAL) {
2316 dev_dbg(dev, "%s: Sync not required state=%u\n",
2317 __func__, cfg->state);
2318 return 0;
2319 }
2320
2321 mutex_lock(&afu->sync_active);
2322 atomic_inc(&afu->cmds_active);
2323 buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2324 if (unlikely(!buf)) {
2325 dev_err(dev, "%s: no memory for command\n", __func__);
2326 rc = -ENOMEM;
2327 goto out;
2328 }
2329
2330 cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2331
2332retry:
2333 memset(cmd, 0, sizeof(*cmd));
2334 memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2335 INIT_LIST_HEAD(&cmd->queue);
2336 init_completion(&cmd->cevent);
2337 cmd->parent = afu;
2338 cmd->hwq_index = hwq->index;
2339 cmd->rcb.ctx_id = hwq->ctx_hndl;
2340
2341 dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2342 __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2343
2344 rc = afu->send_cmd(afu, cmd);
2345 if (unlikely(rc)) {
2346 rc = -ENOBUFS;
2347 goto out;
2348 }
2349
2350 rc = wait_resp(afu, cmd);
	switch (rc) {
	case -ETIMEDOUT:
		rc = afu->context_reset(hwq);
		if (rc) {
			/* Delete the command from the pending list */
			spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
			list_del(&cmd->list);
			spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

			cxlflash_schedule_async_reset(cfg);
			break;
		}
		/* fall through - to retry */
	case -EAGAIN:
		if (++nretry < 2)
			goto retry;
		/* fall through - to exit on retry limit */
	default:
		break;
	}
2371
2372 if (rcb->ioasa)
2373 *rcb->ioasa = cmd->sa;
2374out:
2375 atomic_dec(&afu->cmds_active);
2376 mutex_unlock(&afu->sync_active);
2377 kfree(buf);
2378 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2379 return rc;
2380}
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
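/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx:	Identifies context requesting sync.
 * @res:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * Return: 0 on success, -errno on failure
 */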
2398int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2399{
2400 struct cxlflash_cfg *cfg = afu->parent;
2401 struct device *dev = &cfg->dev->dev;
2402 struct sisl_ioarcb rcb = { 0 };
2403
2404 dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2405 __func__, afu, ctx, res, mode);
2406
2407 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2408 rcb.msi = SISL_MSI_RRQ_UPDATED;
2409 rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2410
2411 rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2412 rcb.cdb[1] = mode;
2413 put_unaligned_be16(ctx, &rcb.cdb[2]);
2414 put_unaligned_be32(res, &rcb.cdb[4]);
2415
2416 return send_afu_cmd(afu, &rcb);
2417}
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2430{
2431 int rc = FAILED;
2432 struct Scsi_Host *host = scp->device->host;
2433 struct cxlflash_cfg *cfg = shost_priv(host);
2434 struct afu_cmd *cmd = sc_to_afuc(scp);
2435 struct device *dev = &cfg->dev->dev;
2436 struct afu *afu = cfg->afu;
2437 struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2438
2439 dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2440 "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2441 scp->device->channel, scp->device->id, scp->device->lun,
2442 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2443 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2444 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2445 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2446
2447
2448
2449
2450 if (cfg->state != STATE_NORMAL) {
2451 dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2452 __func__, cfg->state);
2453 goto out;
2454 }
2455
2456 rc = afu->context_reset(hwq);
2457 if (unlikely(rc))
2458 goto out;
2459
2460 rc = SUCCESS;
2461
2462out:
2463 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2464 return rc;
2465}
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2476{
2477 int rc = SUCCESS;
2478 struct scsi_device *sdev = scp->device;
2479 struct Scsi_Host *host = sdev->host;
2480 struct cxlflash_cfg *cfg = shost_priv(host);
2481 struct device *dev = &cfg->dev->dev;
2482 int rcr = 0;
2483
2484 dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2485 host->host_no, sdev->channel, sdev->id, sdev->lun);
2486retry:
2487 switch (cfg->state) {
2488 case STATE_NORMAL:
2489 rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2490 if (unlikely(rcr))
2491 rc = FAILED;
2492 break;
2493 case STATE_RESET:
2494 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2495 goto retry;
2496 default:
2497 rc = FAILED;
2498 break;
2499 }
2500
2501 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2502 return rc;
2503}
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2519{
2520 int rc = SUCCESS;
2521 int rcr = 0;
2522 struct Scsi_Host *host = scp->device->host;
2523 struct cxlflash_cfg *cfg = shost_priv(host);
2524 struct device *dev = &cfg->dev->dev;
2525
2526 dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2527
2528 switch (cfg->state) {
2529 case STATE_NORMAL:
2530 cfg->state = STATE_RESET;
2531 drain_ioctls(cfg);
2532 cxlflash_mark_contexts_error(cfg);
2533 rcr = afu_reset(cfg);
2534 if (rcr) {
2535 rc = FAILED;
2536 cfg->state = STATE_FAILTERM;
2537 } else
2538 cfg->state = STATE_NORMAL;
2539 wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		/* fall through */
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
2548 rc = FAILED;
2549 break;
2550 }
2551
2552 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2553 return rc;
2554}
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2566{
2567
2568 if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2569 qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2570
2571 scsi_change_queue_depth(sdev, qdepth);
2572 return sdev->queue_depth;
2573}
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583static ssize_t cxlflash_show_port_status(u32 port,
2584 struct cxlflash_cfg *cfg,
2585 char *buf)
2586{
2587 struct device *dev = &cfg->dev->dev;
2588 char *disp_status;
2589 u64 status;
2590 __be64 __iomem *fc_port_regs;
2591
2592 WARN_ON(port >= MAX_FC_PORTS);
2593
2594 if (port >= cfg->num_fc_ports) {
2595 dev_info(dev, "%s: Port %d not supported on this card.\n",
2596 __func__, port);
2597 return -EINVAL;
2598 }
2599
2600 fc_port_regs = get_fc_port_regs(cfg, port);
2601 status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2602 status &= FC_MTIP_STATUS_MASK;
2603
2604 if (status == FC_MTIP_STATUS_ONLINE)
2605 disp_status = "online";
2606 else if (status == FC_MTIP_STATUS_OFFLINE)
2607 disp_status = "offline";
2608 else
2609 disp_status = "unknown";
2610
2611 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2612}
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622static ssize_t port0_show(struct device *dev,
2623 struct device_attribute *attr,
2624 char *buf)
2625{
2626 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2627
2628 return cxlflash_show_port_status(0, cfg, buf);
2629}
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639static ssize_t port1_show(struct device *dev,
2640 struct device_attribute *attr,
2641 char *buf)
2642{
2643 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2644
2645 return cxlflash_show_port_status(1, cfg, buf);
2646}
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656static ssize_t port2_show(struct device *dev,
2657 struct device_attribute *attr,
2658 char *buf)
2659{
2660 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2661
2662 return cxlflash_show_port_status(2, cfg, buf);
2663}
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673static ssize_t port3_show(struct device *dev,
2674 struct device_attribute *attr,
2675 char *buf)
2676{
2677 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2678
2679 return cxlflash_show_port_status(3, cfg, buf);
2680}
2681
2682
2683
2684
2685
2686
2687
2688
2689
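/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */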
2690static ssize_t lun_mode_show(struct device *dev,
2691 struct device_attribute *attr, char *buf)
2692{
2693 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2694 struct afu *afu = cfg->afu;
2695
2696 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2697}
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
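/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The AFU supports an internal LUN mode where space on the card itself is
 * presented as one or more LUNs, avoiding the need for external storage
 * during development and test. A value of 0 selects the external LUNs
 * (default); values 1-4 select the internal LUN configurations. Changing
 * the mode resets the AFU and rescans the host.
 *
 * Return: @count.
 */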
2721static ssize_t lun_mode_store(struct device *dev,
2722 struct device_attribute *attr,
2723 const char *buf, size_t count)
2724{
2725 struct Scsi_Host *shost = class_to_shost(dev);
2726 struct cxlflash_cfg *cfg = shost_priv(shost);
2727 struct afu *afu = cfg->afu;
2728 int rc;
2729 u32 lun_mode;
2730
2731 rc = kstrtouint(buf, 10, &lun_mode);
2732 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2733 afu->internal_lun = lun_mode;
2734
2735
2736
2737
2738
2739
2740 if (afu->internal_lun)
2741 shost->max_channel = 0;
2742 else
2743 shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2744
2745 afu_reset(cfg);
2746 scsi_scan_host(cfg->host);
2747 }
2748
2749 return count;
2750}
2751
2752
2753
2754
2755
2756
2757
2758
2759
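/**
 * ioctl_version_show() - presents the current ioctl versions of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl versions.
 *
 * Return: The size of the ASCII string returned in @buf.
 */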
2760static ssize_t ioctl_version_show(struct device *dev,
2761 struct device_attribute *attr, char *buf)
2762{
2763 ssize_t bytes = 0;
2764
2765 bytes = scnprintf(buf, PAGE_SIZE,
2766 "disk: %u\n", DK_CXLFLASH_VERSION_0);
2767 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2768 "host: %u\n", HT_CXLFLASH_VERSION_0);
2769
2770 return bytes;
2771}
2772
2773
2774
2775
2776
2777
2778
2779
2780
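/**
 * cxlflash_show_port_lun_table() - presents the current LUN table of a port
 * @port:	Desired port for LUN table reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */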
2781static ssize_t cxlflash_show_port_lun_table(u32 port,
2782 struct cxlflash_cfg *cfg,
2783 char *buf)
2784{
2785 struct device *dev = &cfg->dev->dev;
2786 __be64 __iomem *fc_port_luns;
2787 int i;
2788 ssize_t bytes = 0;
2789
2790 WARN_ON(port >= MAX_FC_PORTS);
2791
2792 if (port >= cfg->num_fc_ports) {
2793 dev_info(dev, "%s: Port %d not supported on this card.\n",
2794 __func__, port);
2795 return -EINVAL;
2796 }
2797
2798 fc_port_luns = get_fc_port_luns(cfg, port);
2799
2800 for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2801 bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2802 "%03d: %016llx\n",
2803 i, readq_be(&fc_port_luns[i]));
2804 return bytes;
2805}
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815static ssize_t port0_lun_table_show(struct device *dev,
2816 struct device_attribute *attr,
2817 char *buf)
2818{
2819 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2820
2821 return cxlflash_show_port_lun_table(0, cfg, buf);
2822}
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832static ssize_t port1_lun_table_show(struct device *dev,
2833 struct device_attribute *attr,
2834 char *buf)
2835{
2836 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2837
2838 return cxlflash_show_port_lun_table(1, cfg, buf);
2839}
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849static ssize_t port2_lun_table_show(struct device *dev,
2850 struct device_attribute *attr,
2851 char *buf)
2852{
2853 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2854
2855 return cxlflash_show_port_lun_table(2, cfg, buf);
2856}
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866static ssize_t port3_lun_table_show(struct device *dev,
2867 struct device_attribute *attr,
2868 char *buf)
2869{
2870 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2871
2872 return cxlflash_show_port_lun_table(3, cfg, buf);
2873}
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
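/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */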
2886static ssize_t irqpoll_weight_show(struct device *dev,
2887 struct device_attribute *attr, char *buf)
2888{
2889 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2890 struct afu *afu = cfg->afu;
2891
2892 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2893}
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
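/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled. Any polling that
 * is currently enabled is torn down before the new weight is applied to
 * each hardware queue.
 *
 * Return: @count on success, -EINVAL otherwise.
 */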
2907static ssize_t irqpoll_weight_store(struct device *dev,
2908 struct device_attribute *attr,
2909 const char *buf, size_t count)
2910{
2911 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2912 struct device *cfgdev = &cfg->dev->dev;
2913 struct afu *afu = cfg->afu;
2914 struct hwq *hwq;
2915 u32 weight;
2916 int rc, i;
2917
2918 rc = kstrtouint(buf, 10, &weight);
2919 if (rc)
2920 return -EINVAL;
2921
2922 if (weight > 256) {
2923 dev_info(cfgdev,
2924 "Invalid IRQ poll weight. It must be 256 or less.\n");
2925 return -EINVAL;
2926 }
2927
2928 if (weight == afu->irqpoll_weight) {
2929 		dev_info(cfgdev,
2930 			 "New IRQ poll weight is the same as the current weight.\n");
2931 return -EINVAL;
2932 }
2933
2934 if (afu_is_irqpoll_enabled(afu)) {
2935 for (i = 0; i < afu->num_hwqs; i++) {
2936 hwq = get_hwq(afu, i);
2937
2938 irq_poll_disable(&hwq->irqpoll);
2939 }
2940 }
2941
2942 afu->irqpoll_weight = weight;
2943
2944 if (weight > 0) {
2945 for (i = 0; i < afu->num_hwqs; i++) {
2946 hwq = get_hwq(afu, i);
2947
2948 irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2949 }
2950 }
2951
2952 return count;
2953}
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
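/**
 * num_hwqs_show() - presents the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
 *		queues in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */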
2964static ssize_t num_hwqs_show(struct device *dev,
2965 struct device_attribute *attr, char *buf)
2966{
2967 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2968 struct afu *afu = cfg->afu;
2969
2970 return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2971}
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
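/**
 * num_hwqs_store() - sets the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
 *		queues in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * A positive value requests that exact number of queues, 0 requests one
 * queue per online CPU, and a negative value requests the number of online
 * CPUs divided by the absolute value provided. The result is capped at
 * CXLFLASH_MAX_HWQS and applied by resetting the AFU.
 *
 * Return: @count.
 */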
2987static ssize_t num_hwqs_store(struct device *dev,
2988 struct device_attribute *attr,
2989 const char *buf, size_t count)
2990{
2991 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2992 struct afu *afu = cfg->afu;
2993 int rc;
2994 int nhwqs, num_hwqs;
2995
2996 rc = kstrtoint(buf, 10, &nhwqs);
2997 if (rc)
2998 return -EINVAL;
2999
3000 if (nhwqs >= 1)
3001 num_hwqs = nhwqs;
3002 else if (nhwqs == 0)
3003 num_hwqs = num_online_cpus();
3004 else
3005 num_hwqs = num_online_cpus() / abs(nhwqs);
3006
3007 afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
3008 WARN_ON_ONCE(afu->desired_hwqs == 0);
3009
3010retry:
3011 switch (cfg->state) {
3012 case STATE_NORMAL:
3013 cfg->state = STATE_RESET;
3014 drain_ioctls(cfg);
3015 cxlflash_mark_contexts_error(cfg);
3016 rc = afu_reset(cfg);
3017 if (rc)
3018 cfg->state = STATE_FAILTERM;
3019 else
3020 cfg->state = STATE_NORMAL;
3021 wake_up_all(&cfg->reset_waitq);
3022 break;
3023 case STATE_RESET:
3024 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
3025 if (cfg->state == STATE_NORMAL)
3026 goto retry;
3027
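		/* fall through */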
3028 default:
3029
3030 dev_err(dev, "%s: Device is not ready, state=%d\n",
3031 __func__, cfg->state);
3032 break;
3033 }
3034
3035 return count;
3036}
3037
3038static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
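/**
 * hwq_mode_show() - presents the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
 *		in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */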
3049static ssize_t hwq_mode_show(struct device *dev,
3050 struct device_attribute *attr, char *buf)
3051{
3052 struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3053 struct afu *afu = cfg->afu;
3054
3055 return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3056}
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
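/**
 * hwq_mode_store() - sets the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
 *		in ASCII ("rr", "tag", or "cpu").
 * @count:	Length of data residing in @buf.
 *
 * The supported modes are round robin ("rr"), block MQ tagging ("tag") and
 * CPU affinity ("cpu") steering.
 *
 * Return: @count on success, -EINVAL when the mode is not recognized.
 */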
3072static ssize_t hwq_mode_store(struct device *dev,
3073 struct device_attribute *attr,
3074 const char *buf, size_t count)
3075{
3076 struct Scsi_Host *shost = class_to_shost(dev);
3077 struct cxlflash_cfg *cfg = shost_priv(shost);
3078 struct device *cfgdev = &cfg->dev->dev;
3079 struct afu *afu = cfg->afu;
3080 int i;
3081 u32 mode = MAX_HWQ_MODE;
3082
3083 for (i = 0; i < MAX_HWQ_MODE; i++) {
3084 if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3085 mode = i;
3086 break;
3087 }
3088 }
3089
3090 if (mode >= MAX_HWQ_MODE) {
3091 dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3092 return -EINVAL;
3093 }
3094
3095 afu->hwq_mode = mode;
3096
3097 return count;
3098}
3099
3100
3101
3102
3103
3104
3105
3106
3107
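/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the device mode
 *		in ASCII ("superpipe" or "legacy").
 *
 * Return: The size of the ASCII string returned in @buf.
 */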
3108static ssize_t mode_show(struct device *dev,
3109 struct device_attribute *attr, char *buf)
3110{
3111 struct scsi_device *sdev = to_scsi_device(dev);
3112
3113 return scnprintf(buf, PAGE_SIZE, "%s\n",
3114 sdev->hostdata ? "superpipe" : "legacy");
3115}
3116
3117
3118
3119
3120static DEVICE_ATTR_RO(port0);
3121static DEVICE_ATTR_RO(port1);
3122static DEVICE_ATTR_RO(port2);
3123static DEVICE_ATTR_RO(port3);
3124static DEVICE_ATTR_RW(lun_mode);
3125static DEVICE_ATTR_RO(ioctl_version);
3126static DEVICE_ATTR_RO(port0_lun_table);
3127static DEVICE_ATTR_RO(port1_lun_table);
3128static DEVICE_ATTR_RO(port2_lun_table);
3129static DEVICE_ATTR_RO(port3_lun_table);
3130static DEVICE_ATTR_RW(irqpoll_weight);
3131static DEVICE_ATTR_RW(num_hwqs);
3132static DEVICE_ATTR_RW(hwq_mode);
3133
3134static struct device_attribute *cxlflash_host_attrs[] = {
3135 &dev_attr_port0,
3136 &dev_attr_port1,
3137 &dev_attr_port2,
3138 &dev_attr_port3,
3139 &dev_attr_lun_mode,
3140 &dev_attr_ioctl_version,
3141 &dev_attr_port0_lun_table,
3142 &dev_attr_port1_lun_table,
3143 &dev_attr_port2_lun_table,
3144 &dev_attr_port3_lun_table,
3145 &dev_attr_irqpoll_weight,
3146 &dev_attr_num_hwqs,
3147 &dev_attr_hwq_mode,
3148 NULL
3149};
3150
3151
3152
3153
3154static DEVICE_ATTR_RO(mode);
3155
3156static struct device_attribute *cxlflash_dev_attrs[] = {
3157 &dev_attr_mode,
3158 NULL
3159};
3160
3161
3162
3163
3164static struct scsi_host_template driver_template = {
3165 .module = THIS_MODULE,
3166 .name = CXLFLASH_ADAPTER_NAME,
3167 .info = cxlflash_driver_info,
3168 .ioctl = cxlflash_ioctl,
3169 .proc_name = CXLFLASH_NAME,
3170 .queuecommand = cxlflash_queuecommand,
3171 .eh_abort_handler = cxlflash_eh_abort_handler,
3172 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3173 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3174 .change_queue_depth = cxlflash_change_queue_depth,
3175 .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3176 .can_queue = CXLFLASH_MAX_CMDS,
3177 .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3178 .this_id = -1,
3179 .sg_tablesize = 1,
3180 .max_sectors = CXLFLASH_MAX_SECTORS,
3181 .shost_attrs = cxlflash_host_attrs,
3182 .sdev_attrs = cxlflash_dev_attrs,
3183};
3184
3185
3186
3187
3188static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3189 CXLFLASH_WWPN_VPD_REQUIRED };
3190static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3191 CXLFLASH_NOTIFY_SHUTDOWN };
3192static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3193 (CXLFLASH_NOTIFY_SHUTDOWN |
3194 CXLFLASH_OCXL_DEV) };
3195
3196
3197
3198
3199static struct pci_device_id cxlflash_pci_table[] = {
3200 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3201 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3202 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3203 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3204 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3205 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3206 {}
3207};
3208
3209MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
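/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed in interrupt context due to
 *   blocking up to a few seconds
 * - Rescan the host
 */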
3220static void cxlflash_worker_thread(struct work_struct *work)
3221{
3222 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3223 work_q);
3224 struct afu *afu = cfg->afu;
3225 struct device *dev = &cfg->dev->dev;
3226 __be64 __iomem *fc_port_regs;
3227 int port;
3228 ulong lock_flags;
3229
3230
3231
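	/* Avoid MMIO if the device has failed */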
3232 if (cfg->state != STATE_NORMAL)
3233 return;
3234
3235 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3236
3237 if (cfg->lr_state == LINK_RESET_REQUIRED) {
3238 port = cfg->lr_port;
3239 if (port < 0)
3240 dev_err(dev, "%s: invalid port index %d\n",
3241 __func__, port);
3242 else {
3243 spin_unlock_irqrestore(cfg->host->host_lock,
3244 lock_flags);
3245
3246
3247 fc_port_regs = get_fc_port_regs(cfg, port);
3248 afu_link_reset(afu, port, fc_port_regs);
3249 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3250 }
3251
3252 cfg->lr_state = LINK_RESET_COMPLETE;
3253 }
3254
3255 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3256
3257 if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3258 scsi_scan_host(cfg->host);
3259}
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270static int cxlflash_chr_open(struct inode *inode, struct file *file)
3271{
3272 struct cxlflash_cfg *cfg;
3273
3274 if (!capable(CAP_SYS_ADMIN))
3275 return -EACCES;
3276
3277 cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3278 file->private_data = cfg;
3279
3280 return 0;
3281}
3282
3283
3284
3285
3286
3287
3288
3289static char *decode_hioctl(unsigned int cmd)
3290{
3291 switch (cmd) {
3292 case HT_CXLFLASH_LUN_PROVISION:
3293 return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3294 }
3295
3296 return "UNKNOWN";
3297}
3298
3299
3300
3301
3302
3303
3304
3305
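/**
 * cxlflash_lun_provision() - host LUN provisioning handler
 * @cfg:	Internal structure associated with the host.
 * @lunprov:	Kernel copy of userspace ioctl data structure.
 *
 * Creates or deletes a LUN on the specified port, or queries the port's
 * provisioning capacity, by issuing an AFU command.
 *
 * Return: 0 on success, -errno on failure
 */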
3306static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3307 struct ht_cxlflash_lun_provision *lunprov)
3308{
3309 struct afu *afu = cfg->afu;
3310 struct device *dev = &cfg->dev->dev;
3311 struct sisl_ioarcb rcb;
3312 struct sisl_ioasa asa;
3313 __be64 __iomem *fc_port_regs;
3314 u16 port = lunprov->port;
3315 u16 scmd = lunprov->hdr.subcmd;
3316 u16 type;
3317 u64 reg;
3318 u64 size;
3319 u64 lun_id;
3320 int rc = 0;
3321
3322 if (!afu_is_lun_provision(afu)) {
3323 rc = -ENOTSUPP;
3324 goto out;
3325 }
3326
3327 if (port >= cfg->num_fc_ports) {
3328 rc = -EINVAL;
3329 goto out;
3330 }
3331
3332 switch (scmd) {
3333 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3334 type = SISL_AFU_LUN_PROVISION_CREATE;
3335 size = lunprov->size;
3336 lun_id = 0;
3337 break;
3338 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3339 type = SISL_AFU_LUN_PROVISION_DELETE;
3340 size = 0;
3341 lun_id = lunprov->lun_id;
3342 break;
3343 case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3344 fc_port_regs = get_fc_port_regs(cfg, port);
3345
3346 reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3347 lunprov->max_num_luns = reg;
3348 reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3349 lunprov->cur_num_luns = reg;
3350 reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3351 lunprov->max_cap_port = reg;
3352 reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3353 lunprov->cur_cap_port = reg;
3354
3355 goto out;
3356 default:
3357 rc = -EINVAL;
3358 goto out;
3359 }
3360
3361 memset(&rcb, 0, sizeof(rcb));
3362 memset(&asa, 0, sizeof(asa));
3363 rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3364 rcb.lun_id = lun_id;
3365 rcb.msi = SISL_MSI_RRQ_UPDATED;
3366 rcb.timeout = MC_LUN_PROV_TIMEOUT;
3367 rcb.ioasa = &asa;
3368
3369 rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3370 rcb.cdb[1] = type;
3371 rcb.cdb[2] = port;
3372 put_unaligned_be64(size, &rcb.cdb[8]);
3373
3374 rc = send_afu_cmd(afu, &rcb);
3375 if (rc) {
3376 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3377 __func__, rc, asa.ioasc, asa.afu_extra);
3378 goto out;
3379 }
3380
3381 if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3382 lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3383 memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3384 }
3385out:
3386 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3387 return rc;
3388}
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
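/**
 * cxlflash_afu_debug() - host AFU debug handler
 * @cfg:	Internal structure associated with the host.
 * @afu_dbg:	Kernel copy of userspace ioctl data structure.
 *
 * For debug commands that move data, a kernel buffer is allocated and
 * aligned to a cache line before being handed to the AFU; data is copied
 * to or from user space depending on the direction of the request.
 *
 * Return: 0 on success, -errno on failure
 */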
3400static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3401 struct ht_cxlflash_afu_debug *afu_dbg)
3402{
3403 struct afu *afu = cfg->afu;
3404 struct device *dev = &cfg->dev->dev;
3405 struct sisl_ioarcb rcb;
3406 struct sisl_ioasa asa;
3407 char *buf = NULL;
3408 char *kbuf = NULL;
3409 void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3410 u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3411 u32 ulen = afu_dbg->data_len;
3412 bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3413 int rc = 0;
3414
3415 if (!afu_is_afu_debug(afu)) {
3416 rc = -ENOTSUPP;
3417 goto out;
3418 }
3419
3420 if (ulen) {
3421 req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3422
3423 if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3424 rc = -EINVAL;
3425 goto out;
3426 }
3427
3428 buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3429 if (unlikely(!buf)) {
3430 rc = -ENOMEM;
3431 goto out;
3432 }
3433
3434 kbuf = PTR_ALIGN(buf, cache_line_size());
3435
3436 if (is_write) {
3437 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3438
3439 if (copy_from_user(kbuf, ubuf, ulen)) {
3440 rc = -EFAULT;
3441 goto out;
3442 }
3443 }
3444 }
3445
3446 memset(&rcb, 0, sizeof(rcb));
3447 memset(&asa, 0, sizeof(asa));
3448
3449 rcb.req_flags = req_flags;
3450 rcb.msi = SISL_MSI_RRQ_UPDATED;
3451 rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3452 rcb.ioasa = &asa;
3453
3454 if (ulen) {
3455 rcb.data_len = ulen;
3456 rcb.data_ea = (uintptr_t)kbuf;
3457 }
3458
3459 rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3460 memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3461 HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3462
3463 rc = send_afu_cmd(afu, &rcb);
3464 if (rc) {
3465 dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3466 __func__, rc, asa.ioasc, asa.afu_extra);
3467 goto out;
3468 }
3469
3470 if (ulen && !is_write) {
3471 if (copy_to_user(ubuf, kbuf, ulen))
3472 rc = -EFAULT;
3473 }
3474out:
3475 kfree(buf);
3476 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3477 return rc;
3478}
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
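/**
 * cxlflash_chr_ioctl() - character device IOCTL handler
 * @file:	File pointer for this device.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to protect against any ioctl arriving
 * while the device is being removed or reset. The ioctl header is
 * validated (version and reserved fields) before dispatching to the
 * per-command handler.
 *
 * Return: 0 on success, -errno on failure
 */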
3496static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3497 unsigned long arg)
3498{
3499 typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3500
3501 struct cxlflash_cfg *cfg = file->private_data;
3502 struct device *dev = &cfg->dev->dev;
3503 char buf[sizeof(union cxlflash_ht_ioctls)];
3504 void __user *uarg = (void __user *)arg;
3505 struct ht_cxlflash_hdr *hdr;
3506 size_t size = 0;
3507 bool known_ioctl = false;
3508 int idx = 0;
3509 int rc = 0;
3510 hioctl do_ioctl = NULL;
3511
3512 static const struct {
3513 size_t size;
3514 hioctl ioctl;
3515 } ioctl_tbl[] = {
3516 { sizeof(struct ht_cxlflash_lun_provision),
3517 (hioctl)cxlflash_lun_provision },
3518 { sizeof(struct ht_cxlflash_afu_debug),
3519 (hioctl)cxlflash_afu_debug },
3520 };
3521
3522
3523 down_read(&cfg->ioctl_rwsem);
3524
3525 dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3526 __func__, cmd, idx, sizeof(ioctl_tbl));
3527
3528 switch (cmd) {
3529 case HT_CXLFLASH_LUN_PROVISION:
3530 case HT_CXLFLASH_AFU_DEBUG:
3531 known_ioctl = true;
3532 idx = _IOC_NR(HT_CXLFLASH_LUN_PROVISION) - _IOC_NR(cmd);
3533 size = ioctl_tbl[idx].size;
3534 do_ioctl = ioctl_tbl[idx].ioctl;
3535
3536 if (likely(do_ioctl))
3537 break;
3538
3539
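		/* fall through */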
3540 default:
3541 rc = -EINVAL;
3542 goto out;
3543 }
3544
3545 if (unlikely(copy_from_user(&buf, uarg, size))) {
3546 dev_err(dev, "%s: copy_from_user() fail "
3547 "size=%lu cmd=%d (%s) uarg=%p\n",
3548 __func__, size, cmd, decode_hioctl(cmd), uarg);
3549 rc = -EFAULT;
3550 goto out;
3551 }
3552
3553 hdr = (struct ht_cxlflash_hdr *)&buf;
3554 if (hdr->version != HT_CXLFLASH_VERSION_0) {
3555 dev_dbg(dev, "%s: Version %u not supported for %s\n",
3556 __func__, hdr->version, decode_hioctl(cmd));
3557 rc = -EINVAL;
3558 goto out;
3559 }
3560
3561 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3562 dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3563 rc = -EINVAL;
3564 goto out;
3565 }
3566
3567 rc = do_ioctl(cfg, (void *)&buf);
3568 if (likely(!rc))
3569 if (unlikely(copy_to_user(uarg, &buf, size))) {
3570 dev_err(dev, "%s: copy_to_user() fail "
3571 "size=%lu cmd=%d (%s) uarg=%p\n",
3572 __func__, size, cmd, decode_hioctl(cmd), uarg);
3573 rc = -EFAULT;
3574 }
3575
3576
3577
3578out:
3579 up_read(&cfg->ioctl_rwsem);
3580 if (unlikely(rc && known_ioctl))
3581 dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3582 __func__, decode_hioctl(cmd), cmd, rc);
3583 else
3584 dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3585 __func__, decode_hioctl(cmd), cmd, rc);
3586 return rc;
3587}
3588
3589
3590
3591
3592static const struct file_operations cxlflash_chr_fops = {
3593 .owner = THIS_MODULE,
3594 .open = cxlflash_chr_open,
3595 .unlocked_ioctl = cxlflash_chr_ioctl,
3596 .compat_ioctl = cxlflash_chr_ioctl,
3597};
3598
3599
3600
3601
3602
3603
3604
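/**
 * init_chrdev() - initialize the character device for the host
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */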
3605static int init_chrdev(struct cxlflash_cfg *cfg)
3606{
3607 struct device *dev = &cfg->dev->dev;
3608 struct device *char_dev;
3609 dev_t devno;
3610 int minor;
3611 int rc = 0;
3612
3613 minor = cxlflash_get_minor();
3614 if (unlikely(minor < 0)) {
3615 dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3616 rc = -ENOSPC;
3617 goto out;
3618 }
3619
3620 devno = MKDEV(cxlflash_major, minor);
3621 cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3622
3623 rc = cdev_add(&cfg->cdev, devno, 1);
3624 if (rc) {
3625 dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3626 goto err1;
3627 }
3628
3629 char_dev = device_create(cxlflash_class, NULL, devno,
3630 NULL, "cxlflash%d", minor);
3631 if (IS_ERR(char_dev)) {
3632 rc = PTR_ERR(char_dev);
3633 dev_err(dev, "%s: device_create failed rc=%d\n",
3634 __func__, rc);
3635 goto err2;
3636 }
3637
3638 cfg->chardev = char_dev;
3639out:
3640 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3641 return rc;
3642err2:
3643 cdev_del(&cfg->cdev);
3644err1:
3645 cxlflash_put_minor(minor);
3646 goto out;
3647}
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
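/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * The device will initially start out in a 'probing' state and transition
 * to the 'normal' state at the end of a successful probe. Should an EEH
 * event occur during probe, the notification thread (error_detected())
 * will wait until the probe handler is nearly complete. At that time, the
 * device will be moved to a 'probed' state and the EEH thread woken up to
 * drive the slot reset and recovery (device moves to 'normal' state at the
 * end of recovery).
 *
 * Return: 0 on success, -errno on failure
 */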
3665static int cxlflash_probe(struct pci_dev *pdev,
3666 const struct pci_device_id *dev_id)
3667{
3668 struct Scsi_Host *host;
3669 struct cxlflash_cfg *cfg = NULL;
3670 struct device *dev = &pdev->dev;
3671 struct dev_dependent_vals *ddv;
3672 int rc = 0;
3673 int k;
3674
3675 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3676 __func__, pdev->irq);
3677
3678 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3679 driver_template.max_sectors = ddv->max_sectors;
3680
3681 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3682 if (!host) {
3683 dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3684 rc = -ENOMEM;
3685 goto out;
3686 }
3687
3688 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3689 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3690 host->unique_id = host->host_no;
3691 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3692
3693 cfg = shost_priv(host);
3694 cfg->state = STATE_PROBING;
3695 cfg->host = host;
3696 rc = alloc_mem(cfg);
3697 if (rc) {
3698 dev_err(dev, "%s: alloc_mem failed\n", __func__);
3699 rc = -ENOMEM;
3700 scsi_host_put(cfg->host);
3701 goto out;
3702 }
3703
3704 cfg->init_state = INIT_STATE_NONE;
3705 cfg->dev = pdev;
3706 cfg->cxl_fops = cxlflash_cxl_fops;
3707 cfg->ops = cxlflash_assign_ops(ddv);
3708 WARN_ON_ONCE(!cfg->ops);
3709
3710
3711
3712
3713
3714
3715
3716
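	/*
	 * Promoted LUNs move to the top of the LUN table. The rest stay on
	 * the bottom half, which grows from the end of the table, whereas
	 * the top half grows from the beginning.
	 *
	 * Initialize the last LUN index for all possible ports.
	 */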
3717 cfg->promote_lun_index = 0;
3718
3719 for (k = 0; k < MAX_FC_PORTS; k++)
3720 cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3721
3722 cfg->dev_id = (struct pci_device_id *)dev_id;
3723
3724 init_waitqueue_head(&cfg->tmf_waitq);
3725 init_waitqueue_head(&cfg->reset_waitq);
3726
3727 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3728 cfg->lr_state = LINK_RESET_INVALID;
3729 cfg->lr_port = -1;
3730 spin_lock_init(&cfg->tmf_slock);
3731 mutex_init(&cfg->ctx_tbl_list_mutex);
3732 mutex_init(&cfg->ctx_recovery_mutex);
3733 init_rwsem(&cfg->ioctl_rwsem);
3734 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3735 INIT_LIST_HEAD(&cfg->lluns);
3736
3737 pci_set_drvdata(pdev, cfg);
3738
3739 rc = init_pci(cfg);
3740 if (rc) {
3741 dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3742 goto out_remove;
3743 }
3744 cfg->init_state = INIT_STATE_PCI;
3745
3746 cfg->afu_cookie = cfg->ops->create_afu(pdev);
3747 if (unlikely(!cfg->afu_cookie)) {
3748 		dev_err(dev, "%s: create_afu failed\n", __func__);
 		rc = -ENOMEM;
3749 		goto out_remove;
3750 }
3751
3752 rc = init_afu(cfg);
3753 if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3754 dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3755 goto out_remove;
3756 }
3757 cfg->init_state = INIT_STATE_AFU;
3758
3759 rc = init_scsi(cfg);
3760 if (rc) {
3761 dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3762 goto out_remove;
3763 }
3764 cfg->init_state = INIT_STATE_SCSI;
3765
3766 rc = init_chrdev(cfg);
3767 if (rc) {
3768 dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3769 goto out_remove;
3770 }
3771 cfg->init_state = INIT_STATE_CDEV;
3772
3773 if (wq_has_sleeper(&cfg->reset_waitq)) {
3774 cfg->state = STATE_PROBED;
3775 wake_up_all(&cfg->reset_waitq);
3776 } else
3777 cfg->state = STATE_NORMAL;
3778out:
3779 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3780 return rc;
3781
3782out_remove:
3783 cfg->state = STATE_PROBED;
3784 cxlflash_remove(pdev);
3785 goto out;
3786}
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
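/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take the proper action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */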
3798static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3799 pci_channel_state_t state)
3800{
3801 int rc = 0;
3802 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3803 struct device *dev = &cfg->dev->dev;
3804
3805 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3806
3807 switch (state) {
3808 case pci_channel_io_frozen:
3809 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3810 cfg->state != STATE_PROBING);
3811 if (cfg->state == STATE_FAILTERM)
3812 return PCI_ERS_RESULT_DISCONNECT;
3813
3814 cfg->state = STATE_RESET;
3815 scsi_block_requests(cfg->host);
3816 drain_ioctls(cfg);
3817 rc = cxlflash_mark_contexts_error(cfg);
3818 if (unlikely(rc))
3819 dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3820 __func__, rc);
3821 term_afu(cfg);
3822 return PCI_ERS_RESULT_NEED_RESET;
3823 case pci_channel_io_perm_failure:
3824 cfg->state = STATE_FAILTERM;
3825 wake_up_all(&cfg->reset_waitq);
3826 scsi_unblock_requests(cfg->host);
3827 return PCI_ERS_RESULT_DISCONNECT;
3828 default:
3829 break;
3830 }
3831 return PCI_ERS_RESULT_NEED_RESET;
3832}
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
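/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the PCI error recovery code after the slot
 * has been reset, just before normal operations should resume.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */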
3843static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3844{
3845 int rc = 0;
3846 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3847 struct device *dev = &cfg->dev->dev;
3848
3849 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3850
3851 rc = init_afu(cfg);
3852 if (unlikely(rc)) {
3853 dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3854 return PCI_ERS_RESULT_DISCONNECT;
3855 }
3856
3857 return PCI_ERS_RESULT_RECOVERED;
3858}
3859
3860
3861
3862
3863
3864static void cxlflash_pci_resume(struct pci_dev *pdev)
3865{
3866 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3867 struct device *dev = &cfg->dev->dev;
3868
3869 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3870
3871 cfg->state = STATE_NORMAL;
3872 wake_up_all(&cfg->reset_waitq);
3873 scsi_unblock_requests(cfg->host);
3874}
3875
3876
3877
3878
3879
3880
3881
3882
3883static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3884{
3885 return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3886}
3887
3888
3889
3890
3891
3892
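/**
 * cxlflash_class_init() - create character device class
 *
 * Return: 0 on success, -errno on failure
 */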
3893static int cxlflash_class_init(void)
3894{
3895 dev_t devno;
3896 int rc = 0;
3897
3898 rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3899 if (unlikely(rc)) {
3900 pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3901 goto out;
3902 }
3903
3904 cxlflash_major = MAJOR(devno);
3905
3906 cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3907 if (IS_ERR(cxlflash_class)) {
3908 rc = PTR_ERR(cxlflash_class);
3909 pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3910 goto err;
3911 }
3912
3913 cxlflash_class->devnode = cxlflash_devnode;
3914out:
3915 pr_debug("%s: returning rc=%d\n", __func__, rc);
3916 return rc;
3917err:
3918 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3919 goto out;
3920}
3921
3922
3923
3924
3925static void cxlflash_class_exit(void)
3926{
3927 dev_t devno = MKDEV(cxlflash_major, 0);
3928
3929 class_destroy(cxlflash_class);
3930 unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3931}
3932
3933static const struct pci_error_handlers cxlflash_err_handler = {
3934 .error_detected = cxlflash_pci_error_detected,
3935 .slot_reset = cxlflash_pci_slot_reset,
3936 .resume = cxlflash_pci_resume,
3937};
3938
3939
3940
3941
3942static struct pci_driver cxlflash_driver = {
3943 .name = CXLFLASH_NAME,
3944 .id_table = cxlflash_pci_table,
3945 .probe = cxlflash_probe,
3946 .remove = cxlflash_remove,
3947 .shutdown = cxlflash_remove,
3948 .err_handler = &cxlflash_err_handler,
3949};
3950
3951
3952
3953
3954
3955
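/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */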
3956static int __init init_cxlflash(void)
3957{
3958 int rc;
3959
3960 check_sizes();
3961 cxlflash_list_init();
3962 rc = cxlflash_class_init();
3963 if (unlikely(rc))
3964 goto out;
3965
3966 rc = pci_register_driver(&cxlflash_driver);
3967 if (unlikely(rc))
3968 goto err;
3969out:
3970 pr_debug("%s: returning rc=%d\n", __func__, rc);
3971 return rc;
3972err:
3973 cxlflash_class_exit();
3974 goto out;
3975}
3976
3977
3978
3979
3980static void __exit exit_cxlflash(void)
3981{
3982 cxlflash_term_global_luns();
3983 cxlflash_free_errpage();
3984
3985 pci_unregister_driver(&cxlflash_driver);
3986 cxlflash_class_exit();
3987}
3988
3989module_init(init_cxlflash);
3990module_exit(exit_cxlflash);
3991