// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
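/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */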
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * this error is handled elsewhere. If not,
				 * it must be handled here; this is most
				 * likely an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
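/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */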
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scp->scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}
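/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */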
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}
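/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */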
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}
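/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */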
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}
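/**
 * context_reset_sq() - reset context via SQ context reset register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */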
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}
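/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */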
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
			    __func__, cmd, cmd->rcb.data_len,
			    cmd->rcb.data_ea, rc);
	return rc;
}
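/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */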
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}
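/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */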
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}
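/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */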
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}
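/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:	Internal structure associated with the host.
 * @sdev:	SCSI device destined for TMF.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */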
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}
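/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */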
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
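/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */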
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		atomic_inc(&afu->cmds_active);
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	atomic_dec(&afu->cmds_active);
out:
	return rc;
}
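/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */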
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}
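/**
 * free_mem() - free memory associated with the AFU structure
 * @cfg:	Internal structure associated with the host.
 */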
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}
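/**
 * cxlflash_reset_sync() - synchronizes a previously scheduled host reset
 * @cfg:	Internal structure associated with the host.
 */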
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}
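/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated and initialized state.
 *
 * Waits for any active internal AFU commands to timeout and then unmaps
 * the MMIO space.
 */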
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}
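/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */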
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		fallthrough;
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		fallthrough;
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		fallthrough;
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		fallthrough;
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
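/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */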
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}
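/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */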
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}
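/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */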
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
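/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */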
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}
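/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:	Minor number that is no longer needed.
 */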
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}
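/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:	Internal structure associated with the host.
 */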
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}
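/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note
 * that the reset_waitq is flushed as part of the stop/termination of user
 * contexts.
 */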
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		fallthrough;
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		fallthrough;
	case INIT_STATE_AFU:
		term_afu(cfg);
		fallthrough;
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		fallthrough;
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}
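/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */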
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* The AFU structure spans multiple pages, allocate them contiguously */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}
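/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */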
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
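/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */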
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
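/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */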
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
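/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */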
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
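/**
 * wait_port_online() - waits for the specified host FC port come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */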
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
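/**
 * wait_port_offline() - waits for the specified host FC port go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */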
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
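/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds.
 */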
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
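/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host.
 * @port:	Port number being reset.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */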
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
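/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */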
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/*
	 * Global async interrupts: the AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents the
	 * AFU from sending further async interrupts when there is nobody
	 * to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: the AFU can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/*
	 * Sync interrupts for master's IOARRIN write. Note that unlike
	 * asyncs, there can be no pending sync interrupts at this time
	 * (this is a fresh context and master has not written IOARRIN
	 * yet), so there is nothing to clear. Set LISN#; it is always
	 * sent to the context that wrote IOARRIN.
	 */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		reg = readq_be(&hwq->host_map->ctx_ctrl);
		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
		reg |= SISL_MSI_SYNC_ERROR;
		writeq_be(reg, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}
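/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: Always return IRQ_HANDLED.
 */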
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}
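/**
 * process_hrrq() - process the read-response queue
 * @hwq:	Hardware queue associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */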
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}
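/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */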
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}
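/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */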
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}
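/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */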
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	/* Silently drop spurious interrupts when queue is not online */
	if (!hwq->hrrq_online) {
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}
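/*
 * Asynchronous interrupt information table
 *
 * NOTE: Order matters here as this array is indexed by bit position.
 */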
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};
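/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: Always return IRQ_HANDLED.
 */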
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	u64 bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or without a reset.
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */
			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}
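/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */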
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
						cfg->dev_id->driver_data;
	const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 *
	 * Allow for WWPN not being found for all devices, setting
	 * the returned WWPN to zero when not found. Notify with a
	 * log error for cards that should have had WWPN keywords
	 * in the VPD - cards requiring WWPN will not have their
	 * ports programmed and operate in an undefined state.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (i < 0) {
			if (wwpn_vpd_required)
				dev_err(dev, "%s: Port %d WWPN not found\n",
					__func__, k);
			wwpn[k] = 0ULL;
			continue;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
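/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */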
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	void *cookie;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		cookie = hwq->ctx_cookie;

		hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

		/* Program the Endian Control for the master context */
		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
	}
}
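/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */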
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	struct sisl_host_map __iomem *hmap;
	__be64 __iomem *fc_port_regs;
	u64 wwpn[MAX_FC_PORTS];
	int i = 0, num_ports = 0;
	int rc = 0;
	int j;
	void *ctx;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in HWQ for master issued cmds */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		hmap = hwq->host_map;

		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
		hwq->hrrq_online = true;

		if (afu_is_sq_cmd_mode(afu)) {
			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
		}
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt and set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each master */
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);
			ctx = hwq->ctx_cookie;

			for (j = 0; j < hwq->num_irqs; j++) {
				reg = cfg->ops->get_irq_objhndl(ctx, j);
				writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
			}

			reg = hwq->ctx_hndl;
			writeq_be(SISL_LISN_PASID(reg, reg),
				  &hwq->ctrl_map->lisn_pasid[0]);
			writeq_be(SISL_LISN_PASID(0UL, reg),
				  &hwq->ctrl_map->lisn_pasid[1]);
		}
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
			SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
			SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
			&hwq->ctrl_map->ctx_cap);
	}

	/*
	 * Determine write-same unmap support for host by evaluating the unmap
	 * sector support bit of the context control register associated with
	 * the primary hardware queue. Note that while this status is reflected
	 * in all contexts, the value is set in the primary context.
	 */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	reg = readq_be(&hwq->host_map->ctx_ctrl);
	if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
		cfg->ws_unmap = true;

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}
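/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */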
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int rc = 0;
	int i;

	init_pcr(cfg);

	/* Initialize each HWQ */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		/* After an AFU reset, RRQ entries are stale, clear them */
		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));

		/* Initialize RRQ pointers */
		hwq->hrrq_start = &hwq->rrq_entry[0];
		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
		hwq->hrrq_curr = hwq->hrrq_start;
		hwq->toggle = 1;

		/* Initialize spin locks */
		spin_lock_init(&hwq->hrrq_slock);
		spin_lock_init(&hwq->hsq_slock);

		/* Initialize SQ */
		if (afu_is_sq_cmd_mode(afu)) {
			memset(&hwq->sq, 0, sizeof(hwq->sq));
			hwq->hsq_start = &hwq->sq[0];
			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
			hwq->hsq_curr = hwq->hsq_start;

			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
		}

		/* Initialize IRQ poll */
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
				      cxlflash_irqpoll);

	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
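/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @hwq:	Hardware queue to initialize.
 *
 * Return: 0 on success, -errno on failure
 */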
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct hwq *hwq)
{
	struct device *dev = &cfg->dev->dev;
	void *ctx = hwq->ctx_cookie;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;
	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
	int num_irqs = hwq->num_irqs;

	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
				   "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
				   "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
	if (!is_primary_hwq)
		goto out;

	rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
				   "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
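/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	HWQ Index of the master context.
 *
 * Return: 0 on success, -errno on failure
 */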
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
	void *ctx;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;
	int num_irqs;
	enum undo_level level;

	hwq->afu = cfg->afu;
	hwq->index = index;
	INIT_LIST_HEAD(&hwq->pending_cmds);

	if (index == PRIMARY_HWQ) {
		ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
		num_irqs = 3;
	} else {
		ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
		num_irqs = 2;
	}
	if (IS_ERR_OR_NULL(ctx)) {
		rc = -ENOMEM;
		goto err1;
	}

	WARN_ON(hwq->ctx_cookie);
	hwq->ctx_cookie = ctx;
	hwq->num_irqs = num_irqs;

	/* Set it up as a master with the CXL */
	cfg->ops->set_master(ctx);

	/* Reset AFU when initializing primary context */
	if (index == PRIMARY_HWQ) {
		rc = cfg->ops->afu_reset(ctx);
		if (unlikely(rc)) {
			dev_err(dev, "%s: AFU reset failed rc=%d\n",
				__func__, rc);
			goto err1;
		}
	}

	level = init_intr(cfg, hwq);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
		goto err2;
	}

	/* Finally, activate the context by starting it */
	rc = cfg->ops->start_context(hwq->ctx_cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto err2;
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	term_intr(cfg, level, index);
	if (index != PRIMARY_HWQ)
		cfg->ops->release_context(ctx);
err1:
	hwq->ctx_cookie = NULL;
	goto out;
}
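/**
 * get_num_afu_ports() - determines and configures the number of AFU ports
 * @cfg:	Internal structure associated with the host.
 *
 * This routine determines the number of AFU ports by converting the global
 * port selection mask. The converted value is only valid following an AFU
 * reset (explicit or power-on). This routine must be invoked shortly after
 * mapping as other routines are dependent on the number of ports during the
 * initialization sequence.
 *
 * To support legacy AFUs that might not have reflected the port mask in the
 * global port selection register, a value of LEGACY_FC_PORTS is used as a
 * default when the mask is unset.
 */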
static void get_num_afu_ports(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 port_mask;
	int num_fc_ports = LEGACY_FC_PORTS;

	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	if (port_mask != 0ULL)
		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);

	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
		__func__, port_mask, num_fc_ports);

	cfg->num_fc_ports = num_fc_ports;
	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
}
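/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */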
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int i;

	cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);

	mutex_init(&afu->sync_active);
	afu->num_hwqs = afu->desired_hwqs;
	for (i = 0; i < afu->num_hwqs; i++) {
		rc = init_mc(cfg, i);
		if (rc) {
			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
				__func__, rc, i);
			goto err1;
		}
	}

	/* Map the entire MMIO space of the AFU using the first context */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
	if (!afu->afu_map) {
		dev_err(dev, "%s: psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version, else the string is backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
			afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}

	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);

	get_num_afu_ports(cfg);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		hwq->room = readq_be(&hwq->host_map->cmd_room);
	}

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err1:
	for (i = afu->num_hwqs - 1; i >= 0; i--) {
		term_intr(cfg, UNMAP_THREE, i);
		term_mc(cfg, i);
	}
	goto out;
}
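/**
 * afu_reset() - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */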
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/*
	 * Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset is complete.
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
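/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */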
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
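/**
 * cxlflash_async_reset_host() - asynchronous host reset handler
 * @data:	Private data provided while scheduling reset.
 * @cookie:	Cookie that can be used for checkpointing.
 */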
static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
{
	struct cxlflash_cfg *cfg = data;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	if (cfg->state != STATE_RESET) {
		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	drain_ioctls(cfg);
	cxlflash_mark_contexts_error(cfg);
	rc = afu_reset(cfg);
	if (rc)
		cfg->state = STATE_FAILTERM;
	else
		cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);

out:
	scsi_unblock_requests(cfg->host);
}
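/**
 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
 * @cfg:	Internal structure associated with the host.
 */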
static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Not performing reset state=%d\n",
			__func__, cfg->state);
		return;
	}

	cfg->state = STATE_RESET;
	scsi_block_requests(cfg->host);
	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
						 cfg);
}
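/**
 * send_afu_cmd() - builds and sends an internal AFU command
 * @afu:	AFU associated with the host.
 * @rcb:	Pre-populated IOARCB describing command to send.
 *
 * The AFU can only take one internal AFU command at a time. This limitation is
 * enforced by using a mutex to provide exclusive access to the AFU during the
 * operation. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent AFU operations.
 *
 * The command status is optionally passed back to the caller when the caller
 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
 *
 * Return:
 *	0 on success, -errno on failure
 */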
static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	ulong lock_flags;
	char *buf = NULL;
	int rc = 0;
	int nretry = 0;

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&afu->sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));

retry:
	memset(cmd, 0, sizeof(*cmd));
	memcpy(&cmd->rcb, rcb, sizeof(*rcb));
	INIT_LIST_HEAD(&cmd->queue);
	init_completion(&cmd->cevent);
	cmd->parent = afu;
	cmd->hwq_index = hwq->index;
	cmd->rcb.ctx_id = hwq->ctx_hndl;

	dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
		__func__, afu, cmd, cmd->rcb.cdb[0], nretry);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		rc = -ENOBUFS;
		goto out;
	}

	rc = wait_resp(afu, cmd);
	switch (rc) {
	case -ETIMEDOUT:
		rc = afu->context_reset(hwq);
		if (rc) {
			/* Delete the command from the pending_cmds list */
			spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
			list_del(&cmd->list);
			spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

			cxlflash_schedule_async_reset(cfg);
			break;
		}
		fallthrough;
	case -EAGAIN:
		if (++nretry < 2)
			goto retry;
		fallthrough;
	default:
		break;
	}

	if (rcb->ioasa)
		*rcb->ioasa = cmd->sa;
out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&afu->sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
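/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx:	Identifies context requesting sync.
 * @res:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success, -errno on failure
 */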
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb = { 0 };

	dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
		__func__, afu, ctx, res, mode);

	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	rcb.cdb[0] = SISL_AFU_CMD_SYNC;
	rcb.cdb[1] = mode;
	put_unaligned_be16(ctx, &rcb.cdb[2]);
	put_unaligned_be32(res, &rcb.cdb[4]);

	return send_afu_cmd(afu, &rcb);
}
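/**
 * cxlflash_eh_abort_handler() - abort a SCSI command due to timeout
 * @scp:	SCSI command to abort.
 *
 * CXL Flash devices do not support a single command abort. Reset the context
 * as per SISLite specification. Flush any pending commands in the hardware
 * queue before the reset.
 *
 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
 */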
static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
{
	int rc = FAILED;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afuc(scp);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * When the state is not normal, another reset/reload is in progress.
	 * Return failed and the mid-layer will invoke host reset handler.
	 */
	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	rc = afu->context_reset(hwq);
	if (unlikely(rc))
		goto out;

	rc = SUCCESS;

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
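/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */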
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct scsi_device *sdev = scp->device;
	struct Scsi_Host *host = sdev->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	int rcr = 0;

	dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
		host->host_no, sdev->channel, sdev->id, sdev->lun);
retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
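/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */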
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: %d\n", __func__, host->host_no);

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		fallthrough;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		fallthrough;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
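/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */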
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
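/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */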
2578static ssize_t cxlflash_show_port_status(u32 port,
2579 struct cxlflash_cfg *cfg,
2580 char *buf)
2581{
2582 struct device *dev = &cfg->dev->dev;
2583 char *disp_status;
2584 u64 status;
2585 __be64 __iomem *fc_port_regs;
2586
2587 WARN_ON(port >= MAX_FC_PORTS);
2588
2589 if (port >= cfg->num_fc_ports) {
2590 dev_info(dev, "%s: Port %d not supported on this card.\n",
2591 __func__, port);
2592 return -EINVAL;
2593 }
2594
2595 fc_port_regs = get_fc_port_regs(cfg, port);
2596 status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2597 status &= FC_MTIP_STATUS_MASK;
2598
2599 if (status == FC_MTIP_STATUS_ONLINE)
2600 disp_status = "online";
2601 else if (status == FC_MTIP_STATUS_OFFLINE)
2602 disp_status = "offline";
2603 else
2604 disp_status = "unknown";
2605
2606 return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2607}
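
/*
 * Example usage (illustrative sketch; host and port numbers vary by system):
 *
 *   $ cat /sys/class/scsi_host/host0/port0
 *   online
 */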

/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(0, cfg, buf);
}

static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(1, cfg, buf);
}

static ssize_t port2_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(2, cfg, buf);
}

static ssize_t port3_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(3, cfg, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external links and
 * storage are not required. Space on the FPGA is used to create one or two
 * small LUNs which are presented to the system as if they were normal
 * storage devices. This feature is useful during development and also
 * provides manufacturing with a way to test the AFU without an actual
 * device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: @count.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be one less than the
		 * number of fc ports for this card.
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
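
/*
 * Example usage (illustrative sketch; the host number is an assumption).
 * Writing a non-zero mode switches the AFU to an internal LUN configuration
 * and triggers an AFU reset and host rescan:
 *
 *   $ echo 1 > /sys/class/scsi_host/host0/lun_mode
 *   $ echo 0 > /sys/class/scsi_host/host0/lun_mode    # restore external LUNs
 */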

/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t bytes = 0;

	bytes = scnprintf(buf, PAGE_SIZE,
			  "disk: %u\n", DK_CXLFLASH_VERSION_0);
	bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
			   "host: %u\n", HT_CXLFLASH_VERSION_0);

	return bytes;
}

/**
 * cxlflash_show_port_lun_table() - presents the current LUN table of a port
 * @port:	Desired port for LUN table enumeration.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct cxlflash_cfg *cfg,
					    char *buf)
{
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;
	int i;
	ssize_t bytes = 0;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_luns = get_fc_port_luns(cfg, port);

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n",
				   i, readq_be(&fc_port_luns[i]));
	return bytes;
}
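
/*
 * Example output (illustrative sketch; entries are device specific):
 *
 *   $ cat /sys/class/scsi_host/host0/port0_lun_table
 *   000: 0000000000000001
 *   001: 0000000000000000
 */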

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(0, cfg, buf);
}

static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(1, cfg, buf);
}

static ssize_t port2_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(2, cfg, buf);
}

static ssize_t port3_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(3, cfg, buf);
}

/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t irqpoll_weight_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
}

/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: @count on success, -EINVAL on error.
 */
static ssize_t irqpoll_weight_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	u32 weight;
	int rc, i;

	rc = kstrtouint(buf, 10, &weight);
	if (rc)
		return -EINVAL;

	if (weight > 256) {
		dev_info(cfgdev,
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
		return -EINVAL;
	}

	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "New IRQ poll weight is the same as current.\n");
		return -EINVAL;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_disable(&hwq->irqpoll);
		}
	}

	afu->irqpoll_weight = weight;

	if (weight > 0) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
		}
	}

	return count;
}
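
/*
 * Example usage (illustrative sketch; the host number is an assumption).
 * A weight of 0 disables IRQ polling; weights up to 256 are accepted:
 *
 *   $ echo 64 > /sys/class/scsi_host/host0/irqpoll_weight
 */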

/**
 * num_hwqs_show() - presents the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
 *		queues in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t num_hwqs_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
}

/**
 * num_hwqs_store() - sets the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
 *		queues in ASCII.
 * @count:	Length of data in @buf.
 *
 * n > 0: num_hwqs = n
 * n = 0: num_hwqs = num_online_cpus()
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
 *
 * Return: @count on success, -EINVAL on invalid input.
 */
static ssize_t num_hwqs_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;
	int rc;
	int nhwqs, num_hwqs;

	rc = kstrtoint(buf, 10, &nhwqs);
	if (rc)
		return -EINVAL;

	if (nhwqs >= 1)
		num_hwqs = nhwqs;
	else if (nhwqs == 0)
		num_hwqs = num_online_cpus();
	else
		num_hwqs = num_online_cpus() / abs(nhwqs);

	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
	WARN_ON_ONCE(afu->desired_hwqs == 0);

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rc = afu_reset(cfg);
		if (rc)
			cfg->state = STATE_FAILTERM;
		else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			goto retry;
		fallthrough;
	default:
		/* Ideally should not happen */
		dev_err(dev, "%s: Device is not ready, state=%d\n",
			__func__, cfg->state);
		break;
	}

	return count;
}
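
/*
 * Example usage (illustrative sketch; the host number is an assumption):
 *
 *   $ echo 4 > /sys/class/scsi_host/host0/num_hwqs    # four queues
 *   $ echo 0 > /sys/class/scsi_host/host0/num_hwqs    # one queue per CPU
 *   $ echo -2 > /sys/class/scsi_host/host0/num_hwqs   # one queue per two CPUs
 */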

static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };

/**
 * hwq_mode_show() - presents the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
 *		as a character string.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t hwq_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
}

/**
 * hwq_mode_store() - sets the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
 *		as a character string.
 * @count:	Length of data in @buf.
 *
 * rr = Round-Robin
 * tag = Block MQ Tagging
 * cpu = CPU Affinity
 *
 * Return: @count on success, -EINVAL on an invalid mode.
 */
static ssize_t hwq_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int i;
	u32 mode = MAX_HWQ_MODE;

	for (i = 0; i < MAX_HWQ_MODE; i++) {
		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
			mode = i;
			break;
		}
	}

	if (mode >= MAX_HWQ_MODE) {
		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
		return -EINVAL;
	}

	afu->hwq_mode = mode;

	return count;
}
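
/*
 * Example usage (illustrative sketch; the host number is an assumption):
 *
 *   $ echo cpu > /sys/class/scsi_host/host0/hwq_mode
 *   $ cat /sys/class/scsi_host/host0/hwq_mode
 *   cpu
 */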

/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
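
/*
 * Example usage (illustrative sketch; the device address is an assumption):
 *
 *   $ cat /sys/class/scsi_device/0:0:0:0/device/mode
 *   legacy
 */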

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RO(port2);
static DEVICE_ATTR_RO(port3);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
static DEVICE_ATTR_RO(port2_lun_table);
static DEVICE_ATTR_RO(port3_lun_table);
static DEVICE_ATTR_RW(irqpoll_weight);
static DEVICE_ATTR_RW(num_hwqs);
static DEVICE_ATTR_RW(hwq_mode);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_port2,
	&dev_attr_port3,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	&dev_attr_port2_lun_table,
	&dev_attr_port3_lun_table,
	&dev_attr_irqpoll_weight,
	&dev_attr_num_hwqs,
	&dev_attr_hwq_mode,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};

/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_abort_handler = cxlflash_eh_abort_handler,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_WWPN_VPD_REQUIRED };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					(CXLFLASH_NOTIFY_SHUTDOWN |
					 CXLFLASH_OCXL_DEV) };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);

/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_regs;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			fc_port_regs = get_fc_port_regs(cfg, port);
			afu_link_reset(afu, port, fc_port_regs);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}

/**
 * cxlflash_chr_open() - character device open handler
 * @inode:	Device inode associated with this character device.
 * @file:	File pointer for this device.
 *
 * Only users with admin privileges are allowed to open the character device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_chr_open(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
	file->private_data = cfg;

	return 0;
}

/**
 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
 * @cmd:	The host ioctl command to decode.
 *
 * Return: A string identifying the decoded host ioctl.
 */
static char *decode_hioctl(unsigned int cmd)
{
	switch (cmd) {
	case HT_CXLFLASH_LUN_PROVISION:
		return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
	}

	return "UNKNOWN";
}

/**
 * cxlflash_lun_provision() - host LUN provisioning handler
 * @cfg:	Internal structure associated with the host.
 * @lunprov:	Kernel copy of userspace ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
				  struct ht_cxlflash_lun_provision *lunprov)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb;
	struct sisl_ioasa asa;
	__be64 __iomem *fc_port_regs;
	u16 port = lunprov->port;
	u16 scmd = lunprov->hdr.subcmd;
	u16 type;
	u64 reg;
	u64 size;
	u64 lun_id;
	int rc = 0;

	if (!afu_is_lun_provision(afu)) {
		rc = -ENOTSUPP;
		goto out;
	}

	if (port >= cfg->num_fc_ports) {
		rc = -EINVAL;
		goto out;
	}

	switch (scmd) {
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
		type = SISL_AFU_LUN_PROVISION_CREATE;
		size = lunprov->size;
		lun_id = 0;
		break;
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
		type = SISL_AFU_LUN_PROVISION_DELETE;
		size = 0;
		lun_id = lunprov->lun_id;
		break;
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
		fc_port_regs = get_fc_port_regs(cfg, port);

		reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
		lunprov->max_num_luns = reg;
		reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
		lunprov->cur_num_luns = reg;
		reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
		lunprov->max_cap_port = reg;
		reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
		lunprov->cur_cap_port = reg;

		goto out;
	default:
		rc = -EINVAL;
		goto out;
	}

	memset(&rcb, 0, sizeof(rcb));
	memset(&asa, 0, sizeof(asa));
	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.lun_id = lun_id;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_LUN_PROV_TIMEOUT;
	rcb.ioasa = &asa;

	rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
	rcb.cdb[1] = type;
	rcb.cdb[2] = port;
	put_unaligned_be64(size, &rcb.cdb[8]);

	rc = send_afu_cmd(afu, &rcb);
	if (rc) {
		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
			__func__, rc, asa.ioasc, asa.afu_extra);
		goto out;
	}

	if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
		lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
		memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
	}
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_afu_debug() - host AFU debug handler
 * @cfg:	Internal structure associated with the host.
 * @afu_dbg:	Kernel copy of userspace ioctl data structure.
 *
 * For debug requests requiring a data buffer, always provide an aligned
 * (cache line) buffer to the AFU to appease any alignment requirements.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
			      struct ht_cxlflash_afu_debug *afu_dbg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb;
	struct sisl_ioasa asa;
	char *buf = NULL;
	char *kbuf = NULL;
	void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
	u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
	u32 ulen = afu_dbg->data_len;
	bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
	int rc = 0;

	if (!afu_is_afu_debug(afu)) {
		rc = -ENOTSUPP;
		goto out;
	}

	if (ulen) {
		req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;

		if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
			rc = -EINVAL;
			goto out;
		}

		buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
		if (unlikely(!buf)) {
			rc = -ENOMEM;
			goto out;
		}

		kbuf = PTR_ALIGN(buf, cache_line_size());

		if (is_write) {
			req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

			if (copy_from_user(kbuf, ubuf, ulen)) {
				rc = -EFAULT;
				goto out;
			}
		}
	}

	memset(&rcb, 0, sizeof(rcb));
	memset(&asa, 0, sizeof(asa));

	rcb.req_flags = req_flags;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
	rcb.ioasa = &asa;

	if (ulen) {
		rcb.data_len = ulen;
		rcb.data_ea = (uintptr_t)kbuf;
	}

	rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
	memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
	       HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);

	rc = send_afu_cmd(afu, &rcb);
	if (rc) {
		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
			__func__, rc, asa.ioasc, asa.afu_extra);
		goto out;
	}

	if (ulen && !is_write) {
		if (copy_to_user(ubuf, kbuf, ulen))
			rc = -EFAULT;
	}
out:
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_chr_ioctl() - character device IOCTL handler
 * @file:	File pointer for this device.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	typedef int (*hioctl) (struct cxlflash_cfg *, void *);

	struct cxlflash_cfg *cfg = file->private_data;
	struct device *dev = &cfg->dev->dev;
	char buf[sizeof(union cxlflash_ht_ioctls)];
	void __user *uarg = (void __user *)arg;
	struct ht_cxlflash_hdr *hdr;
	size_t size = 0;
	bool known_ioctl = false;
	int idx = 0;
	int rc = 0;
	hioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		hioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
		{ sizeof(struct ht_cxlflash_lun_provision),
			(hioctl)cxlflash_lun_provision },
		{ sizeof(struct ht_cxlflash_afu_debug),
			(hioctl)cxlflash_afu_debug },
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
		__func__, cmd, idx, sizeof(ioctl_tbl));

	switch (cmd) {
	case HT_CXLFLASH_LUN_PROVISION:
	case HT_CXLFLASH_AFU_DEBUG:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		fallthrough;
	default:
		rc = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(&buf, uarg, size))) {
		dev_err(dev, "%s: copy_from_user() fail "
			"size=%lu cmd=%d (%s) uarg=%p\n",
			__func__, size, cmd, decode_hioctl(cmd), uarg);
		rc = -EFAULT;
		goto out;
	}

	hdr = (struct ht_cxlflash_hdr *)&buf;
	if (hdr->version != HT_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_hioctl(cmd));
		rc = -EINVAL;
		goto out;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = do_ioctl(cfg, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(uarg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail "
				"size=%lu cmd=%d (%s) uarg=%p\n",
				__func__, size, cmd, decode_hioctl(cmd), uarg);
			rc = -EFAULT;
		}

	/* fall through to exit */

out:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
			__func__, decode_hioctl(cmd), cmd, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
			__func__, decode_hioctl(cmd), cmd, rc);
	return rc;
}

/*
 * Character device file operations
 */
static const struct file_operations cxlflash_chr_fops = {
	.owner = THIS_MODULE,
	.open = cxlflash_chr_open,
	.unlocked_ioctl = cxlflash_chr_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
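
/*
 * Minimal userspace sketch (illustrative only, not part of this driver)
 * showing how the character device ioctls above are driven. The device
 * node name is an assumption for the example; see init_chrdev() and
 * cxlflash_devnode() below for how the node is created:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/cxlflash_ioctl.h>
 *
 *	struct ht_cxlflash_lun_provision lunprov;
 *	int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
 *
 *	memset(&lunprov, 0, sizeof(lunprov));
 *	lunprov.hdr.version = HT_CXLFLASH_VERSION_0;
 *	lunprov.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT;
 *	lunprov.port = 0;
 *	if (ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lunprov) == 0)
 *		... lunprov.max_num_luns et al. are now valid ...
 */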

/**
 * init_chrdev() - initialize the character device for the host
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_chrdev(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct device *char_dev;
	dev_t devno;
	int minor;
	int rc = 0;

	minor = cxlflash_get_minor();
	if (unlikely(minor < 0)) {
		dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
		rc = -ENOSPC;
		goto out;
	}

	devno = MKDEV(cxlflash_major, minor);
	cdev_init(&cfg->cdev, &cxlflash_chr_fops);

	rc = cdev_add(&cfg->cdev, devno, 1);
	if (rc) {
		dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
		goto err1;
	}

	char_dev = device_create(cxlflash_class, NULL, devno,
				 NULL, "cxlflash%d", minor);
	if (IS_ERR(char_dev)) {
		rc = PTR_ERR(char_dev);
		dev_err(dev, "%s: device_create failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	cfg->chardev = char_dev;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	cdev_del(&cfg->cdev);
err1:
	cxlflash_put_minor(minor);
	goto out;
}

/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * The device will initially start out in a 'probing' state and
 * transition to the 'normal' state at the end of a successful
 * probe. Should an EEH event occur during probe, the notification
 * thread (error_detected()) will wait until the probe handler
 * is nearly complete. At that time, the device will be moved to
 * a 'probed' state and the EEH thread woken up to drive the slot
 * reset and recovery (device moves to 'normal' state).
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;
	int k;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->state = STATE_PROBING;
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;
	cfg->ops = cxlflash_assign_ops(ddv);
	WARN_ON_ONCE(!cfg->ops);

	/*
	 * Promoted LUNs move to the top of the LUN table. The rest stay on
	 * the bottom half. The bottom half grows from the end (index = 255),
	 * whereas the top half grows from the beginning (index = 0).
	 *
	 * Initialize the last LUN index for all possible ports.
	 */
	cfg->promote_lun_index = 0;

	for (k = 0; k < MAX_FC_PORTS; k++)
		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	cfg->afu_cookie = cfg->ops->create_afu(pdev);
	if (unlikely(!cfg->afu_cookie)) {
		dev_err(dev, "%s: create_afu failed\n", __func__);
		rc = -ENOMEM;
		goto out_remove;
	}

	rc = init_afu(cfg);
	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

	rc = init_chrdev(cfg);
	if (rc) {
		dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_CDEV;

	if (wq_has_sleeper(&cfg->reset_waitq)) {
		cfg->state = STATE_PROBED;
		wake_up_all(&cfg->reset_waitq);
	} else
		cfg->state = STATE_NORMAL;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cfg->state = STATE_PROBED;
	cxlflash_remove(pdev);
	goto out;
}

/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
					     cfg->state != STATE_PROBING);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}

/**
 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
 * @dev:	Character device.
 * @mode:	Mode that can be used to verify access.
 *
 * Return: Allocated string describing the devtmpfs structure.
 */
static char *cxlflash_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
}
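
/*
 * With the devnode hook above, devtmpfs surfaces each adapter's character
 * device under a class subdirectory, e.g. (node name illustrative):
 *
 *   /dev/cxlflash/cxlflash0
 */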

/**
 * cxlflash_class_init() - create character device class
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_class_init(void)
{
	dev_t devno;
	int rc = 0;

	rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
	if (unlikely(rc)) {
		pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
		goto out;
	}

	cxlflash_major = MAJOR(devno);

	cxlflash_class = class_create(THIS_MODULE, "cxlflash");
	if (IS_ERR(cxlflash_class)) {
		rc = PTR_ERR(cxlflash_class);
		pr_err("%s: class_create failed rc=%d\n", __func__, rc);
		goto err;
	}

	cxlflash_class->devnode = cxlflash_devnode;
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
err:
	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
	goto out;
}

/**
 * cxlflash_class_exit() - destroy character device class
 */
static void cxlflash_class_exit(void)
{
	dev_t devno = MKDEV(cxlflash_major, 0);

	class_destroy(cxlflash_class);
	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
}

static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};

/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};

/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	int rc;

	check_sizes();
	cxlflash_list_init();
	rc = cxlflash_class_init();
	if (unlikely(rc))
		goto out;

	rc = pci_register_driver(&cxlflash_driver);
	if (unlikely(rc))
		goto err;
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
err:
	cxlflash_class_exit();
	goto out;
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
	cxlflash_class_exit();
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);