/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

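/* Bookkeeping for the character device region: the device class, the
 * dynamically assigned major number and a bitmap of per-adapter minors.
 */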
static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error else where.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry Job */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry Job */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
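
	/* Hand the completion to its owner: SCSI stack, TMF waiter or the
	 * internal command originator.
	 */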
	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scp->scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

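	/* In SQ mode, status for the command is posted to the IOASA that the
	 * IOARCB references - point it at this command's IOASA.
	 */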
	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:	Internal structure associated with the host.
 * @sdev:	SCSI device destined for TMF.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);
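	/* An initialized (empty) queue node tells flush_pending_cmds() that
	 * this command has not yet been harvested onto a doneq.
	 */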

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	kfree(buf);
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, do not send a new command.
	 * Instead, ask the mid-layer to retry by returning busy.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

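	/* The cxlflash host template is configured with sg_tablesize = 1, so
	 * at most a single SG element is expected here.
	 */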
	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		/* fall through */
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		/* fall through */
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		/* fall through */
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:	Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note
 * that the reset_waitq is flushed as part of the stop/termination of user
 * contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		/* fall through */
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
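		/* A reading of all Fs suggests the MMIO path has gone bad
		 * (e.g. EEH); trim the remaining retries so we fail faster.
		 */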
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
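		/* As in wait_port_online(), all Fs suggests bad MMIO; trim
		 * the remaining retries so we fail faster.
		 */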
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host.
 * @port:	Port number being reset.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:	Hardware queue associated with the host.
 * @doneq:	Queue of commands harvested from the RRQ.
 * @budget:	Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq:	Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll:	IRQ poll structure associated with queue to poll.
 * @budget:	Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}

/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses surrounding
 *	  the parameter. Adding parentheses would be problematic as the macro
 *	  relies on token pasting.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the hardware
 *		queue.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	u64 bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */
			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct device *dev = &cfg->dev->dev;
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
						cfg->dev_id->driver_data;
	const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
			__func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 *
	 * Allow for WWPN not being found for all devices, setting
	 * the returned WWPN to zero when not found. Notify with a
	 * log error for cards that should have had WWPN keywords
	 * in the VPD - cards requiring WWPN will not have their
	 * ports programmed and operate in an undefined state.
	 */
	for (k = 0; k < cfg->num_fc_ports; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (i < 0) {
			if (wwpn_vpd_required)
				dev_err(dev, "%s: Port %d WWPN not found\n",
					__func__, k);
			wwpn[k] = 0ULL;
			continue;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	void *cookie;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;

		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		cookie = hwq->ctx_cookie;

		hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

		/* Program the Endian Control for the master context */
		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	struct sisl_host_map __iomem *hmap;
	__be64 __iomem *fc_port_regs;
	u64 wwpn[MAX_FC_PORTS];
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	/* Set up RRQ and SQ in HWQ for master issued cmds */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		hmap = hwq->host_map;

		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);

		if (afu_is_sq_cmd_mode(afu)) {
			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
		}
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = 0;
	} else {
		writeq_be(PORT_MASK(cfg->num_fc_ports),
			  &afu->afu_map->global.regs.afu_port_sel);
		num_ports = cfg->num_fc_ports;
	}

	for (i = 0; i < num_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
			SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
			SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
			&hwq->ctrl_map->ctx_cap);
	}

	/*
	 * Determine write-same unmap support for host by evaluating the unmap
	 * sector support bit of the context control register associated with
	 * the primary hardware queue. Note that while this status is reflected
	 * in all contexts, the value is the same in each.
	 */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	reg = readq_be(&hwq->host_map->ctx_ctrl);
	if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
		cfg->ws_unmap = true;

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int rc = 0;
	int i;

	init_pcr(cfg);

	/* Initialize each HWQ */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		/* After an AFU reset, RRQ entries are stale, clear them */
		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));

		/* Initialize RRQ pointers */
		hwq->hrrq_start = &hwq->rrq_entry[0];
		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
		hwq->hrrq_curr = hwq->hrrq_start;
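		/* An entry is valid when its toggle bit matches hwq->toggle;
		 * the expected value flips each time the RRQ wraps.
		 */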
		hwq->toggle = 1;

		/* Initialize spin locks */
		spin_lock_init(&hwq->hrrq_slock);
		spin_lock_init(&hwq->hsq_slock);

		/* Initialize SQ */
		if (afu_is_sq_cmd_mode(afu)) {
			memset(&hwq->sq, 0, sizeof(hwq->sq));
			hwq->hsq_start = &hwq->sq[0];
			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
			hwq->hsq_curr = hwq->hsq_start;

			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
		}

		/* Initialize IRQ poll */
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
				      cxlflash_irqpoll);
	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @hwq:	Hardware queue to initialize.
 *
 * Return: UNDO_NOOP on success, the undo level to unwind on failure
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct hwq *hwq)
{
	struct device *dev = &cfg->dev->dev;
	void *ctx = hwq->ctx_cookie;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;
	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
	int num_irqs = is_primary_hwq ? 3 : 2;

	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
				   "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
				   "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
	if (!is_primary_hwq)
		goto out;

	rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
				   "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}

/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	HWQ Index of the master context.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
	void *ctx;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;
	enum undo_level level;

	hwq->afu = cfg->afu;
	hwq->index = index;
	INIT_LIST_HEAD(&hwq->pending_cmds);

	if (index == PRIMARY_HWQ)
		ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
	else
		ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		rc = -ENOMEM;
		goto err1;
	}

	WARN_ON(hwq->ctx_cookie);
	hwq->ctx_cookie = ctx;

	/* Set it up as a master with the CXL */
	cfg->ops->set_master(ctx);

	/* Reset AFU when initializing the context for the primary HWQ */
	if (index == PRIMARY_HWQ) {
		rc = cfg->ops->afu_reset(ctx);
		if (unlikely(rc)) {
			dev_err(dev, "%s: AFU reset failed rc=%d\n",
				__func__, rc);
			goto err1;
		}
	}

	level = init_intr(cfg, hwq);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
		goto err2;
	}

	/* Finally, activate the context by starting it */
	rc = cfg->ops->start_context(hwq->ctx_cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto err2;
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	term_intr(cfg, level, index);
	if (index != PRIMARY_HWQ)
		cfg->ops->release_context(ctx);
err1:
	hwq->ctx_cookie = NULL;
	goto out;
}

/**
 * get_num_afu_ports() - determines and configures the number of AFU ports
 * @cfg:	Internal structure associated with the host.
 *
 * This routine determines the number of AFU ports by converting the global
 * port selection mask. The converted value is only valid following an AFU
 * reset (explicit or power-on). This routine must be invoked shortly after
 * mapping as other routines are dependent on the number of ports during the
 * initialization sequence.
 *
 * To support legacy AFUs that might not have reflected an initial global
 * port mask (value read is 0), default to the number of ports originally
 * supported by the cxlflash driver (2) when a port mask value is not being
 * utilized.
 */
static void get_num_afu_ports(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 port_mask;
	int num_fc_ports = LEGACY_FC_PORTS;

	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	if (port_mask != 0ULL)
		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);

	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
		__func__, port_mask, num_fc_ports);

	cfg->num_fc_ports = num_fc_ports;
	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
}

/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	int i;

	cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);

	afu->num_hwqs = afu->desired_hwqs;
	for (i = 0; i < afu->num_hwqs; i++) {
		rc = init_mc(cfg, i);
		if (rc) {
			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
				__func__, rc, i);
			goto err1;
		}
	}

	/* Map the entire MMIO space of the AFU using the first context */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
	if (!afu->afu_map) {
		dev_err(dev, "%s: psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version, else it will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
			afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}

	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);

	get_num_afu_ports(cfg);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		hwq->room = readq_be(&hwq->host_map->cmd_room);
	}

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err1:
	for (i = afu->num_hwqs - 1; i >= 0; i--) {
		term_intr(cfg, UNMAP_THREE, i);
		term_mc(cfg, i);
	}
	goto out;
}

/**
 * afu_reset() - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/* Stop the context before the reset. Since the context is
	 * no longer available restart it after the reset is complete
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}

/**
 * cxlflash_async_reset_host() - asynchronous host reset handler
 * @data:	Private data provided while scheduling reset.
 * @cookie:	Cookie that can be used for checkpointing.
 */
static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
{
	struct cxlflash_cfg *cfg = data;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	if (cfg->state != STATE_RESET) {
		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	drain_ioctls(cfg);
	cxlflash_mark_contexts_error(cfg);
	rc = afu_reset(cfg);
	if (rc)
		cfg->state = STATE_FAILTERM;
	else
		cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);

out:
	scsi_unblock_requests(cfg->host);
}

/**
 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Not performing reset state=%d\n",
			__func__, cfg->state);
		return;
	}

	cfg->state = STATE_RESET;
	scsi_block_requests(cfg->host);
	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
						 cfg);
}

/**
 * send_afu_cmd() - builds and sends an internal AFU command
 * @afu:	AFU associated with the host.
 * @rcb:	Pre-populated IOARCB describing command to send.
 *
 * The AFU can only take one internal AFU command at a time. This limitation is
 * enforced by using a mutex to provide exclusive access to the AFU during the
 * operation. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent AFU operations.
 *
 * The command status is optionally passed back to the caller when the caller
 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
 *
 * Return:
 *	0 on success, -errno on failure
 */
static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	char *buf = NULL;
	int rc = 0;
	int nretry = 0;
	static DEFINE_MUTEX(sync_active);
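	/* A function-local static mutex suffices because every internal AFU
	 * command, for all adapters, funnels through this single routine.
	 */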

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));

retry:
	memset(cmd, 0, sizeof(*cmd));
	memcpy(&cmd->rcb, rcb, sizeof(*rcb));
	INIT_LIST_HEAD(&cmd->queue);
	init_completion(&cmd->cevent);
	cmd->parent = afu;
	cmd->hwq_index = hwq->index;
	cmd->rcb.ctx_id = hwq->ctx_hndl;

	dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
		__func__, afu, cmd, cmd->rcb.cdb[0], nretry);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		rc = -ENOBUFS;
		goto out;
	}

	rc = wait_resp(afu, cmd);
	switch (rc) {
	case -ETIMEDOUT:
		rc = afu->context_reset(hwq);
		if (rc) {
			cxlflash_schedule_async_reset(cfg);
			break;
		}
		/* fall through to retry */
	case -EAGAIN:
		if (++nretry < 2)
			goto retry;
		/* fall through to exit */
	default:
		break;
	}

	if (rcb->ioasa)
		*rcb->ioasa = cmd->sa;
out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx:	Identifies context requesting sync.
 * @res:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success, -errno on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb = { 0 };

	dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
		__func__, afu, ctx, res, mode);

	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	rcb.cdb[0] = SISL_AFU_CMD_SYNC;
	rcb.cdb[1] = mode;
	put_unaligned_be16(ctx, &rcb.cdb[2]);
	put_unaligned_be32(res, &rcb.cdb[4]);

	return send_afu_cmd(afu, &rcb);
}

/**
 * cxlflash_eh_abort_handler() - abort a SCSI command due to timeout
 * @scp:	SCSI command to abort.
 *
 * CXL Flash devices do not support a single command abort. Reset the context
 * as per SISLite specification. Flush any pending commands in the hardware
 * queue before the reset.
 *
 * Return: SUCCESS or FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
{
	int rc = FAILED;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afuc(scp);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/* When the state is not normal, another reset/reload is in progress.
	 * Return failed and the mid-layer will invoke host reset handler.
	 */
	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
			__func__, cfg->state);
		goto out;
	}

	rc = afu->context_reset(hwq);
	if (unlikely(rc))
		goto out;

	rc = SUCCESS;

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct scsi_device *sdev = scp->device;
	struct Scsi_Host *host = sdev->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	int rcr = 0;

	dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
		host->host_no, sdev->channel, sdev->id, sdev->lun);
retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: %d\n", __func__, host->host_no);

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		/* fall through */
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_status(u32 port,
					 struct cxlflash_cfg *cfg,
					 char *buf)
{
	struct device *dev = &cfg->dev->dev;
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_port_regs;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_regs = get_fc_port_regs(cfg, port);
	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}

/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(0, cfg, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(1, cfg, buf);
}

/**
 * port2_show() - queries and presents the current status of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port2_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(2, cfg, buf);
}

/**
 * port3_show() - queries and presents the current status of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port3_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_status(3, cfg, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
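
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The size of the ASCII string returned in @buf.
 */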
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be one less than the
		 * number of fc ports for this card.
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
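
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */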
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t bytes = 0;

	bytes = scnprintf(buf, PAGE_SIZE,
			  "disk: %u\n", DK_CXLFLASH_VERSION_0);
	bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
			   "host: %u\n", HT_CXLFLASH_VERSION_0);

	return bytes;
}
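
/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for LUN table enumeration.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */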
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct cxlflash_cfg *cfg,
					    char *buf)
{
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;
	int i;
	ssize_t bytes = 0;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_luns = get_fc_port_luns(cfg, port);

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n",
				   i, readq_be(&fc_port_luns[i]));
	return bytes;
}
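
/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */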
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(0, cfg, buf);
}
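
/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */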
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(1, cfg, buf);
}
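
/**
 * port2_lun_table_show() - presents the current LUN table of port 2
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */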
static ssize_t port2_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(2, cfg, buf);
}
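
/**
 * port3_lun_table_show() - presents the current LUN table of port 3
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */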
static ssize_t port3_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(3, cfg, buf);
}
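
/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */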
static ssize_t irqpoll_weight_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
}
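
/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */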
static ssize_t irqpoll_weight_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	u32 weight;
	int rc, i;

	rc = kstrtouint(buf, 10, &weight);
	if (rc)
		return -EINVAL;

	if (weight > 256) {
		dev_info(cfgdev,
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
		return -EINVAL;
	}

	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "Specified IRQ poll weight is already set.\n");
		return -EINVAL;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_disable(&hwq->irqpoll);
		}
	}

	afu->irqpoll_weight = weight;

	if (weight > 0) {
		for (i = 0; i < afu->num_hwqs; i++) {
			hwq = get_hwq(afu, i);

			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
		}
	}

	return count;
}
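
/**
 * num_hwqs_show() - presents the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
 *		queues in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */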
static ssize_t num_hwqs_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
}
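
/**
 * num_hwqs_store() - sets the number of hardware queues for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the number of hardware queues.
 * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
 *		queues in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * n > 0: num_hwqs = n
 * n = 0: num_hwqs = num_online_cpus()
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
 *
 * Return: The size of the ASCII string returned in @buf.
 */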
static ssize_t num_hwqs_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;
	int rc;
	int nhwqs, num_hwqs;

	rc = kstrtoint(buf, 10, &nhwqs);
	if (rc)
		return -EINVAL;

	if (nhwqs >= 1)
		num_hwqs = nhwqs;
	else if (nhwqs == 0)
		num_hwqs = num_online_cpus();
	else
		num_hwqs = num_online_cpus() / abs(nhwqs);

	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
	WARN_ON_ONCE(afu->desired_hwqs == 0);

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rc = afu_reset(cfg);
		if (rc)
			cfg->state = STATE_FAILTERM;
		else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			goto retry;
		/* fall through */
	default:
		/* Ideally should not happen */
		dev_err(dev, "%s: Device is not ready, state=%d\n",
			__func__, cfg->state);
		break;
	}

	return count;
}

static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
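
/**
 * hwq_mode_show() - presents the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
 *		in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */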
static ssize_t hwq_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
}
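
/**
 * hwq_mode_store() - sets the HWQ steering mode for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the HWQ steering mode.
 * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
 *		in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * rr = Round-Robin
 * tag = Block MQ Tagging
 * cpu = CPU Affinity
 *
 * Return: The size of the ASCII string returned in @buf.
 */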
static ssize_t hwq_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int i;
	u32 mode = MAX_HWQ_MODE;

	for (i = 0; i < MAX_HWQ_MODE; i++) {
		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
			mode = i;
			break;
		}
	}

	if (mode >= MAX_HWQ_MODE) {
		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
		return -EINVAL;
	}

	if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
		dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
			 "HWQ steering mode.\n");
		return -EINVAL;
	}

	afu->hwq_mode = mode;

	return count;
}
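
/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */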
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}
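
/*
 * Host attributes
 */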
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RO(port2);
static DEVICE_ATTR_RO(port3);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
static DEVICE_ATTR_RO(port2_lun_table);
static DEVICE_ATTR_RO(port3_lun_table);
static DEVICE_ATTR_RW(irqpoll_weight);
static DEVICE_ATTR_RW(num_hwqs);
static DEVICE_ATTR_RW(hwq_mode);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_port2,
	&dev_attr_port3,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	&dev_attr_port2_lun_table,
	&dev_attr_port3_lun_table,
	&dev_attr_irqpoll_weight,
	&dev_attr_num_hwqs,
	&dev_attr_hwq_mode,
	NULL
};
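
/*
 * Device attributes
 */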
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};
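
/*
 * Host template
 */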
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_abort_handler = cxlflash_eh_abort_handler,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};
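
/*
 * Device dependent values
 */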
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_WWPN_VPD_REQUIRED };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
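
/*
 * PCI device binding table
 */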
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
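
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Rescan the host
 */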
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_regs;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			fc_port_regs = get_fc_port_regs(cfg, port);
			afu_link_reset(afu, port, fc_port_regs);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
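
/**
 * cxlflash_chr_open() - character device open handler
 * @inode:	Device inode associated with this character device.
 * @file:	File pointer for this device.
 *
 * Only users with admin privileges are allowed to open the character device.
 *
 * Return: 0 on success, -errno on failure
 */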
static int cxlflash_chr_open(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
	file->private_data = cfg;

	return 0;
}
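
/**
 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
 * @cmd:	The host ioctl command to decode.
 *
 * Return: A string identifying the decoded host ioctl.
 */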
static char *decode_hioctl(int cmd)
{
	switch (cmd) {
	case HT_CXLFLASH_LUN_PROVISION:
		return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
	}

	return "UNKNOWN";
}
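
/**
 * cxlflash_lun_provision() - host LUN provisioning handler
 * @cfg:	Internal structure associated with the host.
 * @lunprov:	Kernel copy of userspace ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */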
static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
				  struct ht_cxlflash_lun_provision *lunprov)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb;
	struct sisl_ioasa asa;
	__be64 __iomem *fc_port_regs;
	u16 port = lunprov->port;
	u16 scmd = lunprov->hdr.subcmd;
	u16 type;
	u64 reg;
	u64 size;
	u64 lun_id;
	int rc = 0;

	if (!afu_is_lun_provision(afu)) {
		rc = -ENOTSUPP;
		goto out;
	}

	if (port >= cfg->num_fc_ports) {
		rc = -EINVAL;
		goto out;
	}

	switch (scmd) {
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
		type = SISL_AFU_LUN_PROVISION_CREATE;
		size = lunprov->size;
		lun_id = 0;
		break;
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
		type = SISL_AFU_LUN_PROVISION_DELETE;
		size = 0;
		lun_id = lunprov->lun_id;
		break;
	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
		fc_port_regs = get_fc_port_regs(cfg, port);

		reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
		lunprov->max_num_luns = reg;
		reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
		lunprov->cur_num_luns = reg;
		reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
		lunprov->max_cap_port = reg;
		reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
		lunprov->cur_cap_port = reg;

		goto out;
	default:
		rc = -EINVAL;
		goto out;
	}

	memset(&rcb, 0, sizeof(rcb));
	memset(&asa, 0, sizeof(asa));
	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	rcb.lun_id = lun_id;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_LUN_PROV_TIMEOUT;
	rcb.ioasa = &asa;

	rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
	rcb.cdb[1] = type;
	rcb.cdb[2] = port;
	put_unaligned_be64(size, &rcb.cdb[8]);

	rc = send_afu_cmd(afu, &rcb);
	if (rc) {
		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
			__func__, rc, asa.ioasc, asa.afu_extra);
		goto out;
	}

	if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
		lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
		memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
	}
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
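
/**
 * cxlflash_afu_debug() - host AFU debug handler
 * @cfg:	Internal structure associated with the host.
 * @afu_dbg:	Kernel copy of userspace ioctl data structure.
 *
 * For debug requests requiring a data buffer, always provide an aligned
 * (cache line) buffer to the AFU to appease any alignment requirements.
 *
 * Return: 0 on success, -errno on failure
 */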
static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
			      struct ht_cxlflash_afu_debug *afu_dbg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb rcb;
	struct sisl_ioasa asa;
	char *buf = NULL;
	char *kbuf = NULL;
	void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
	u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
	u32 ulen = afu_dbg->data_len;
	bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
	int rc = 0;

	if (!afu_is_afu_debug(afu)) {
		rc = -ENOTSUPP;
		goto out;
	}

	if (ulen) {
		req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;

		if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
			rc = -EINVAL;
			goto out;
		}

		buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
		if (unlikely(!buf)) {
			rc = -ENOMEM;
			goto out;
		}

		kbuf = PTR_ALIGN(buf, cache_line_size());

		if (is_write) {
			req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

			if (copy_from_user(kbuf, ubuf, ulen)) {
				rc = -EFAULT;
				goto out;
			}
		}
	}

	memset(&rcb, 0, sizeof(rcb));
	memset(&asa, 0, sizeof(asa));

	rcb.req_flags = req_flags;
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
	rcb.ioasa = &asa;

	if (ulen) {
		rcb.data_len = ulen;
		rcb.data_ea = (uintptr_t)kbuf;
	}

	rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
	memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
	       HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);

	rc = send_afu_cmd(afu, &rcb);
	if (rc) {
		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
			__func__, rc, asa.ioasc, asa.afu_extra);
		goto out;
	}

	if (ulen && !is_write) {
		if (copy_to_user(ubuf, kbuf, ulen))
			rc = -EFAULT;
	}
out:
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
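
/**
 * cxlflash_chr_ioctl() - character device IOCTL handler
 * @file:	File pointer for this device.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to protect against any operations that
 * occur when the device is undergoing reset. Error handlers that wish to
 * drain the ioctls can take the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */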
static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	typedef int (*hioctl) (struct cxlflash_cfg *, void *);

	struct cxlflash_cfg *cfg = file->private_data;
	struct device *dev = &cfg->dev->dev;
	char buf[sizeof(union cxlflash_ht_ioctls)];
	void __user *uarg = (void __user *)arg;
	struct ht_cxlflash_hdr *hdr;
	size_t size = 0;
	bool known_ioctl = false;
	int idx = 0;
	int rc = 0;
	hioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		hioctl ioctl;
	} ioctl_tbl[] = {
		{ sizeof(struct ht_cxlflash_lun_provision),
		  (hioctl)cxlflash_lun_provision },
		{ sizeof(struct ht_cxlflash_afu_debug),
		  (hioctl)cxlflash_afu_debug },
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
		__func__, cmd, idx, sizeof(ioctl_tbl));

	switch (cmd) {
	case HT_CXLFLASH_LUN_PROVISION:
	case HT_CXLFLASH_AFU_DEBUG:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(&buf, uarg, size))) {
		dev_err(dev, "%s: copy_from_user() fail "
			"size=%lu cmd=%d (%s) uarg=%p\n",
			__func__, size, cmd, decode_hioctl(cmd), uarg);
		rc = -EFAULT;
		goto out;
	}

	hdr = (struct ht_cxlflash_hdr *)&buf;
	if (hdr->version != HT_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_hioctl(cmd));
		rc = -EINVAL;
		goto out;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = do_ioctl(cfg, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(uarg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail "
				"size=%lu cmd=%d (%s) uarg=%p\n",
				__func__, size, cmd, decode_hioctl(cmd), uarg);
			rc = -EFAULT;
		}

	/* fall through to exit */

out:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
			__func__, decode_hioctl(cmd), cmd, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
			__func__, decode_hioctl(cmd), cmd, rc);
	return rc;
}
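
/*
 * Character device file operations
 */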
static const struct file_operations cxlflash_chr_fops = {
	.owner = THIS_MODULE,
	.open = cxlflash_chr_open,
	.unlocked_ioctl = cxlflash_chr_ioctl,
	.compat_ioctl = cxlflash_chr_ioctl,
};
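
/**
 * init_chrdev() - initialize the character device for the host
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */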
static int init_chrdev(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct device *char_dev;
	dev_t devno;
	int minor;
	int rc = 0;

	minor = cxlflash_get_minor();
	if (unlikely(minor < 0)) {
		dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
		rc = -ENOSPC;
		goto out;
	}

	devno = MKDEV(cxlflash_major, minor);
	cdev_init(&cfg->cdev, &cxlflash_chr_fops);

	rc = cdev_add(&cfg->cdev, devno, 1);
	if (rc) {
		dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
		goto err1;
	}

	char_dev = device_create(cxlflash_class, NULL, devno,
				 NULL, "cxlflash%d", minor);
	if (IS_ERR(char_dev)) {
		rc = PTR_ERR(char_dev);
		dev_err(dev, "%s: device_create failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	cfg->chardev = char_dev;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	cdev_del(&cfg->cdev);
err1:
	cxlflash_put_minor(minor);
	goto out;
}
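
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * The device will initially start out in a 'probing' state and
 * transition to the 'normal' state at the end of a successful
 * probe. Should an EEH event occur during probe, the notification
 * thread (error_detected()) will wait until the probe handler
 * is nearly complete. At that time, the device will be moved to
 * a 'probed' state and the EEH thread woken up to drive the slot
 * reset and recovery (device moves to 'normal' state).
 *
 * Return: 0 on success, -errno on failure
 */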
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;
	int k;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->ops = &cxlflash_cxl_ops;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * Promoted LUNs move to the top of the LUN table. The rest stay on
	 * the bottom half. The bottom half grows from the end (index = 255),
	 * whereas the top half grows from the beginning (index = 0).
	 *
	 * Initialize the last LUN index for all possible ports.
	 */
	cfg->promote_lun_index = 0;

	for (k = 0; k < MAX_FC_PORTS; k++)
		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->afu_cookie = cfg->ops->create_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

	rc = init_chrdev(cfg);
	if (rc) {
		dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_CDEV;

	if (wq_has_sleeper(&cfg->reset_waitq)) {
		cfg->state = STATE_PROBED;
		wake_up_all(&cfg->reset_waitq);
	} else
		cfg->state = STATE_NORMAL;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}
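
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */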
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
					     cfg->state != STATE_PROBING);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
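
/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */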
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
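
/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct.
 */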
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}
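
/**
 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
 * @dev:	Character device.
 * @mode:	Mode that can be used to verify access.
 *
 * Return: Allocated string describing the devtmpfs structure.
 */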
static char *cxlflash_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
}
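
/**
 * cxlflash_class_init() - create character device class
 *
 * Return: 0 on success, -errno on failure
 */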
static int cxlflash_class_init(void)
{
	dev_t devno;
	int rc = 0;

	rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
	if (unlikely(rc)) {
		pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
		goto out;
	}

	cxlflash_major = MAJOR(devno);

	cxlflash_class = class_create(THIS_MODULE, "cxlflash");
	if (IS_ERR(cxlflash_class)) {
		rc = PTR_ERR(cxlflash_class);
		pr_err("%s: class_create failed rc=%d\n", __func__, rc);
		goto err;
	}

	cxlflash_class->devnode = cxlflash_devnode;
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
err:
	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
	goto out;
}
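
/**
 * cxlflash_class_exit() - destroy character device class
 */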
static void cxlflash_class_exit(void)
{
	dev_t devno = MKDEV(cxlflash_major, 0);

	class_destroy(cxlflash_class);
	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
}

static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};
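
/*
 * PCI device structure
 */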
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};
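
/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */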
static int __init init_cxlflash(void)
{
	int rc;

	check_sizes();
	cxlflash_list_init();
	rc = cxlflash_class_init();
	if (unlikely(rc))
		goto out;

	rc = pci_register_driver(&cxlflash_driver);
	if (unlikely(rc))
		goto err;
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
err:
	cxlflash_class_exit();
	goto out;
}
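
/**
 * exit_cxlflash() - module exit point
 */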
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
	cxlflash_class_exit();
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);