1
2
3
4
5
6
7
8
9
10
11#include <linux/delay.h>
12#include <linux/file.h>
13#include <linux/interrupt.h>
14#include <linux/pci.h>
15#include <linux/syscalls.h>
16#include <asm/unaligned.h>
17
18#include <scsi/scsi.h>
19#include <scsi/scsi_host.h>
20#include <scsi/scsi_cmnd.h>
21#include <scsi/scsi_eh.h>
22#include <uapi/scsi/cxlflash_ioctl.h>
23
24#include "sislite.h"
25#include "common.h"
26#include "vlun.h"
27#include "superpipe.h"
28
29struct cxlflash_global global;
30
31
32
33
34
35
36static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
37 struct dk_cxlflash_resize *resize)
38{
39 resize->hdr = release->hdr;
40 resize->context_id = release->context_id;
41 resize->rsrc_handle = release->rsrc_handle;
42}
43
44
45
46
47
48
49static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
50 struct dk_cxlflash_release *release)
51{
52 release->hdr = detach->hdr;
53 release->context_id = detach->context_id;
54}
55
56
57
58
59
60
61static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
62 struct dk_cxlflash_release *release)
63{
64 release->hdr = udirect->hdr;
65 release->context_id = udirect->context_id;
66 release->rsrc_handle = udirect->rsrc_handle;
67}
68
69
70
71
72void cxlflash_free_errpage(void)
73{
74
75 mutex_lock(&global.mutex);
76 if (global.err_page) {
77 __free_page(global.err_page);
78 global.err_page = NULL;
79 }
80 mutex_unlock(&global.mutex);
81}
82
83
84
85
86
87
88
89
90
91
92
93
94
95void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
96{
97 struct device *dev = &cfg->dev->dev;
98 int i, found = true;
99
100 cxlflash_mark_contexts_error(cfg);
101
102 while (true) {
103 for (i = 0; i < MAX_CONTEXT; i++)
104 if (cfg->ctx_tbl[i]) {
105 found = true;
106 break;
107 }
108
109 if (!found && list_empty(&cfg->ctx_err_recovery))
110 return;
111
112 dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
113 __func__);
114 wake_up_all(&cfg->reset_waitq);
115 ssleep(1);
116 found = false;
117 }
118}
119
120
121
122
123
124
125
126
127
128static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
129 struct file *file)
130{
131 struct ctx_info *ctxi;
132
133 list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
134 if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
135 return ctxi;
136
137 return NULL;
138}
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi threaded. The tgid remains constant for the process and only changes
 * when the process forks. For all intents and purposes, think of tgid
 * as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = task_tgid_nr(current), ctxpid = 0;

	/* When steered by file, @arg is a file pointer rather than LUN info */
	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	/* Clone lookups validate against the parent process id */
	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = task_ppid_nr(current);

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			/* The context must be attached to the requested LUN */
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}
238
239
240
241
242
243
244
/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}
249
250
251
252
253
254
255
256
257
258
259
260
/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 val;
	int i;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each interrupt */
		for (i = 0; i < ctxi->irqs; i++) {
			val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
			writeq_be(val, &ctrl_map->lisn_ea[i]);
		}

		/* Use primary HWQ's PASID as identifier for all interrupts */
		val = hwq->ctx_hndl;
		writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
		writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
	}

	/* Point the context at its RHT and publish the entry count/id */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur
 * while in scsi_execute(), the EEH handler will be called prior to this
 * routine returning. To avoid a deadlock, the command is issued with the
 * ioctl read semaphore dropped and the adapter state is re-validated once
 * it is reacquired.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	struct scsi_sense_hdr sshdr;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
			      0, 0, NULL);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (result > 0 && scsi_sense_valid(&sshdr)) {
		if (result & SAM_STAT_CHECK_CONDITION) {
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					fallthrough;
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}
425
426
427
428
429
430
431
432
433
434struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
435 struct llun_info *lli)
436{
437 struct cxlflash_cfg *cfg = ctxi->cfg;
438 struct device *dev = &cfg->dev->dev;
439 struct sisl_rht_entry *rhte = NULL;
440
441 if (unlikely(!ctxi->rht_start)) {
442 dev_dbg(dev, "%s: Context does not have allocated RHT\n",
443 __func__);
444 goto out;
445 }
446
447 if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
448 dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
449 __func__, rhndl);
450 goto out;
451 }
452
453 if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
454 dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
455 __func__, rhndl);
456 goto out;
457 }
458
459 rhte = &ctxi->rht_start[rhndl];
460 if (unlikely(rhte->nmask == 0)) {
461 dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
462 __func__, rhndl);
463 rhte = NULL;
464 goto out;
465 }
466
467out:
468 return rhte;
469}
470
471
472
473
474
475
476
477
478struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
479 struct llun_info *lli)
480{
481 struct cxlflash_cfg *cfg = ctxi->cfg;
482 struct device *dev = &cfg->dev->dev;
483 struct sisl_rht_entry *rhte = NULL;
484 int i;
485
486
487 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
488 if (ctxi->rht_start[i].nmask == 0) {
489 rhte = &ctxi->rht_start[i];
490 ctxi->rht_out++;
491 break;
492 }
493
494 if (likely(rhte))
495 ctxi->rht_lun[i] = lli;
496
497 dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
498 return rhte;
499}
500
501
502
503
504
505
506void rhte_checkin(struct ctx_info *ctxi,
507 struct sisl_rht_entry *rhte)
508{
509 u32 rsrc_handle = rhte - ctxi->rht_start;
510
511 rhte->nmask = 0;
512 rhte->fp = 0;
513 ctxi->rht_out--;
514 ctxi->rht_lun[rsrc_handle] = NULL;
515 ctxi->rht_needs_ws[rsrc_handle] = false;
516}
517
518
519
520
521
522
523
524
/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask.
 *
 * The fields are written in a specific order with barriers between them;
 * do not reorder these stores.
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using a staged sequence: format bit first, then LUN id,
	 * then the remaining second dword in a single store built from
	 * a local dummy entry (valid bit set last).
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set last).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}
555
556
557
558
559
560
561
562
563
564int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
565{
566 int rc = 0;
567
568 if (!locked)
569 mutex_lock(&gli->mutex);
570
571 if (gli->mode == MODE_NONE)
572 gli->mode = mode;
573 else if (gli->mode != mode) {
574 pr_debug("%s: gli_mode=%d requested_mode=%d\n",
575 __func__, gli->mode, mode);
576 rc = -EINVAL;
577 goto out;
578 }
579
580 gli->users++;
581 WARN_ON(gli->users <= 0);
582out:
583 pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
584 __func__, rc, gli->mode, gli->users);
585 if (!locked)
586 mutex_unlock(&gli->mutex);
587 return rc;
588}
589
590
591
592
593
594
595
596
597
598
599
600void cxlflash_lun_detach(struct glun_info *gli)
601{
602 mutex_lock(&gli->mutex);
603 WARN_ON(gli->mode == MODE_NONE);
604 if (--gli->users == 0) {
605 gli->mode = MODE_NONE;
606 cxlflash_ba_terminate(&gli->blka.ba_lun);
607 }
608 pr_debug("%s: gli->users=%u\n", __func__, gli->users);
609 WARN_ON(gli->users < 0);
610 mutex_unlock(&gli->mutex);
611}
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources (NULL to look up from @release).
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync is _not_ performed when the context is in error recovery; a
 * context on the error recovery list is not known to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;	/* only put contexts we looked up here */

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	int rcr = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNS by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the inverse of the population
		 * sequence: valid bit first, then LUN id, then the
		 * remaining dword, with barriers in between.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		/* Skip the sync when the context is unknown to the AFU */
		if (!ctxi->err_recovery_active) {
			rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
			if (unlikely(rcr))
				dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
					__func__, rcr);
		}
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
729
/* Ioctl entry point - releases a resource with no pre-validated context */
int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}
735
736
737
738
739
740
741
742
743
744
745
746
747
748
/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}
771
772
773
774
775
776
777
778static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
779{
780 struct device *dev = &cfg->dev->dev;
781 struct ctx_info *ctxi = NULL;
782 struct llun_info **lli = NULL;
783 u8 *ws = NULL;
784 struct sisl_rht_entry *rhte;
785
786 ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
787 lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
788 ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
789 if (unlikely(!ctxi || !lli || !ws)) {
790 dev_err(dev, "%s: Unable to allocate context\n", __func__);
791 goto err;
792 }
793
794 rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
795 if (unlikely(!rhte)) {
796 dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
797 goto err;
798 }
799
800 ctxi->rht_lun = lli;
801 ctxi->rht_needs_ws = ws;
802 ctxi->rht_start = rhte;
803out:
804 return ctxi;
805
806err:
807 kfree(ws);
808 kfree(lli);
809 kfree(ctxi);
810 ctxi = NULL;
811 goto out;
812}
813
814
815
816
817
818
819
820
821
822
823
824static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
825 void *ctx, int ctxid, struct file *file, u32 perms,
826 u64 irqs)
827{
828 struct afu *afu = cfg->afu;
829
830 ctxi->rht_perms = perms;
831 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
832 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
833 ctxi->irqs = irqs;
834 ctxi->pid = task_tgid_nr(current);
835 ctxi->ctx = ctx;
836 ctxi->cfg = cfg;
837 ctxi->file = file;
838 ctxi->initialized = true;
839 mutex_init(&ctxi->mutex);
840 kref_init(&ctxi->kref);
841 INIT_LIST_HEAD(&ctxi->luns);
842 INIT_LIST_HEAD(&ctxi->list);
843}
844
845
846
847
848
849
850
851
852
/**
 * remove_context() - context kref release handler
 * @kref:	Kref associated with context.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/*
	 * Remove context from table/error list. The context mutex must be
	 * dropped and reacquired after the table/list mutex to honor the
	 * lock order (ctx_tbl_list_mutex -> ctxi->mutex); marking the
	 * context unavailable keeps others from grabbing it in between.
	 */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}
875
876
877
878
879
880
881
882
883
884
885
886
887
/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources (NULL to look up from @detach).
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are released. When detaching the last LUN from a context, the context
 * itself is removed (via the kref release handler) and the sdev reference
 * taken at attach time is dropped.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;	/* only put contexts we looked up here */

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context. If this was the final reference,
	 * the context was destroyed and must not be unlocked afterwards.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
955
/* Ioctl entry point - detaches a LUN with no pre-validated context */
static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. When the fd is
 * closed, all LUNs still attached to the context are detached (which
 * ultimately tears the context down) before the underlying fd release
 * handler is invoked.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		/* Retry as a clone in case a fork inherited the fd */
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: ctxid=%d already free\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

	/* Detach every LUN bound to this context under the ioctl semaphore */
	down_read(&cfg->ioctl_rwsem);
	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
	up_read(&cfg->ioctl_rwsem);
out_release:
	cfg->ops->fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}
1035
1036
1037
1038
1039
1040
1041
1042
1043
/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * Invalidates all user mappings of the context's adapter file so that
 * subsequent accesses fault back into the driver.
 */
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}
1048
1049
1050
1051
1052
1053
1054
1055static struct page *get_err_page(struct cxlflash_cfg *cfg)
1056{
1057 struct page *err_page = global.err_page;
1058 struct device *dev = &cfg->dev->dev;
1059
1060 if (unlikely(!err_page)) {
1061 err_page = alloc_page(GFP_KERNEL);
1062 if (unlikely(!err_page)) {
1063 dev_err(dev, "%s: Unable to allocate err_page\n",
1064 __func__);
1065 goto out;
1066 }
1067
1068 memset(page_address(err_page), -1, PAGE_SIZE);
1069
1070
1071 mutex_lock(&global.mutex);
1072 if (likely(!global.err_page))
1073 global.err_page = err_page;
1074 else {
1075 __free_page(err_page);
1076 err_page = global.err_page;
1077 }
1078 mutex_unlock(&global.mutex);
1079 }
1080
1081out:
1082 dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
1083 return err_page;
1084}
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
/**
 * cxlflash_mmap_fault() - mmap fault handler
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by the adapter services layer.
 *
 * Return: fault status per the installed handler, or VM_FAULT_SIGBUS /
 * VM_FAULT_RETRY on error
 */
static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	vm_fault_t rc = 0;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		/* Normal path - delegate to the original fault handler */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page\n",
			__func__);

		err_page = get_err_page(cfg);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not get err_page\n", __func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		/* Hand back an extra reference on the shared error page */
		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}
1159
1160
1161
1162
/* VM operations installed over the adapter's handler to intercept faults */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cfg->ops->fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}
1216
/* File operations handed out with the adapter file descriptor on attach */
const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * Each context found in the table is locked (waiting out any in-flight
 * operation), removed from the table, placed on the error recovery list,
 * marked as recovering, has its control map cleared and its user mappings
 * invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}
1255
1256
1257
1258
/* Dummy NULL fops - swapped in before an fput() on an attach error path so
 * that the release handler is not invoked for a partially set up context */
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276int check_state(struct cxlflash_cfg *cfg)
1277{
1278 struct device *dev = &cfg->dev->dev;
1279 int rc = 0;
1280
1281retry:
1282 switch (cfg->state) {
1283 case STATE_RESET:
1284 dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
1285 up_read(&cfg->ioctl_rwsem);
1286 rc = wait_event_interruptible(cfg->reset_waitq,
1287 cfg->state != STATE_RESET);
1288 down_read(&cfg->ioctl_rwsem);
1289 if (unlikely(rc))
1290 break;
1291 goto retry;
1292 case STATE_FAILTERM:
1293 dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
1294 rc = -ENODEV;
1295 break;
1296 default:
1297 break;
1298 }
1299
1300 return rc;
1301}
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 irqs = attach->num_interrupts;
	u64 flags = 0UL;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	void *ctx = NULL;

	int fd = -1;

	if (irqs > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, irqs);
		rc = -EINVAL;
		goto out;
	}

	/* On first attach, obtain the LUN's capacity information */
	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device rc=%d\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		/* A context/LUN pair can only be attached once */
		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
			__func__, rctxid);
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
			__func__, ctxid);
		rc = -ENOMEM;
		goto err;
	}

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	rc = cfg->ops->start_work(ctx, irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	/* Context mutex is locked upon return */
	init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	if (afu_is_sq_cmd_mode(afu))
		flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

	attach->hdr.return_flags = flags;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context; okay to 'stop' even if it was not started */
	if (!IS_ERR_OR_NULL(ctx)) {
		cfg->ops->stop_context(ctx);
		cfg->ops->release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which would cause us to mistakenly
	 * call into the CXL code (cxlflash_cxl_release) for a context that
	 * was never fully published. Rather than adding more complexity to
	 * that routine, point the file at the dummy fops before the fput()
	 * and return the unused fd.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Re-establishes the state for a context-in-error: obtains a fresh
 * device context, reattaches it to the AFU and puts the context back
 * in the table (removing it from the error recovery list).
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg,
			   struct ctx_info *ctxi,
			   int *adap_fd)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int fd = -1;
	int ctxid = -1;
	struct file *file;
	void *ctx;
	struct afu *afu = cfg->afu;

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	rc = cfg->ops->start_work(ctx, ctxi->irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err1;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can find us at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);
	*adap_fd = fd;
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	fput(file);
	put_unused_fd(fd);
err2:
	cfg->ops->stop_context(ctx);
err1:
	cfg->ops->release_context(ctx);
	goto out;
}
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
/**
 * cxlflash_afu_recover() - recovers the AFU after a reset
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * If the context is flagged for error recovery, a fresh hardware context
 * is obtained via recover_context() and a new adapter fd is handed back
 * to the caller. Otherwise the MMIO path is probed (mailbox read) and, if
 * unusable, the thread backs off and retries until the device state
 * check fails. Concurrent recoveries are serialized on ctx_recovery_mutex
 * and counted in recovery_threads.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 flags;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	bool locked = true;
	int lretry = 20; /* up to 2 seconds (20 * 100ms) of retries */
	int new_adap_fd = -1;
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	/*
	 * Drop the ioctl read semaphore while sleeping on the recovery
	 * mutex so a writer is not blocked behind this thread, then
	 * re-acquire it before proceeding.
	 */
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
	if (rc) {
		/* Interrupted before the mutex was taken - do not unlock */
		locked = false;
		goto out;
	}

	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
		__func__, recover->reason, rctxid);

retry:
	/* CTX_CTRL_ERR_FALLBACK also matches contexts parked in error */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi, &new_adap_fd);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
				__func__, ctxid, rc);
			/*
			 * -ENODEV can be transient (e.g. resources pinned by
			 * sibling recovery threads): back off briefly and
			 * retry, releasing the mutex across the sleep.
			 */
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc) {
					locked = false;
					goto out;
				}
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;

		/*
		 * New fd was handed out: instruct the app to close the old
		 * one and note that the context was reset.
		 */
		flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		if (afu_is_sq_cmd_mode(afu))
			flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

		recover->hdr.return_flags = flags;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = new_adap_fd;
		/* Size of the per-context host MMIO area for the app to map */
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		goto out;
	}

	/* Test if in error state: an all-FFs mailbox read means MMIO lost */
	reg = readq_be(&hwq->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Release the context reference before sleeping; another
		 * thread is expected to drive the reset. Re-check the
		 * device state after the (arbitrary) 1s pause and retry
		 * the lookup from scratch.
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	if (locked)
		mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}
1762
1763
1764
1765
1766
1767
1768
1769
1770static int process_sense(struct scsi_device *sdev,
1771 struct dk_cxlflash_verify *verify)
1772{
1773 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1774 struct device *dev = &cfg->dev->dev;
1775 struct llun_info *lli = sdev->hostdata;
1776 struct glun_info *gli = lli->parent;
1777 u64 prev_lba = gli->max_lba;
1778 struct scsi_sense_hdr sshdr = { 0 };
1779 int rc = 0;
1780
1781 rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
1782 DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
1783 if (!rc) {
1784 dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
1785 rc = -EINVAL;
1786 goto out;
1787 }
1788
1789 switch (sshdr.sense_key) {
1790 case NO_SENSE:
1791 case RECOVERED_ERROR:
1792 case NOT_READY:
1793 break;
1794 case UNIT_ATTENTION:
1795 switch (sshdr.asc) {
1796 case 0x29:
1797 fallthrough;
1798 case 0x2A:
1799 rc = read_cap16(sdev, lli);
1800 if (rc) {
1801 rc = -ENODEV;
1802 break;
1803 }
1804 if (prev_lba != gli->max_lba)
1805 dev_dbg(dev, "%s: Capacity changed old=%lld "
1806 "new=%lld\n", __func__, prev_lba,
1807 gli->max_lba);
1808 break;
1809 case 0x3F:
1810 scsi_scan_host(cfg->host);
1811 break;
1812 default:
1813 rc = -EIO;
1814 break;
1815 }
1816 break;
1817 default:
1818 rc = -EIO;
1819 break;
1820 }
1821out:
1822 dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
1823 sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
1824 return rc;
1825}
1826
1827
1828
1829
1830
1831
1832
1833
/**
 * cxlflash_disk_verify() - verifies a LUN after a SCSI event
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Optionally processes caller-supplied sense data (per the hint flags)
 * and reports back the last addressable LBA for the resource, computed
 * from the global LUN in physical mode or from the resource's LXT in
 * virtual mode.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
		"flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Caller may pass sense data (e.g. a unit attention) that should be
	 * evaluated before the capacity is reported back.
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/*
		 * process_sense() can sleep (capacity read, host rescan),
		 * so drop the context mutex around the call. The context
		 * is flagged unavailable while the mutex is not held -
		 * presumably so it is not handed out in the interim
		 * (NOTE(review): get_context() behavior not visible here).
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/*
		 * Size in bytes (chunks * chunk size * LUN block length)
		 * converted to driver-block units; minus one yields the
		 * last addressable LBA. u64 cast guards against overflow.
		 */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
		__func__, rc, verify->last_lba);
	return rc;
}
1913
1914
1915
1916
1917
1918
1919
1920static char *decode_ioctl(unsigned int cmd)
1921{
1922 switch (cmd) {
1923 case DK_CXLFLASH_ATTACH:
1924 return __stringify_1(DK_CXLFLASH_ATTACH);
1925 case DK_CXLFLASH_USER_DIRECT:
1926 return __stringify_1(DK_CXLFLASH_USER_DIRECT);
1927 case DK_CXLFLASH_USER_VIRTUAL:
1928 return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
1929 case DK_CXLFLASH_VLUN_RESIZE:
1930 return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
1931 case DK_CXLFLASH_RELEASE:
1932 return __stringify_1(DK_CXLFLASH_RELEASE);
1933 case DK_CXLFLASH_DETACH:
1934 return __stringify_1(DK_CXLFLASH_DETACH);
1935 case DK_CXLFLASH_VERIFY:
1936 return __stringify_1(DK_CXLFLASH_VERIFY);
1937 case DK_CXLFLASH_VLUN_CLONE:
1938 return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
1939 case DK_CXLFLASH_RECOVER_AFU:
1940 return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
1941 case DK_CXLFLASH_MANAGE_LUN:
1942 return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
1943 }
1944
1945 return "UNKNOWN";
1946}
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
1960{
1961 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1962 struct device *dev = &cfg->dev->dev;
1963 struct afu *afu = cfg->afu;
1964 struct llun_info *lli = sdev->hostdata;
1965 struct glun_info *gli = lli->parent;
1966 struct dk_cxlflash_release rel = { { 0 }, 0 };
1967
1968 struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
1969
1970 u64 ctxid = DECODE_CTXID(pphys->context_id),
1971 rctxid = pphys->context_id;
1972 u64 lun_size = 0;
1973 u64 last_lba = 0;
1974 u64 rsrc_handle = -1;
1975 u32 port = CHAN2PORTMASK(sdev->channel);
1976
1977 int rc = 0;
1978
1979 struct ctx_info *ctxi = NULL;
1980 struct sisl_rht_entry *rhte = NULL;
1981
1982 dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
1983
1984 rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
1985 if (unlikely(rc)) {
1986 dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
1987 goto out;
1988 }
1989
1990 ctxi = get_context(cfg, rctxid, lli, 0);
1991 if (unlikely(!ctxi)) {
1992 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1993 rc = -EINVAL;
1994 goto err1;
1995 }
1996
1997 rhte = rhte_checkout(ctxi, lli);
1998 if (unlikely(!rhte)) {
1999 dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
2000 __func__, ctxid);
2001 rc = -EMFILE;
2002 goto err1;
2003 }
2004
2005 rsrc_handle = (rhte - ctxi->rht_start);
2006
2007 rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
2008
2009 last_lba = gli->max_lba;
2010 pphys->hdr.return_flags = 0;
2011 pphys->last_lba = last_lba;
2012 pphys->rsrc_handle = rsrc_handle;
2013
2014 rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
2015 if (unlikely(rc)) {
2016 dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
2017 goto err2;
2018 }
2019
2020out:
2021 if (likely(ctxi))
2022 put_context(ctxi);
2023 dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
2024 __func__, rsrc_handle, rc, last_lba);
2025 return rc;
2026
2027err2:
2028 marshal_udir_to_rele(pphys, &rel);
2029 _cxlflash_disk_release(sdev, ctxi, &rel);
2030 goto out;
2031err1:
2032 cxlflash_lun_detach(gli);
2033 goto out;
2034}
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
2048{
2049 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
2050 struct device *dev = &cfg->dev->dev;
2051 struct llun_info *lli = sdev->hostdata;
2052 int rc = 0;
2053
2054 if (unlikely(!lli)) {
2055 dev_dbg(dev, "%s: Unknown LUN\n", __func__);
2056 rc = -EINVAL;
2057 goto out;
2058 }
2059
2060 rc = check_state(cfg);
2061 if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
2062 switch (cmd) {
2063 case DK_CXLFLASH_VLUN_RESIZE:
2064 case DK_CXLFLASH_RELEASE:
2065 case DK_CXLFLASH_DETACH:
2066 dev_dbg(dev, "%s: Command override rc=%d\n",
2067 __func__, rc);
2068 rc = 0;
2069 break;
2070 }
2071 }
2072out:
2073 return rc;
2074}
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
/**
 * cxlflash_ioctl() - IOCTL handler for the driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data buffer.
 *
 * Validates the command against the device/LUN state, copies in the user
 * payload, checks the header, and dispatches to the matching service
 * routine via a table indexed by the ioctl number. The ioctl read
 * semaphore is held for the duration of the call.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	/*
	 * Entries MUST remain in DK_CXLFLASH_* ioctl-number order: the
	 * dispatch below indexes this table with
	 * _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH).
	 */
	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};

	/* Hold read semaphore so that a writer can drain in-flight ioctls */
	down_read(&cfg->ioctl_rwsem);

	/* Virtual-LUN style ioctls are not valid with an internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		fallthrough;

	/* MANAGE_LUN deliberately skips the per-LUN checks above */
	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		fallthrough;
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	/* Every payload starts with the common header - validate it */
	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		/* Copy the result back only on success */
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}
2219