// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}

/**
 * marshal_udir_to_rele() - translate udirect to release structure
 * @udirect:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
				 struct dk_cxlflash_release *release)
{
	release->hdr = udirect->hdr;
	release->context_id = udirect->context_id;
	release->rsrc_handle = udirect->rsrc_handle;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least
 * once to flush the reset_waitq, which is why 'found' starts out true.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found = true;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		found = false;
	}
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi threaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = task_tgid_nr(current), ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = task_ppid_nr(current);

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}

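/*
 * Usage sketch (illustrative, not driver code): callers pair get_context()
 * with put_context() and operate on the context with its mutex held, as
 * the ioctl handlers later in this file do:
 *
 *	ctxi = get_context(cfg, rctxid, lli, 0);
 *	if (unlikely(!ctxi))
 *		return -EINVAL;
 *	rhte = get_rhte(ctxi, rhndl, lli);
 *	...
 *	put_context(ctxi);
 */
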
/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 val;
	int i;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each interrupt */
		for (i = 0; i < ctxi->irqs; i++) {
			val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
			writeq_be(val, &ctrl_map->lisn_ea[i]);
		}

		/* Use primary HWQ PASID as identifier for all interrupts */
		val = hwq->ctx_hndl;
		writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
		writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur
 * while in scsi_execute(), the EEH handler will attempt to recover. As part
 * of the recovery, the handler drains all currently running ioctls, waiting
 * until they have completed before proceeding with a reset. As this routine
 * is used on the ioctl path, this can create a condition where the EEH
 * handler becomes stuck, infinitely waiting for this ioctl thread. To avoid
 * this behavior, temporarily unmark this thread as an ioctl thread by
 * releasing the ioctl read semaphore. This will allow the EEH handler to
 * proceed with a recovery while this thread is still running. Once the
 * scsi_execute() returns, reacquire the ioctl read semaphore and check the
 * adapter state in case it changed while inside of scsi_execute(). The state
 * check will wait if the adapter is still being recovered or return a
 * failure if the recovery failed. In the event that the adapter reset
 * failed, simply return the failure as the ioctl would be unable to
 * continue.
 *
 * Note that the above puts a requirement on this routine to only be called
 * on an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	struct scsi_sense_hdr sshdr;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
			      0, 0, NULL);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (driver_byte(result) == DRIVER_SENSE) {
		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
		if (result & SAM_STAT_CHECK_CONDITION) {
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
				/* fall through */
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					/* fall through */
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}

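/*
 * For reference, the READ CAPACITY(16) exchange parsed above (per SPC/SBC;
 * descriptive comment only, not driver code):
 *
 *	CDB:  byte 0      = 0x9E (SERVICE ACTION IN(16))
 *	      byte 1      = 0x10 (READ CAPACITY(16) service action)
 *	      bytes 10-13 = allocation length (big endian)
 *
 *	Data: bytes 0-7   = returned last logical block address (big endian)
 *	      bytes 8-11  = logical block length in bytes (big endian)
 *
 * These two fields are exactly what lands in gli->max_lba and gli->blk_len.
 */
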
/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		dev_dbg(dev, "%s: Context does not have allocated RHT\n",
			__func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
			__func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
	return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}

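/*
 * Checkout/checkin sketch (illustrative): a resource handle is claimed,
 * programmed and later surrendered while holding the context mutex:
 *
 *	rhte = rhte_checkout(ctxi, lli);
 *	if (unlikely(!rhte))
 *		return -EMFILE;
 *	rht_format1(rhte, lun_id, ctxi->rht_perms, port);
 *	...
 *	rhte_checkin(ctxi, rhte);
 *
 * See cxlflash_disk_direct_open() and _cxlflash_disk_release() below for
 * the real usage.
 */
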
/**
 * rht_format1() - populates a RHT format 1 entry
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask.
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}

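/*
 * Note on the dummy entry above (descriptive): the second dword of a
 * format 1 RHT entry carries the valid bit together with the permissions
 * and the port selection mask. Staging those fields in a local copy and
 * storing them with a single 64-bit write guarantees the AFU never
 * observes a half-programmed entry, while the dma_wmb() barriers order
 * the format and LUN id stores ahead of the enabling store.
 */
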
/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: gli_mode=%d requested_mode=%d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}

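/*
 * Pairing sketch (illustrative): every successful cxlflash_lun_attach()
 * is balanced by a cxlflash_lun_detach(), including on error paths:
 *
 *	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
 *	if (unlikely(rc))
 *		goto out;
 *	...
 *	err:
 *		cxlflash_lun_detach(gli);
 *
 * See cxlflash_disk_direct_open() below for the real usage.
 */
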
/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	int rcr = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNS by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active) {
			rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
			if (unlikely(rcr))
				dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
					__func__, rcr);
		}
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previously mapped MMIO
 * memory is no longer available).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}

/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context.
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained context cookie.
 * @ctxid:	Previously obtained process element associated with context.
 * @file:	Previously obtained file associated with context.
 * @perms:	User-specified permissions.
 * @irqs:	User-specified number of interrupts.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 void *ctx, int ctxid, struct file *file, u32 perms,
			 u64 irqs)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->irqs = irqs;
	ctxi->pid = task_tgid_nr(current);
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list);
}

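/*
 * Context lifecycle sketch (descriptive, derived from this file):
 *
 *	ctxi = create_context(cfg);		allocate ctxi, RHT page, tables
 *	init_context(ctxi, ...);		kref_init() -> refcount of 1
 *	afu_attach(cfg, ctxi);			program the AFU control map
 *	...
 *	kref_get(&ctxi->kref);			per additional LUN attach
 *	kref_put(&ctxi->kref, remove_context);	final put uncouples the
 *						context and destroys it
 */
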
/**
 * remove_context() - context kref release handler
 * @kref:	Kernel reference associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from the table and error list and destroyed. It is assumed that the
 * thread performing the final kref_put() holds the context's mutex; the
 * mutex is temporarily dropped and reacquired to honor the lock order
 * (table/list mutex first).
 */
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/* Remove context from table/error list */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. Programmatically, the user is not required to perform a close,
 * however, this is considered the correct way to detach from a context.
 *
 * When a close is performed, a detach is issued for every LUN that is
 * still associated with the context, which in turn cleans up and
 * releases the context itself.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: ctxid=%d already free\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

	down_read(&cfg->ioctl_rwsem);
	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
	up_read(&cfg->ioctl_rwsem);
out_release:
	cfg->ops->fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg:	Internal structure associated with the host.
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
	struct page *err_page = global.err_page;
	struct device *dev = &cfg->dev->dev;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Unable to allocate err_page\n",
				__func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}

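/*
 * Allocation pattern note (descriptive): the error page is a lazily
 * created singleton. The page is allocated outside of global.mutex,
 * published under the mutex only if no other thread won the race, and
 * freed immediately when another thread's page is already installed.
 * This keeps the common (already-allocated) path free of locking.
 */
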
/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by the CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	vm_fault_t rc = 0;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page\n",
			__func__);

		err_page = get_err_page(cfg);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not get err_page\n", __func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cfg->ops->fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}

const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};

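/*
 * Interposition summary (descriptive, derived from the routines above):
 *
 *	user mmap(fd)  -> cxlflash_cxl_mmap() -> cfg->ops->fd_mmap()
 *	                  vma->vm_ops = &cxlflash_mmap_vmops (old ops saved)
 *	page fault     -> cxlflash_mmap_fault()
 *	                  normal:   chain to saved cxl_mmap_vmops->fault()
 *	                  recovery: map the global all-ones error page
 *	close(fd)      -> cxlflash_cxl_release() -> detach all LUNs
 */
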
/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}

/*
 * Dummy NULL fops
 */
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used on process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}

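/*
 * Usage sketch (illustrative): ioctl threads that block outside the scope
 * of the ioctl drop and reacquire the read semaphore around the wait and
 * then revalidate the adapter state:
 *
 *	up_read(&cfg->ioctl_rwsem);
 *	... lengthy/blocking operation ...
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = check_state(cfg);
 *
 * See read_cap16() above and cxlflash_afu_recover() below for real usage.
 */
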
/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 irqs = attach->num_interrupts;
	u64 flags = 0UL;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	void *ctx = NULL;

	int fd = -1;

	if (irqs > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, irqs);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device rc=%d\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
			__func__, rctxid);
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
			__func__, ctxid);
		rc = -ENOMEM;
		goto err;
	}

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	rc = cfg->ops->start_work(ctx, irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	/* Context mutex is locked upon return */
	init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	if (afu_is_sq_cmd_mode(afu))
		flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

	attach->hdr.return_flags = flags;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context; okay to 'stop' even if it was not started */
	if (!IS_ERR_OR_NULL(ctx)) {
		cfg->ops->stop_context(ctx);
		cfg->ops->release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. Rather than try to add yet more complexity
	 * to that routine, we should try to fix this more properly by
	 * implementing our own file ops that avoid this issue entirely, but
	 * for now this works.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}

/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Reestablishes the state for a context-in-error.
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg,
			   struct ctx_info *ctxi,
			   int *adap_fd)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int fd = -1;
	int ctxid = -1;
	struct file *file;
	void *ctx;
	struct afu *afu = cfg->afu;

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	rc = cfg->ops->start_work(ctx, ctxi->irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err1;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can find us at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);
	*adap_fd = fd;
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	fput(file);
	put_unused_fd(fd);
err2:
	cfg->ops->stop_context(ctx);
err1:
	cfg->ops->release_context(ctx);
	goto out;
}

/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to keep up.
 *
 * As this routine is called on ioctl context, it holds the ioctl r/w
 * semaphore that is used to drain ioctls in recovery scenarios. The
 * implementation to achieve the pacing described above (a local mutex)
 * requires that the ioctl r/w semaphore be dropped and reacquired to
 * avoid a 3-way deadlock when multiple process recoveries operate in
 * parallel.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter reset state). To avoid
 * looping in this routine during that window, a 1 second sleep is in place
 * between the time the MMIO failure is detected and the time a wait on the
 * reset wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 flags;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	bool locked = true;
	int lretry = 20; /* up to 2 seconds */
	int new_adap_fd = -1;
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
	if (rc) {
		locked = false;
		goto out;
	}

	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi, &new_adap_fd);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc) {
					locked = false;
					goto out;
				}
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;

		flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		if (afu_is_sq_cmd_mode(afu))
			flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

		recover->hdr.return_flags = flags;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = new_adap_fd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&hwq->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained
		 * with get_context() as it is no longer needed and sleep
		 * for a short period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	if (locked)
		mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}

/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
		/* fall through */
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			/* fall through */
		case 0x2A: /* Device capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, rescan */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}

/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
		"flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/* Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
		__func__, rc, verify->last_lba);
	return rc;
}

/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd:	The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}

/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev:	SCSI device associated with LUN.
 * @arg:	UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct dk_cxlflash_release rel = { { 0 }, 0 };

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORTMASK(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
			__func__, ctxid);
		rc = -EMFILE;	/* too many opens  */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

	rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
		goto err2;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err2:
	marshal_udir_to_rele(pphys, &rel);
	_cxlflash_disk_release(sdev, ctxi, &rel);
	goto out;
err1:
	cxlflash_lun_detach(gli);
	goto out;
}

/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override rc=%d\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}

/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		/* fall through */

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail "
			"size=%lu cmd=%d (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail "
				"size=%lu cmd=%d (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}
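
/*
 * Example user-space invocation (illustrative sketch, assuming the
 * DK_CXLFLASH_ATTACH definition from uapi/scsi/cxlflash_ioctl.h and a
 * cxlflash LUN device node opened at 'fd'):
 *
 *	struct dk_cxlflash_attach attach;
 *
 *	memset(&attach, 0, sizeof(attach));
 *	attach.hdr.version = DK_CXLFLASH_VERSION_0;
 *	attach.hdr.flags = O_RDWR;
 *	attach.num_interrupts = 4;
 *	if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) == 0) {
 *		attach.adap_fd, attach.context_id, attach.block_size and
 *		attach.last_lba are now populated by the handler above.
 *	}
 */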