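// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Superpipe support: user-space direct access to LUNs through the AFU.
 *
 * Copyright (C) 2015 IBM Corporation
 */
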
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;
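
/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */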
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
                                   struct dk_cxlflash_resize *resize)
{
        resize->hdr = release->hdr;
        resize->context_id = release->context_id;
        resize->rsrc_handle = release->rsrc_handle;
}
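
/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */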
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
                                struct dk_cxlflash_release *release)
{
        release->hdr = detach->hdr;
        release->context_id = detach->context_id;
}
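
/**
 * marshal_udir_to_rele() - translate udirect to release structure
 * @udirect:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */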
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
                                 struct dk_cxlflash_release *release)
{
        release->hdr = udirect->hdr;
        release->context_id = udirect->context_id;
        release->rsrc_handle = udirect->rsrc_handle;
}
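
/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */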
void cxlflash_free_errpage(void)
{
        mutex_lock(&global.mutex);
        if (global.err_page) {
                __free_page(global.err_page);
                global.err_page = NULL;
        }
        mutex_unlock(&global.mutex);
}
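
/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least
 * once to flush the reset_waitq.
 */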
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int i, found = true;

        cxlflash_mark_contexts_error(cfg);

        while (true) {
                for (i = 0; i < MAX_CONTEXT; i++)
                        if (cfg->ctx_tbl[i]) {
                                found = true;
                                break;
                        }

                if (!found && list_empty(&cfg->ctx_err_recovery))
                        return;

                dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
                        __func__);
                wake_up_all(&cfg->reset_waitq);
                ssleep(1);
                found = false;
        }
}
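
/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */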
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
                                           struct file *file)
{
        struct ctx_info *ctxi;

        list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
                if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
                        return ctxi;

        return NULL;
}
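
/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi threaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */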
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
                             void *arg, enum ctx_ctrl ctx_ctrl)
{
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        struct file *file = NULL;
        struct llun_info *lli = arg;
        u64 ctxid = DECODE_CTXID(rctxid);
        int rc;
        pid_t pid = task_tgid_nr(current), ctxpid = 0;

        if (ctx_ctrl & CTX_CTRL_FILE) {
                lli = NULL;
                file = (struct file *)arg;
        }

        if (ctx_ctrl & CTX_CTRL_CLONE)
                pid = task_ppid_nr(current);

        if (likely(ctxid < MAX_CONTEXT)) {
                while (true) {
                        mutex_lock(&cfg->ctx_tbl_list_mutex);
                        ctxi = cfg->ctx_tbl[ctxid];
                        if (ctxi)
                                if ((file && (ctxi->file != file)) ||
                                    (!file && (ctxi->ctxid != rctxid)))
                                        ctxi = NULL;

                        if ((ctx_ctrl & CTX_CTRL_ERR) ||
                            (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
                                ctxi = find_error_context(cfg, rctxid, file);
                        if (!ctxi) {
                                mutex_unlock(&cfg->ctx_tbl_list_mutex);
                                goto out;
                        }

                        /*
                         * Need to acquire ownership of the context while still
                         * under the table/list lock to serialize with a remove
                         * thread. Use the 'try' semantics to avoid a deadlock:
                         * the lock order is ctx_tbl_list_mutex -> ctxi->mutex,
                         * so on failure drop the table/list lock and retry.
                         */
                        rc = mutex_trylock(&ctxi->mutex);
                        mutex_unlock(&cfg->ctx_tbl_list_mutex);
                        if (rc)
                                break; /* got the context's lock! */
                }

                if (ctxi->unavail)
                        goto denied;

                ctxpid = ctxi->pid;
                if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
                        if (pid != ctxpid)
                                goto denied;

                if (lli) {
                        list_for_each_entry(lun_access, &ctxi->luns, list)
                                if (lun_access->lli == lli)
                                        goto out;
                        goto denied;
                }
        }

out:
        dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
                "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
                ctx_ctrl);

        return ctxi;

denied:
        mutex_unlock(&ctxi->mutex);
        ctxi = NULL;
        goto out;
}
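
/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */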
void put_context(struct ctx_info *ctxi)
{
        mutex_unlock(&ctxi->mutex);
}
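
/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */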
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
        int rc = 0;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        u64 val;
        int i;

        /* Unlock cap and restrict user to read/write cmds in translated mode */
        readq_be(&ctrl_map->mbox_r);
        val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
        writeq_be(val, &ctrl_map->ctx_cap);
        val = readq_be(&ctrl_map->ctx_cap);
        if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
                dev_err(dev, "%s: ctx may be closed val=%016llx\n",
                        __func__, val);
                rc = -EAGAIN;
                goto out;
        }

        if (afu_is_ocxl_lisn(afu)) {
                /* Set up the LISN effective address for each interrupt */
                for (i = 0; i < ctxi->irqs; i++) {
                        val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
                        writeq_be(val, &ctrl_map->lisn_ea[i]);
                }

                /* Use primary HWQ PASID as identifier for all interrupts */
                val = hwq->ctx_hndl;
                writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
                writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
        }

        /* Set up MMIO registers pointing to the RHT */
        writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
        val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
        writeq_be(val, &ctrl_map->rht_cnt_id);
out:
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}
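
/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur
 * while in scsi_execute(), the EEH handler will attempt to recover. As
 * part of the recovery, the handler drains all currently running ioctls,
 * waiting until they have completed before proceeding with a reset. As
 * this routine is used on the ioctl path, this can create a condition
 * where the EEH handler becomes stuck, infinitely waiting for this ioctl
 * thread. To avoid this behavior, temporarily unmark this thread as an
 * ioctl thread by releasing the ioctl read semaphore. This will allow the
 * EEH handler to proceed with a recovery while this thread is still
 * running. Once the scsi_execute() returns, reacquire the ioctl read
 * semaphore and check the adapter state in case it changed while inside
 * of scsi_execute(). The state check will wait if the adapter is still
 * being recovered or return a failure if the recovery failed. In the
 * event that the adapter reset failed, simply return the failure as the
 * ioctl would be unable to continue.
 *
 * Note that the above puts a requirement on this routine to only be
 * called on an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */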
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct glun_info *gli = lli->parent;
        struct scsi_sense_hdr sshdr;
        u8 *cmd_buf = NULL;
        u8 *scsi_cmd = NULL;
        int rc = 0;
        int result = 0;
        int retry_cnt = 0;
        u32 to = CMD_TIMEOUT * HZ;

retry:
        cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
        scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
        if (unlikely(!cmd_buf || !scsi_cmd)) {
                rc = -ENOMEM;
                goto out;
        }

        scsi_cmd[0] = SERVICE_ACTION_IN_16;     /* read cap(16) */
        scsi_cmd[1] = SAI_READ_CAPACITY_16;     /* service action */
        put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

        dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
                retry_cnt ? "re" : "", scsi_cmd[0]);

        /* Drop the ioctl read semaphore across lengthy call */
        up_read(&cfg->ioctl_rwsem);
        result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
                              CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
                              0, 0, NULL);
        down_read(&cfg->ioctl_rwsem);
        rc = check_state(cfg);
        if (rc) {
                dev_err(dev, "%s: Failed state result=%08x\n",
                        __func__, result);
                rc = -ENODEV;
                goto out;
        }

        if (driver_byte(result) == DRIVER_SENSE) {
                result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
                if (result & SAM_STAT_CHECK_CONDITION) {
                        switch (sshdr.sense_key) {
                        case NO_SENSE:
                        case RECOVERED_ERROR:
                        case NOT_READY:
                                result &= ~SAM_STAT_CHECK_CONDITION;
                                break;
                        case UNIT_ATTENTION:
                                switch (sshdr.asc) {
                                case 0x29: /* Power on Reset or Device Reset */
                                case 0x2A: /* Device capacity changed */
                                case 0x3F: /* Report LUNs changed */
                                        /* Retry the command once more */
                                        if (retry_cnt++ < 1) {
                                                kfree(cmd_buf);
                                                kfree(scsi_cmd);
                                                goto retry;
                                        }
                                }
                                break;
                        default:
                                break;
                        }
                }
        }

        if (result) {
                dev_err(dev, "%s: command failed, result=%08x\n",
                        __func__, result);
                rc = -EIO;
                goto out;
        }

        /*
         * Read cap was successful, grab values from the buffer;
         * note that we don't need to worry about unaligned access
         * as the buffer is allocated on an aligned boundary.
         */
        mutex_lock(&gli->mutex);
        gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
        gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
        mutex_unlock(&gli->mutex);

out:
        kfree(cmd_buf);
        kfree(scsi_cmd);

        dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
                __func__, gli->max_lba, gli->blk_len, rc);
        return rc;
}
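
/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */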
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
                                struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = ctxi->cfg;
        struct device *dev = &cfg->dev->dev;
        struct sisl_rht_entry *rhte = NULL;

        if (unlikely(!ctxi->rht_start)) {
                dev_dbg(dev, "%s: Context does not have allocated RHT\n",
                        __func__);
                goto out;
        }

        if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
                dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
                        __func__, rhndl);
                goto out;
        }

        if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
                dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
                        __func__, rhndl);
                goto out;
        }

        rhte = &ctxi->rht_start[rhndl];
        if (unlikely(rhte->nmask == 0)) {
                dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
                        __func__, rhndl);
                rhte = NULL;
                goto out;
        }

out:
        return rhte;
}
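
/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */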
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
                                     struct llun_info *lli)
{
        struct cxlflash_cfg *cfg = ctxi->cfg;
        struct device *dev = &cfg->dev->dev;
        struct sisl_rht_entry *rhte = NULL;
        int i;

        /* Find a free RHT entry */
        for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
                if (ctxi->rht_start[i].nmask == 0) {
                        rhte = &ctxi->rht_start[i];
                        ctxi->rht_out++;
                        break;
                }

        if (likely(rhte))
                ctxi->rht_lun[i] = lli;

        dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
        return rhte;
}
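
/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */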
void rhte_checkin(struct ctx_info *ctxi,
                  struct sisl_rht_entry *rhte)
{
        u32 rsrc_handle = rhte - ctxi->rht_start;

        rhte->nmask = 0;
        rhte->fp = 0;
        ctxi->rht_out--;
        ctxi->rht_lun[rsrc_handle] = NULL;
        ctxi->rht_needs_ws[rsrc_handle] = false;
}
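
/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask.
 */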
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
                        u32 port_sel)
{
        /*
         * Populate the Format 1 RHT entry. Use a local (zeroed) copy to
         * stage the second dword so that the valid bit, permissions and
         * port selection mask become visible to the AFU atomically.
         */
        struct sisl_rht_entry_f1 dummy = { 0 };
        struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

        memset(rhte_f1, 0, sizeof(*rhte_f1));
        rhte_f1->fp = SISL_RHT_FP(1U, 0);
        dma_wmb(); /* Make setting of format bit visible */

        rhte_f1->lun_id = lun_id;
        dma_wmb(); /* Make setting of LUN id visible */

        /*
         * Use the dummy RHT Format 1 entry to build the second dword
         * of the entry that must populate the format, valid bit and
         * permissions in a single write.
         */
        dummy.valid = 0x80;
        dummy.fp = SISL_RHT_FP(1U, perm);
        dummy.port_sel = port_sel;
        rhte_f1->dw = dummy.dw;

        dma_wmb(); /* Make remaining RHT entry fields visible */
}
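
/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Specifies whether the LUN is already locked.
 *
 * Return: 0 on success, -errno on failure
 */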
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
        int rc = 0;

        if (!locked)
                mutex_lock(&gli->mutex);

        if (gli->mode == MODE_NONE)
                gli->mode = mode;
        else if (gli->mode != mode) {
                pr_debug("%s: gli_mode=%d requested_mode=%d\n",
                         __func__, gli->mode, mode);
                rc = -EINVAL;
                goto out;
        }

        gli->users++;
        WARN_ON(gli->users <= 0);
out:
        pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
                 __func__, rc, gli->mode, gli->users);
        if (!locked)
                mutex_unlock(&gli->mutex);
        return rc;
}
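
/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */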
void cxlflash_lun_detach(struct glun_info *gli)
{
        mutex_lock(&gli->mutex);
        WARN_ON(gli->mode == MODE_NONE);
        if (--gli->users == 0) {
                gli->mode = MODE_NONE;
                cxlflash_ba_terminate(&gli->blka.ba_lun);
        }
        pr_debug("%s: gli->users=%u\n", __func__, gli->users);
        WARN_ON(gli->users < 0);
        mutex_unlock(&gli->mutex);
}
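
/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */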
int _cxlflash_disk_release(struct scsi_device *sdev,
                           struct ctx_info *ctxi,
                           struct dk_cxlflash_release *release)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct afu *afu = cfg->afu;
        bool put_ctx = false;

        struct dk_cxlflash_resize size;
        res_hndl_t rhndl = release->rsrc_handle;

        int rc = 0;
        int rcr = 0;
        u64 ctxid = DECODE_CTXID(release->context_id),
            rctxid = release->context_id;

        struct sisl_rht_entry *rhte;
        struct sisl_rht_entry_f1 *rhte_f1;

        dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
                __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
        }

        /*
         * Resize to 0 for virtual LUNS by setting the size
         * to 0. This will clear LXT_START and LXT_CNT fields
         * in the RHT entry and properly sync with the AFU.
         *
         * Afterwards we clear the remaining fields.
         */
        switch (gli->mode) {
        case MODE_VIRTUAL:
                marshal_rele_to_resize(release, &size);
                size.req_size = 0;
                rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
                if (rc) {
                        dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
                        goto out;
                }

                break;
        case MODE_PHYSICAL:
                /*
                 * Clear the Format 1 RHT entry for direct access
                 * (physical LUN), write-ordering each step so the
                 * revocation is seen coherently by the AFU.
                 */
                rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

                rhte_f1->valid = 0;
                dma_wmb(); /* Make revocation of RHT entry visible */

                rhte_f1->lun_id = 0;
                dma_wmb(); /* Make clearing of LUN id visible */

                rhte_f1->dw = 0;
                dma_wmb(); /* Make RHT entry bottom-half clearing visible */

                if (!ctxi->err_recovery_active) {
                        rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
                        if (unlikely(rcr))
                                dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
                                        __func__, rcr);
                }
                break;
        default:
                WARN(1, "Unsupported LUN mode!");
                goto out;
        }

        rhte_checkin(ctxi, rhte);
        cxlflash_lun_detach(gli);

out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
                          struct dk_cxlflash_release *release)
{
        return _cxlflash_disk_release(sdev, NULL, release);
}
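
/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * The existence of the context control map is checked before clearing
 * the RHT registers and context capabilities because it is possible to
 * destroy a context while the context is in the error state (previous
 * mapping was removed [so there is no need to worry about clearing] and
 * the context is waiting for a new mapping).
 */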
static void destroy_context(struct cxlflash_cfg *cfg,
                            struct ctx_info *ctxi)
{
        struct afu *afu = cfg->afu;

        if (ctxi->initialized) {
                WARN_ON(!list_empty(&ctxi->luns));

                /* Clear RHT registers and drop all capabilities for context */
                if (afu->afu_map && ctxi->ctrl_map) {
                        writeq_be(0, &ctxi->ctrl_map->rht_start);
                        writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
                        writeq_be(0, &ctxi->ctrl_map->ctx_cap);
                }
        }

        /* Free memory associated with context */
        free_page((ulong)ctxi->rht_start);
        kfree(ctxi->rht_needs_ws);
        kfree(ctxi->rht_lun);
        kfree(ctxi);
}
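
/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */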
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct llun_info **lli = NULL;
        u8 *ws = NULL;
        struct sisl_rht_entry *rhte;

        ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
        lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
        ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
        if (unlikely(!ctxi || !lli || !ws)) {
                dev_err(dev, "%s: Unable to allocate context\n", __func__);
                goto err;
        }

        rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
        if (unlikely(!rhte)) {
                dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
                goto err;
        }

        ctxi->rht_lun = lli;
        ctxi->rht_needs_ws = ws;
        ctxi->rht_start = rhte;
out:
        return ctxi;

err:
        kfree(ws);
        kfree(lli);
        kfree(ctxi);
        ctxi = NULL;
        goto out;
}
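
/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context.
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained context cookie.
 * @ctxid:	Previously obtained process element associated with context.
 * @file:	Previously obtained file associated with context.
 * @perms:	User-specified permissions.
 * @irqs:	User-specified number of interrupts.
 */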
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
                         void *ctx, int ctxid, struct file *file, u32 perms,
                         u64 irqs)
{
        struct afu *afu = cfg->afu;

        ctxi->rht_perms = perms;
        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
        ctxi->irqs = irqs;
        ctxi->pid = task_tgid_nr(current); /* tgid = pid */
        ctxi->ctx = ctx;
        ctxi->cfg = cfg;
        ctxi->file = file;
        ctxi->initialized = true;
        mutex_init(&ctxi->mutex);
        kref_init(&ctxi->kref);
        INIT_LIST_HEAD(&ctxi->luns);
        INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
}
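
/**
 * remove_context() - context kref release handler
 * @kref:	Kref associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */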
static void remove_context(struct kref *kref)
{
        struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
        struct cxlflash_cfg *cfg = ctxi->cfg;
        u64 ctxid = DECODE_CTXID(ctxi->ctxid);

        /* Remove context from table/error list */
        WARN_ON(!mutex_is_locked(&ctxi->mutex));
        ctxi->unavail = true;
        mutex_unlock(&ctxi->mutex);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);

        if (!list_empty(&ctxi->list))
                list_del(&ctxi->list);
        cfg->ctx_tbl[ctxid] = NULL;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        mutex_unlock(&ctxi->mutex);

        /* Context now completely uncoupled/unreachable */
        destroy_context(cfg, ctxi);
}
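
/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN from a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */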
static int _cxlflash_disk_detach(struct scsi_device *sdev,
                                 struct ctx_info *ctxi,
                                 struct dk_cxlflash_detach *detach)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct lun_access *lun_access, *t;
        struct dk_cxlflash_release rel;
        bool put_ctx = false;

        int i;
        int rc = 0;
        u64 ctxid = DECODE_CTXID(detach->context_id),
            rctxid = detach->context_id;

        dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
                        dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }

                put_ctx = true;
        }

        /* Cleanup outstanding resources tied to this LUN */
        if (ctxi->rht_out) {
                marshal_det_to_rele(detach, &rel);
                for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
                        if (ctxi->rht_lun[i] == lli) {
                                rel.rsrc_handle = i;
                                _cxlflash_disk_release(sdev, ctxi, &rel);
                        }

                        /* No need to loop further if we're done */
                        if (ctxi->rht_out == 0)
                                break;
                }
        }

        /* Take our LUN out of context, free the node */
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                if (lun_access->lli == lli) {
                        list_del(&lun_access->list);
                        kfree(lun_access);
                        lun_access = NULL;
                        break;
                }

        /*
         * Release the context reference and the sdev reference that
         * bound this LUN to the context.
         */
        if (kref_put(&ctxi->kref, remove_context))
                put_ctx = false;
        scsi_device_put(sdev);
out:
        if (put_ctx)
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
                                struct dk_cxlflash_detach *detach)
{
        return _cxlflash_disk_detach(sdev, NULL, detach);
}
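
/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. Programmatically, the user is not required to perform a close.
 *
 * When the context is no longer tracked under its own pid (e.g. the fd
 * was inherited over a fork), a second lookup pass using the 'clone'
 * control detects the case where another process still owns the
 * context; the underlying release is then skipped so the owning
 * process keeps its context intact.
 *
 * Return: 0 on success
 */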
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        void *ctx = cfg->ops->fops_get_context(file);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct dk_cxlflash_detach detach = { { 0 }, 0 };
        struct lun_access *lun_access, *t;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
                if (!ctxi) {
                        dev_dbg(dev, "%s: ctxid=%d already free\n",
                                __func__, ctxid);
                        goto out_release;
                }

                dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
                        __func__, ctxid);
                put_context(ctxi);
                goto out;
        }

        dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

        down_read(&cfg->ioctl_rwsem);
        detach.context_id = ctxi->ctxid;
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
                _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
        up_read(&cfg->ioctl_rwsem);
out_release:
        cfg->ops->fd_release(inode, file);
out:
        dev_dbg(dev, "%s: returning\n", __func__);
        return 0;
}
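
/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */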
static void unmap_context(struct ctx_info *ctxi)
{
        unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}
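
/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg:	Internal structure associated with the host.
 *
 * Return: error notification page on success, NULL on failure
 */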
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
        struct page *err_page = global.err_page;
        struct device *dev = &cfg->dev->dev;

        if (unlikely(!err_page)) {
                err_page = alloc_page(GFP_KERNEL);
                if (unlikely(!err_page)) {
                        dev_err(dev, "%s: Unable to allocate err_page\n",
                                __func__);
                        goto out;
                }

                memset(page_address(err_page), -1, PAGE_SIZE);

                /* Serialize update w/ other threads to avoid a leak */
                mutex_lock(&global.mutex);
                if (likely(!global.err_page))
                        global.err_page = err_page;
                else {
                        __free_page(err_page);
                        err_page = global.err_page;
                }
                mutex_unlock(&global.mutex);
        }

out:
        dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
        return err_page;
}
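
/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by the CXL services as part of the mmap.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */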
static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct file *file = vma->vm_file;
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        void *ctx = cfg->ops->fops_get_context(file);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        struct page *err_page = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        vm_fault_t rc = 0;
        int ctxid;

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                goto err;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
                goto err;
        }

        dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

        if (likely(!ctxi->err_recovery_active)) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                rc = ctxi->cxl_mmap_vmops->fault(vmf);
        } else {
                dev_dbg(dev, "%s: err recovery active, use err_page\n",
                        __func__);

                err_page = get_err_page(cfg);
                if (unlikely(!err_page)) {
                        dev_err(dev, "%s: Could not get err_page\n", __func__);
                        rc = VM_FAULT_RETRY;
                        goto out;
                }

                get_page(err_page);
                vmf->page = err_page;
                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
        return rc;

err:
        rc = VM_FAULT_SIGBUS;
        goto out;
}
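
/*
 * Local MMAP vmops to 'catch' faults
 */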
static const struct vm_operations_struct cxlflash_mmap_vmops = {
        .fault = cxlflash_mmap_fault,
};
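
/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */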
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
                                                cxl_fops);
        void *ctx = cfg->ops->fops_get_context(file);
        struct device *dev = &cfg->dev->dev;
        struct ctx_info *ctxi = NULL;
        enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
        int ctxid;
        int rc = 0;

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely(ctxid < 0)) {
                dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                rc = -EIO;
                goto out;
        }

        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
                rc = -EIO;
                goto out;
        }

        dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

        rc = cfg->ops->fd_mmap(file, vma);
        if (likely(!rc)) {
                /* Insert ourself in the mmap fault handler path */
                ctxi->cxl_mmap_vmops = vma->vm_ops;
                vma->vm_ops = &cxlflash_mmap_vmops;
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        return rc;
}

const struct file_operations cxlflash_cxl_fops = {
        .owner = THIS_MODULE,
        .mmap = cxlflash_cxl_mmap,
        .release = cxlflash_cxl_release,
};
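
/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */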
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
        int i, rc = 0;
        struct ctx_info *ctxi = NULL;

        mutex_lock(&cfg->ctx_tbl_list_mutex);

        for (i = 0; i < MAX_CONTEXT; i++) {
                ctxi = cfg->ctx_tbl[i];
                if (ctxi) {
                        mutex_lock(&ctxi->mutex);
                        cfg->ctx_tbl[i] = NULL;
                        list_add(&ctxi->list, &cfg->ctx_err_recovery);
                        ctxi->err_recovery_active = true;
                        ctxi->ctrl_map = NULL;
                        unmap_context(ctxi);
                        mutex_unlock(&ctxi->mutex);
                }
        }

        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        return rc;
}
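
/*
 * Dummy NULL fops
 */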
static const struct file_operations null_fops = {
        .owner = THIS_MODULE,
};
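
/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used on process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */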
int check_state(struct cxlflash_cfg *cfg)
{
        struct device *dev = &cfg->dev->dev;
        int rc = 0;

retry:
        switch (cfg->state) {
        case STATE_RESET:
                dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
                up_read(&cfg->ioctl_rwsem);
                rc = wait_event_interruptible(cfg->reset_waitq,
                                              cfg->state != STATE_RESET);
                down_read(&cfg->ioctl_rwsem);
                if (unlikely(rc))
                        break;
                goto retry;
        case STATE_FAILTERM:
                dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
                rc = -ENODEV;
                break;
        default:
                break;
        }

        return rc;
}
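
/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */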
static int cxlflash_disk_attach(struct scsi_device *sdev,
                                struct dk_cxlflash_attach *attach)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct ctx_info *ctxi = NULL;
        struct lun_access *lun_access = NULL;
        int rc = 0;
        u32 perms;
        int ctxid = -1;
        u64 irqs = attach->num_interrupts;
        u64 flags = 0UL;
        u64 rctxid = 0UL;
        struct file *file = NULL;
        void *ctx = NULL;
        int fd = -1;

        if (irqs > 4) {
                dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
                        __func__, irqs);
                rc = -EINVAL;
                goto out;
        }

        if (gli->max_lba == 0) {
                dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
                        __func__, lli->lun_id[sdev->channel]);
                rc = read_cap16(sdev, lli);
                if (rc) {
                        dev_err(dev, "%s: Invalid device rc=%d\n",
                                __func__, rc);
                        rc = -ENODEV;
                        goto out;
                }
                dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
                dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
        }

        if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
                rctxid = attach->context_id;
                ctxi = get_context(cfg, rctxid, NULL, 0);
                if (!ctxi) {
                        dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
                                __func__, rctxid);
                        rc = -EINVAL;
                        goto out;
                }

                list_for_each_entry(lun_access, &ctxi->luns, list)
                        if (lun_access->lli == lli) {
                                dev_dbg(dev, "%s: Already attached\n",
                                        __func__);
                                rc = -EINVAL;
                                goto out;
                        }
        }

        rc = scsi_device_get(sdev);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
                goto out;
        }

        lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
        if (unlikely(!lun_access)) {
                dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
                rc = -ENOMEM;
                goto err;
        }

        lun_access->lli = lli;
        lun_access->sdev = sdev;

        /* Non-NULL context indicates reuse (another context reference) */
        if (ctxi) {
                dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
                        __func__, rctxid);
                kref_get(&ctxi->kref);
                list_add(&lun_access->list, &ctxi->luns);
                goto out_attach;
        }

        ctxi = create_context(cfg);
        if (unlikely(!ctxi)) {
                dev_err(dev, "%s: Failed to create context ctxid=%d\n",
                        __func__, ctxid);
                rc = -ENOMEM;
                goto err;
        }

        ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
        if (IS_ERR_OR_NULL(ctx)) {
                dev_err(dev, "%s: Could not initialize context %p\n",
                        __func__, ctx);
                rc = -ENODEV;
                goto err;
        }

        rc = cfg->ops->start_work(ctx, irqs);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Could not start context rc=%d\n",
                        __func__, rc);
                goto err;
        }

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
                dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
                rc = -EPERM;
                goto err;
        }

        file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
        if (unlikely(fd < 0)) {
                rc = -ENODEV;
                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
                goto err;
        }

        /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
        perms = SISL_RHT_PERM(attach->hdr.flags + 1);

        /* Context mutex is locked upon return */
        init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);

        rc = afu_attach(cfg, ctxi);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
                goto err;
        }

        /*
         * No error paths after this point. Once the fd is installed it's
         * visible to user space and can't be undone safely on this thread.
         * There is no need to worry about a deadlock here because no one
         * knows about us yet; we can be the only one holding our mutex.
         */
        list_add(&lun_access->list, &ctxi->luns);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);
        cfg->ctx_tbl[ctxid] = ctxi;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        fd_install(fd, file);

out_attach:
        if (fd != -1)
                flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
        if (afu_is_sq_cmd_mode(afu))
                flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

        attach->hdr.return_flags = flags;
        attach->context_id = ctxi->ctxid;
        attach->block_size = gli->blk_len;
        attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
        attach->last_lba = gli->max_lba;
        attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
        attach->max_xfer /= gli->blk_len;

out:
        attach->adap_fd = fd;

        if (ctxi)
                put_context(ctxi);

        dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
                __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
        return rc;

err:
        /* Cleanup CXL context; okay to 'stop' even if it was not started */
        if (!IS_ERR_OR_NULL(ctx)) {
                cfg->ops->stop_context(ctx);
                cfg->ops->release_context(ctx);
                ctx = NULL;
        }

        /*
         * Override the fops on the file with the dummy all-NULL fops so
         * that fput() does not invoke the CXL release handler for a
         * context that was never fully attached; then release the unused
         * file descriptor.
         */
        if (fd > 0) {
                file->f_op = &null_fops;
                fput(file);
                put_unused_fd(fd);
                fd = -1;
                file = NULL;
        }

        /* Cleanup our context */
        if (ctxi) {
                destroy_context(cfg, ctxi);
                ctxi = NULL;
        }

        kfree(lun_access);
        scsi_device_put(sdev);
        goto out;
}
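
/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Restores the state of a context in error. This routine does not log
 * or otherwise notify the user of the failure; that is left to the caller.
 *
 * Return: 0 on success, -errno on failure
 */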
static int recover_context(struct cxlflash_cfg *cfg,
                           struct ctx_info *ctxi,
                           int *adap_fd)
{
        struct device *dev = &cfg->dev->dev;
        int rc = 0;
        int fd = -1;
        int ctxid = -1;
        struct file *file;
        void *ctx;
        struct afu *afu = cfg->afu;

        ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
        if (IS_ERR_OR_NULL(ctx)) {
                dev_err(dev, "%s: Could not initialize context %p\n",
                        __func__, ctx);
                rc = -ENODEV;
                goto out;
        }

        rc = cfg->ops->start_work(ctx, ctxi->irqs);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Could not start context rc=%d\n",
                        __func__, rc);
                goto err1;
        }

        ctxid = cfg->ops->process_element(ctx);
        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
                dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
                rc = -EPERM;
                goto err2;
        }

        file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
        if (unlikely(fd < 0)) {
                rc = -ENODEV;
                dev_err(dev, "%s: Could not get file descriptor\n", __func__);
                goto err2;
        }

        /* Update with new MMIO area based on updated context id */
        ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

        rc = afu_attach(cfg, ctxi);
        if (rc) {
                dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
                goto err3;
        }

        /*
         * No error paths after this point. Once the fd is installed it's
         * visible to user space and can't be undone safely on this thread.
         */
        ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
        ctxi->ctx = ctx;
        ctxi->file = file;

        /*
         * Put context back in table (note the reinit of the context list);
         * we must first drop the context's mutex and then acquire it in
         * order with the table/list mutex to avoid a deadlock - safe to do
         * here because no one can find us at this moment in time.
         */
        mutex_unlock(&ctxi->mutex);
        mutex_lock(&cfg->ctx_tbl_list_mutex);
        mutex_lock(&ctxi->mutex);
        list_del_init(&ctxi->list);
        cfg->ctx_tbl[ctxid] = ctxi;
        mutex_unlock(&cfg->ctx_tbl_list_mutex);
        fd_install(fd, file);
        *adap_fd = fd;
out:
        dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
                __func__, ctxid, fd, rc);
        return rc;

err3:
        fput(file);
        put_unused_fd(fd);
err2:
        cfg->ops->stop_context(ctx);
err1:
        cfg->ops->release_context(ctx);
        goto out;
}
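
/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to perform the recovery.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter reset state). To avoid
 * looping in this routine during that window, a 1 second sleep is in
 * place between the time the MMIO failure is detected and the time a
 * wait on the reset wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */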
static int cxlflash_afu_recover(struct scsi_device *sdev,
                                struct dk_cxlflash_recover_afu *recover)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct afu *afu = cfg->afu;
        struct ctx_info *ctxi = NULL;
        struct mutex *mutex = &cfg->ctx_recovery_mutex;
        struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
        u64 flags;
        u64 ctxid = DECODE_CTXID(recover->context_id),
            rctxid = recover->context_id;
        long reg;
        bool locked = true;
        int lretry = 20; /* up to 2 seconds */
        int new_adap_fd = -1;
        int rc = 0;

        atomic_inc(&cfg->recovery_threads);
        up_read(&cfg->ioctl_rwsem);
        rc = mutex_lock_interruptible(mutex);
        down_read(&cfg->ioctl_rwsem);
        if (rc) {
                locked = false;
                goto out;
        }

        rc = check_state(cfg);
        if (rc) {
                dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
                rc = -ENODEV;
                goto out;
        }

        dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
                __func__, recover->reason, rctxid);

retry:
        /* Ensure that this process is attached to the context */
        ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
                rc = -EINVAL;
                goto out;
        }

        if (ctxi->err_recovery_active) {
retry_recover:
                rc = recover_context(cfg, ctxi, &new_adap_fd);
                if (unlikely(rc)) {
                        dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
                                __func__, ctxid, rc);
                        if ((rc == -ENODEV) &&
                            ((atomic_read(&cfg->recovery_threads) > 1) ||
                             (lretry--))) {
                                dev_dbg(dev, "%s: Going to try again\n",
                                        __func__);
                                mutex_unlock(mutex);
                                msleep(100);
                                rc = mutex_lock_interruptible(mutex);
                                if (rc) {
                                        locked = false;
                                        goto out;
                                }
                                goto retry_recover;
                        }

                        goto out;
                }

                ctxi->err_recovery_active = false;

                flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
                        DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
                if (afu_is_sq_cmd_mode(afu))
                        flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

                recover->hdr.return_flags = flags;
                recover->context_id = ctxi->ctxid;
                recover->adap_fd = new_adap_fd;
                recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
                goto out;
        }

        /* Test if in error state */
        reg = readq_be(&hwq->ctrl_map->mbox_r);
        if (reg == -1) {
                dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

                /*
                 * Before checking the state, put back the context obtained
                 * with get_context() as it is no longer needed and sleep
                 * for a short period of time (see prolog notes).
                 */
                put_context(ctxi);
                ctxi = NULL;
                ssleep(1);
                rc = check_state(cfg);
                if (unlikely(rc))
                        goto out;
                goto retry;
        }

        dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
        if (likely(ctxi))
                put_context(ctxi);
        if (locked)
                mutex_unlock(mutex);
        atomic_dec_if_positive(&cfg->recovery_threads);
        return rc;
}
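
/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */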
static int process_sense(struct scsi_device *sdev,
                         struct dk_cxlflash_verify *verify)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        u64 prev_lba = gli->max_lba;
        struct scsi_sense_hdr sshdr = { 0 };
        int rc = 0;

        rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
                                  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
        if (!rc) {
                dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
                rc = -EINVAL;
                goto out;
        }

        switch (sshdr.sense_key) {
        case NO_SENSE:
        case RECOVERED_ERROR:
        case NOT_READY:
                break;
        case UNIT_ATTENTION:
                switch (sshdr.asc) {
                case 0x29: /* Power on Reset or Device Reset */
                case 0x2A: /* Device settings/capacity changed */
                        rc = read_cap16(sdev, lli);
                        if (rc) {
                                rc = -ENODEV;
                                break;
                        }
                        if (prev_lba != gli->max_lba)
                                dev_dbg(dev, "%s: Capacity changed old=%lld "
                                        "new=%lld\n", __func__, prev_lba,
                                        gli->max_lba);
                        break;
                case 0x3F: /* Report LUNs changed, need to rescan */
                        scsi_scan_host(cfg->host);
                        break;
                default:
                        rc = -EIO;
                        break;
                }
                break;
        default:
                rc = -EIO;
                break;
        }
out:
        dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
                sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
        return rc;
}
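
/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */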
static int cxlflash_disk_verify(struct scsi_device *sdev,
                                struct dk_cxlflash_verify *verify)
{
        int rc = 0;
        struct ctx_info *ctxi = NULL;
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct sisl_rht_entry *rhte = NULL;
        res_hndl_t rhndl = verify->rsrc_handle;
        u64 ctxid = DECODE_CTXID(verify->context_id),
            rctxid = verify->context_id;
        u64 last_lba = 0;

        dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
                "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
                verify->hint, verify->hdr.flags);

        ctxi = get_context(cfg, rctxid, lli, 0);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
                rc = -EINVAL;
                goto out;
        }

        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
        }

        /*
         * Look at the hint/sense to see if it requires us to redrive
         * inquiry (i.e. the Unit attention is due to the WWN changing).
         */
        if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
                /* Can't hold mutex across process_sense/read_cap16,
                 * since we could have an intervening EEH event.
                 */
                ctxi->unavail = true;
                mutex_unlock(&ctxi->mutex);
                rc = process_sense(sdev, verify);
                if (unlikely(rc)) {
                        dev_err(dev, "%s: Failed to validate sense data (%d)\n",
                                __func__, rc);
                        mutex_lock(&ctxi->mutex);
                        ctxi->unavail = false;
                        goto out;
                }
                mutex_lock(&ctxi->mutex);
                ctxi->unavail = false;
        }

        switch (gli->mode) {
        case MODE_PHYSICAL:
                last_lba = gli->max_lba;
                break;
        case MODE_VIRTUAL:
                /* Size is derived from the number of allocated LXT entries */
                last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
                last_lba /= CXLFLASH_BLOCK_SIZE;
                last_lba--;
                break;
        default:
                WARN(1, "Unsupported LUN mode!");
        }

        verify->last_lba = last_lba;

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
                __func__, rc, verify->last_lba);
        return rc;
}
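
/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd:	The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */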
static char *decode_ioctl(unsigned int cmd)
{
        switch (cmd) {
        case DK_CXLFLASH_ATTACH:
                return __stringify_1(DK_CXLFLASH_ATTACH);
        case DK_CXLFLASH_USER_DIRECT:
                return __stringify_1(DK_CXLFLASH_USER_DIRECT);
        case DK_CXLFLASH_USER_VIRTUAL:
                return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
        case DK_CXLFLASH_VLUN_RESIZE:
                return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
        case DK_CXLFLASH_RELEASE:
                return __stringify_1(DK_CXLFLASH_RELEASE);
        case DK_CXLFLASH_DETACH:
                return __stringify_1(DK_CXLFLASH_DETACH);
        case DK_CXLFLASH_VERIFY:
                return __stringify_1(DK_CXLFLASH_VERIFY);
        case DK_CXLFLASH_VLUN_CLONE:
                return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
        case DK_CXLFLASH_RECOVER_AFU:
                return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
        case DK_CXLFLASH_MANAGE_LUN:
                return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
        }

        return "UNKNOWN";
}
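
/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev:	SCSI device associated with LUN.
 * @arg:	UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */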
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct dk_cxlflash_release rel = { { 0 }, 0 };

        struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

        u64 ctxid = DECODE_CTXID(pphys->context_id),
            rctxid = pphys->context_id;
        u64 lun_size = 0;
        u64 last_lba = 0;
        u64 rsrc_handle = -1;
        u32 port = CHAN2PORTMASK(sdev->channel);

        int rc = 0;

        struct ctx_info *ctxi = NULL;
        struct sisl_rht_entry *rhte = NULL;

        dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

        rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
                goto out;
        }

        ctxi = get_context(cfg, rctxid, lli, 0);
        if (unlikely(!ctxi)) {
                dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
                rc = -EINVAL;
                goto err1;
        }

        rhte = rhte_checkout(ctxi, lli);
        if (unlikely(!rhte)) {
                dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
                        __func__, ctxid);
                rc = -EMFILE;   /* too many opens */
                goto err1;
        }

        rsrc_handle = (rhte - ctxi->rht_start);

        rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);

        last_lba = gli->max_lba;
        pphys->hdr.return_flags = 0;
        pphys->last_lba = last_lba;
        pphys->rsrc_handle = rsrc_handle;

        rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
        if (unlikely(rc)) {
                dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
                goto err2;
        }

out:
        if (likely(ctxi))
                put_context(ctxi);
        dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
                __func__, rsrc_handle, rc, last_lba);
        return rc;

err2:
        marshal_udir_to_rele(pphys, &rel);
        _cxlflash_disk_release(sdev, ctxi, &rel);
        goto out;
err1:
        cxlflash_lun_detach(gli);
        goto out;
}
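
/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls.
 * Always allow through ioctls that are cleanup oriented in nature, even
 * when operating in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */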
static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
{
        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        int rc = 0;

        if (unlikely(!lli)) {
                dev_dbg(dev, "%s: Unknown LUN\n", __func__);
                rc = -EINVAL;
                goto out;
        }

        rc = check_state(cfg);
        if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
                switch (cmd) {
                case DK_CXLFLASH_VLUN_RESIZE:
                case DK_CXLFLASH_RELEASE:
                case DK_CXLFLASH_DETACH:
                        dev_dbg(dev, "%s: Command override rc=%d\n",
                                __func__, rc);
                        rc = 0;
                        break;
                }
        }
out:
        return rc;
}
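
/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */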
int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
        typedef int (*sioctl) (struct scsi_device *, void *);

        struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct dk_cxlflash_hdr *hdr;
        char buf[sizeof(union cxlflash_ioctls)];
        size_t size = 0;
        bool known_ioctl = false;
        int idx;
        int rc = 0;
        struct Scsi_Host *shost = sdev->host;
        sioctl do_ioctl = NULL;

        static const struct {
                size_t size;
                sioctl ioctl;
        } ioctl_tbl[] = {       /* NOTE: order matters here */
        {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
        {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
        {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
        {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
        {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
        {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
        {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
        {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
        {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
        {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
        };

        /* Hold read semaphore so we can drain if needed */
        down_read(&cfg->ioctl_rwsem);

        /* Restrict command set to physical support only for internal LUN */
        if (afu->internal_lun)
                switch (cmd) {
                case DK_CXLFLASH_RELEASE:
                case DK_CXLFLASH_USER_VIRTUAL:
                case DK_CXLFLASH_VLUN_RESIZE:
                case DK_CXLFLASH_VLUN_CLONE:
                        dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
                                __func__, decode_ioctl(cmd), afu->internal_lun);
                        rc = -EINVAL;
                        goto cxlflash_ioctl_exit;
                }

        switch (cmd) {
        case DK_CXLFLASH_ATTACH:
        case DK_CXLFLASH_USER_DIRECT:
        case DK_CXLFLASH_RELEASE:
        case DK_CXLFLASH_DETACH:
        case DK_CXLFLASH_VERIFY:
        case DK_CXLFLASH_RECOVER_AFU:
        case DK_CXLFLASH_USER_VIRTUAL:
        case DK_CXLFLASH_VLUN_RESIZE:
        case DK_CXLFLASH_VLUN_CLONE:
                dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
                        __func__, decode_ioctl(cmd), cmd, shost->host_no,
                        sdev->channel, sdev->id, sdev->lun);
                rc = ioctl_common(sdev, cmd);
                if (unlikely(rc))
                        goto cxlflash_ioctl_exit;

                /* fall through */

        case DK_CXLFLASH_MANAGE_LUN:
                known_ioctl = true;
                idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
                size = ioctl_tbl[idx].size;
                do_ioctl = ioctl_tbl[idx].ioctl;

                if (likely(do_ioctl))
                        break;

                /* fall through */
        default:
                rc = -EINVAL;
                goto cxlflash_ioctl_exit;
        }

        if (unlikely(copy_from_user(&buf, arg, size))) {
                dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
                        __func__, size, cmd, decode_ioctl(cmd), arg);
                rc = -EFAULT;
                goto cxlflash_ioctl_exit;
        }

        hdr = (struct dk_cxlflash_hdr *)&buf;
        if (hdr->version != DK_CXLFLASH_VERSION_0) {
                dev_dbg(dev, "%s: Version %u not supported for %s\n",
                        __func__, hdr->version, decode_ioctl(cmd));
                rc = -EINVAL;
                goto cxlflash_ioctl_exit;
        }

        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
                dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
                rc = -EINVAL;
                goto cxlflash_ioctl_exit;
        }

        rc = do_ioctl(sdev, (void *)&buf);
        if (likely(!rc))
                if (unlikely(copy_to_user(arg, &buf, size))) {
                        dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
                                __func__, size, cmd, decode_ioctl(cmd), arg);
                        rc = -EFAULT;
                }

        /* fall through to exit */

cxlflash_ioctl_exit:
        up_read(&cfg->ioctl_rwsem);
        if (unlikely(rc && known_ioctl))
                dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
                        "returned rc %d\n", __func__,
                        decode_ioctl(cmd), cmd, shost->host_no,
                        sdev->channel, sdev->id, sdev->lun, rc);
        else
                dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
                        "returned rc %d\n", __func__, decode_ioctl(cmd),
                        cmd, shost->host_no, sdev->channel, sdev->id,
                        sdev->lun, rc);
        return rc;
}