/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;
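
/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */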
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}
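
/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */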
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}
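
/**
 * marshal_udir_to_rele() - translate udirect to release structure
 * @udirect:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */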
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
				 struct dk_cxlflash_release *release)
{
	release->hdr = udirect->hdr;
	release->context_id = udirect->context_id;
	release->rsrc_handle = udirect->rsrc_handle;
}
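
/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */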
void cxlflash_free_errpage(void)
{

	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}
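
/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least
 * once to flush the reset_waitq.
 */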
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found = true;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		found = false;
	}
}
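
/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */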
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}
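
/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi threaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */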
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = current->tgid, ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = current->parent->tgid;

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' semantics to avoid a deadlock:
			 * the lock order is table/list lock -> context lock,
			 * so when the trylock fails, drop the table/list lock
			 * and retry the entire lookup.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}
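
/**
 * put_context() - releases a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */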
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}
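
/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */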
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 val;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
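
/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur
 * while in scsi_execute(), the EEH handler will attempt to recover. As part
 * of the recovery, the handler drains all currently running ioctls, waiting
 * until they have completed before proceeding with a reset. As this routine
 * is used on the ioctl path, this can create a condition where the EEH
 * handler becomes stuck, infinitely waiting for this ioctl thread. To avoid
 * this behavior, temporarily unmark this thread as an ioctl thread by
 * releasing the ioctl read semaphore. This will allow the EEH handler to
 * proceed with a recovery while this thread is still running. Once the
 * scsi_execute() returns, reacquire the ioctl read semaphore and check the
 * adapter state in case it changed while inside of scsi_execute(). The state
 * check will wait if the adapter is still being recovered or return a
 * failure if the recovery failed. In the event that the adapter reset
 * failed, simply return the failure as the ioctl would be unable to
 * continue.
 *
 * Note that the above puts a requirement on this routine to only be called
 * on an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */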
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	u8 *sense_buf = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across the lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (driver_byte(result) == DRIVER_SENSE) {
		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
		if (result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;

			scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
				/* fall through */
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					/* fall through */
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						kfree(sense_buf);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);
	kfree(sense_buf);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}
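
/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */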
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		dev_dbg(dev, "%s: Context does not have allocated RHT\n",
			__func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
			__func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}
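
/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */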
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
	return rhte;
}
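
/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */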
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}
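
/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask.
 */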
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must populate in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}
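
/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */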
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: gli_mode=%d requested_mode=%d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}
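
/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */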
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}
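
/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */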
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	int rcr = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNS by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active) {
			rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
			if (unlikely(rcr))
				dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
					__func__, rcr);
		}
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}
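
/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */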
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}
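
/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */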
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}
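
/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context.
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained CXL context reference.
 * @ctxid:	Previously obtained process element associated with CXL context.
 * @file:	Previously obtained file associated with CXL context.
 * @perms:	User-specified permissions.
 */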
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 struct cxl_context *ctx, int ctxid, struct file *file,
			 u32 perms)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->pid = current->tgid; /* tgid = pid */
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
}
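
/**
 * remove_context() - context kref release handler
 * @kref:	Kref associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */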
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/* Remove context from table/error list */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}
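
/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN from a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */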
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}
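
/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. Programmatically, the user is not required to perform a close,
 * as the context will be closed and cleaned up regardless.
 *
 * The routine derives the context reference and then detaches each
 * LUN still associated with the context, which in turn tears down the
 * context when the last LUN is removed.
 *
 * Return: 0 on success
 */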
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: ctxid=%d already free\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
	cxl_fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}
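
/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */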
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}
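
/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg:	Internal structure associated with the host.
 *
 * Return: error notification page on success, NULL on failure
 */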
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
	struct page *err_page = global.err_page;
	struct device *dev = &cfg->dev->dev;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Unable to allocate err_page\n",
				__func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}
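
/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vma:	VM area associated with mapping.
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by the CXL services as part of initializing the
 * adapter file descriptor.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */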
static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int rc = 0;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page\n",
			__func__);

		err_page = get_err_page(cfg);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not get err_page\n", __func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}
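
/*
 * Local MMAP vmops to 'catch' faults
 */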
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};
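
/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */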
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cxl_fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}

const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};
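
/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */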
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}
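
/*
 * Dummy NULL fops
 */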
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};
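
/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used on process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */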
int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}
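
/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */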
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct cxl_ioctl_start_work *work;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 flags = 0UL;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	struct cxl_context *ctx = NULL;

	int fd = -1;

	if (attach->num_interrupts > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, attach->num_interrupts);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device rc=%d\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
			__func__, rctxid);
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
			__func__, ctxid);
		rc = -ENOMEM;
		goto err;
	}

	ctx = cxl_dev_context_init(cfg->dev);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	work = &ctxi->work;
	work->num_interrupts = attach->num_interrupts;
	work->flags = CXL_START_WORK_NUM_IRQS;

	rc = cxl_start_work(ctx, work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	init_context(ctxi, cfg, ctx, ctxid, file, perms);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	if (afu_is_sq_cmd_mode(afu))
		flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

	attach->hdr.return_flags = flags;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context; okay to 'stop' even if it was not started */
	if (!IS_ERR_OR_NULL(ctx)) {
		cxl_stop_context(ctx);
		cxl_release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. The context was already stopped/released
	 * above, so there is nothing left for the release handler to do.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}
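
/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Restores the state of a context following an adapter reset. The context
 * is assumed to be on the error recovery list and therefore not known to
 * the AFU; it is reattached and made known to the AFU again as part of
 * this recovery.
 *
 * Return: 0 on success, -errno on failure
 */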
static int recover_context(struct cxlflash_cfg *cfg,
			   struct ctx_info *ctxi,
			   int *adap_fd)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int fd = -1;
	int ctxid = -1;
	struct file *file;
	struct cxl_context *ctx;
	struct afu *afu = cfg->afu;

	ctx = cxl_dev_context_init(cfg->dev);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	rc = cxl_start_work(ctx, &ctxi->work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err1;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can find us at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);
	*adap_fd = fd;
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	fput(file);
	put_unused_fd(fd);
err2:
	cxl_stop_context(ctx);
err1:
	cxl_release_context(ctx);
	goto out;
}
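
/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to obtain a new context.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter reset state). To avoid
 * looping in this routine during that window, a 1 second sleep is in place
 * between the time the MMIO failure is detected and the time a wait on the
 * reset wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */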
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 flags;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	bool locked = true;
	int lretry = 20; /* up to 2 seconds */
	int new_adap_fd = -1;
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
	if (rc) {
		locked = false;
		goto out;
	}

	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi, &new_adap_fd);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc) {
					locked = false;
					goto out;
				}
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;

		flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		if (afu_is_sq_cmd_mode(afu))
			flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

		recover->hdr.return_flags = flags;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = new_adap_fd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&hwq->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained with
		 * get_context() as it is no longer needed and sleep for a short
		 * period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	if (locked)
		mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}
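
/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */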
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
		/* fall through */
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			/* fall through */
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}
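
/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */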
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
		"flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/* Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
		__func__, rc, verify->last_lba);
	return rc;
}
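
/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd:	The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */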
static char *decode_ioctl(int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}
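
/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev:	SCSI device associated with LUN.
 * @arg:	UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */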
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct dk_cxlflash_release rel = { { 0 }, 0 };

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORTMASK(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
			__func__, ctxid);
		rc = -EMFILE;	/* too many opens */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

	rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
		goto err2;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err2:
	marshal_udir_to_rele(pphys, &rel);
	_cxlflash_disk_release(sdev, ctxi, &rel);
	goto out;
err1:
	cxlflash_lun_detach(gli);
	goto out;
}
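
/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */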
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override rc=%d\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}
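
/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * the write semaphore is taken, blocking new ioctls from entering while
 * waiting for those already running to complete.
 *
 * Return: 0 on success, -errno on failure
 */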
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};

	/* Hold read semaphore so the sync path is not impacted */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		/* fall through */

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail "
			"size=%lu cmd=%d (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail "
				"size=%lu cmd=%d (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}