1
2
3
4
5
6
7
8
9
10
11#include <linux/file.h>
12#include <linux/idr.h>
13#include <linux/module.h>
14#include <linux/mount.h>
15#include <linux/pseudo_fs.h>
16#include <linux/poll.h>
17#include <linux/sched/signal.h>
18#include <linux/interrupt.h>
19#include <asm/xive.h>
20#include <misc/ocxl.h>
21
22#include <uapi/misc/cxl.h>
23
24#include "backend.h"
25#include "ocxl_hw.h"
26
27
28
29
30
31#define OCXLFLASH_FS_MAGIC 0x1697698f
32
/*
 * Pseudo-filesystem state backing the anonymous inodes/files handed out
 * for adapter contexts: pin count and the mount itself.
 */
static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;
35
36static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
37{
38 return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
39}
40
/* Pseudo filesystem used only to back anonymous context files. */
static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.init_fs_context = ocxlflash_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};
47
48
49
50
51
52static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
53{
54 if (ctx->mapping)
55 simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
56 ctx->mapping = NULL;
57}
58
59
60
61
62
63
64
65
66
67
68
/**
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev:	Generic device of the host.
 * @name:	Name of the pseudo filesystem.
 * @fops:	File operations.
 * @priv:	Private data stored in file->private_data.
 * @flags:	Flags for the file (only access mode and O_NONBLOCK are kept).
 *
 * Pins the ocxlflash pseudo filesystem, allocates an anonymous inode on it
 * and wraps it in a file. On any failure the resources acquired so far are
 * unwound in reverse order via the goto chain at the bottom.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	/* Hold a reference on the module providing the fops for the file's
	 * lifetime.
	 */
	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file failed rc=%d\n",
			__func__, rc);
		goto err4;
	}

	file->private_data = priv;
out:
	return file;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}
121
122
123
124
125
126
127
/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie:	Adapter context for which the mapping needs to be done.
 *
 * The context must already be STARTED (psn_phys/psn_size are set up by
 * start_context()); the state is only sampled under the mutex, the map
 * itself happens after the lock is dropped.
 *
 * Return: MMIO pointer of the mapped region on success, NULL on failure
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}
144
145
146
147
148
/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr:	Address previously returned by ocxlflash_psa_map().
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
153
154
155
156
157
158
159
160static int ocxlflash_process_element(void *ctx_cookie)
161{
162 struct ocxlflash_context *ctx = ctx_cookie;
163
164 return ctx->pe;
165}
166
167
168
169
170
171
172
173
174
175
176
177
/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags:	Flags (currently unused).
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt index (0-based).
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Creates a virtual IRQ for the context's hardware interrupt, registers
 * @handler on it and caches the XIVE trigger page address for later use.
 * Failures unwind in reverse order through the goto chain.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	struct xive_irq_data *xd;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

	xd = irq_get_handler_data(virq);
	if (unlikely(!xd)) {
		dev_err(dev, "%s: Can't get interrupt data\n", __func__);
		rc = -ENXIO;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = xd->trig_mmio;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}
225
226
227
228
229
230
231
232
233
234
235
/**
 * ocxlflash_map_afu_irq() - backend-ops wrapper over afu_map_irq()
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt index (0-based).
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}
242
243
244
245
246
247
248
249
/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags:	Flags (currently unused).
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt index (0-based).
 * @cookie:	Interrupt handler private data (as passed to request_irq).
 *
 * Frees the Linux IRQ and disposes of the virtual mapping, but only if a
 * mapping for the hardware interrupt still exists. The per-interrupt
 * bookkeeping is cleared unconditionally.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}
271
272
273
274
275
276
277
/**
 * ocxlflash_unmap_afu_irq() - backend-ops wrapper over afu_unmap_irq()
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt index (0-based).
 * @cookie:	Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	/*
	 * Plain call instead of "return <void expr>;" — returning a void
	 * expression from a void function is a GNU extension, not ISO C.
	 */
	afu_unmap_irq(0, ctx_cookie, num, cookie);
}
282
283
284
285
286
287
288
289
290static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
291{
292 struct ocxlflash_context *ctx = ctx_cookie;
293
294 if (irq < 0 || irq >= ctx->num_irqs)
295 return 0;
296
297 return (__force u64)ctx->irqs[irq].vtrig;
298}
299
300
301
302
303
304
305
/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data:	Adapter context (registered via ocxl_link_add_pe()).
 * @addr:	Address that triggered the error.
 * @dsisr:	Value of dsisr register.
 *
 * Records the fault under the context spinlock and wakes any waiter in
 * afu_read()/afu_poll() so the event can be delivered to userspace.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}
318
319
320
321
322
323
324
325
326
/**
 * start_context() - local routine to start a context
 * @ctx:	Adapter context to be started.
 *
 * Upon its successful completion, the context will be moved to the STARTED
 * state: the problem-state area (global MMIO for a master context,
 * per-process MMIO stride otherwise) is set up and the process element is
 * added to the OCXL link. The whole transition happens under state_mutex.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm are only relevant for non-master (user) contexts. */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
			      pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
			      ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}
378
379
380
381
382
383
384
/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie:	Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	return start_context(ctx_cookie);
}
391
392
393
394
395
396
397
/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie:	Adapter context to be stopped.
 *
 * Marks the context CLOSED and, if it had been STARTED, terminates the
 * PASID on the AFU and removes the process element from the link. An
 * -EBUSY from the terminate step aborts before PE removal; other
 * terminate failures fall through and still attempt PE removal.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}
434
435
436
437
438
439static int ocxlflash_afu_reset(void *ctx_cookie)
440{
441 struct ocxlflash_context *ctx = ctx_cookie;
442 struct device *dev = ctx->hw_afu->dev;
443
444
445 dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
446
447
448 return 0;
449}
450
451
452
453
454
455static void ocxlflash_set_master(void *ctx_cookie)
456{
457 struct ocxlflash_context *ctx = ctx_cookie;
458
459 ctx->master = true;
460}
461
462
463
464
465
466
467
468
469static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
470{
471 struct ocxl_hw_afu *afu = afu_cookie;
472
473 return afu->ocxl_ctx;
474}
475
476
477
478
479
480
481
482
/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev:	PCI device associated with the AFU (unused here).
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Allocates a context, assigns it a process element from the AFU's IDR
 * (bounded by max_pasid) and initializes its locks and wait queue. The
 * context starts in the OPENED state as a non-master.
 *
 * Return: returns the adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;		/* idr_alloc() returned the new PE id */
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}
525
526
527
528
529
530
531
/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie:	Adapter context to be released (NULL is tolerated).
 *
 * Refuses to release a context that is STARTED or beyond; otherwise frees
 * the PE id, drops the file mapping pin and frees the context itself.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}
558
559
560
561
562
563
564static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
565{
566 struct ocxl_hw_afu *afu = afu_cookie;
567
568 afu->perst_same_image = image;
569}
570
571
572
573
574
575
576
577
578
/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev:	PCI device associated with the AFU.
 * @buf:	Buffer to get the VPD data.
 * @count:	Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}
584
585
586
587
588
589static void free_afu_irqs(struct ocxlflash_context *ctx)
590{
591 struct ocxl_hw_afu *afu = ctx->hw_afu;
592 struct device *dev = afu->dev;
593 int i;
594
595 if (!ctx->irqs) {
596 dev_err(dev, "%s: Interrupts not allocated\n", __func__);
597 return;
598 }
599
600 for (i = ctx->num_irqs; i >= 0; i--)
601 ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
602
603 kfree(ctx->irqs);
604 ctx->irqs = NULL;
605}
606
607
608
609
610
611
612
613
/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx:	Context associated with the request.
 * @num:	Number of interrupts requested (capped at OCXL_MAX_IRQS).
 *
 * Allocates the per-context irq array and a hardware interrupt from the
 * OCXL link for each slot. On failure, interrupts allocated so far are
 * freed in reverse order.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	/* Undo the interrupts allocated before the failing slot. */
	for (i = i-1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}
663
664
665
666
667
668
669
670
/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}
675
676
677
678
679
/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie:	Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}
684
685
686
687
688
689static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
690{
691 if (afu->gmmio_virt) {
692 iounmap(afu->gmmio_virt);
693 afu->gmmio_virt = NULL;
694 }
695}
696
697
698
699
700
/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie:	AFU to be freed (NULL is tolerated).
 *
 * Releases the default context, tears down the IDR, disables the AFU in
 * config space and unmaps its MMIO before freeing the structure.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}
719
720
721
722
723
724
725
726
/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev:	PCI device associated with the AFU.
 * @afu:	AFU associated with the host.
 *
 * Reads the function configuration, records AFU presence, sets up the
 * function's acTag range, establishes the OCXL link and configures the
 * transaction layer. On TL setup failure the link is released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is expected by this driver. */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}
783
784
785
786
787
788
/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev:	PCI device associated with the AFU.
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}
793
794
795
796
797
798
799
/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu:	AFU associated with the host.
 *
 * Requests the PCI regions for the global and per-process MMIO BARs,
 * maps the global MMIO area and records the physical base addresses.
 * On failure the regions claimed so far are released in reverse order.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}
843
844
845
846
847
848
849
850
851
852
/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev:	PCI device associated with the AFU.
 * @afu:	AFU associated with the host.
 *
 * Must be called after ocxlflash_config_fn() (relies on afu->is_present
 * and the function acTag range). Reads the AFU configuration, assigns
 * acTags and PASIDs, maps the MMIO space and enables the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This code currently supports only one AFU */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Grant the AFU as many acTags as it supports, bounded by the
	 * function's enabled range.
	 */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}
900
901
902
903
904
905
906
/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev:	PCI device associated with the AFU.
 *
 * Allocates the AFU structure, configures the function and AFU, and
 * creates the default (kernel) adapter context. Failures unwind in
 * reverse order and return NULL.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}
959
960
961
962
963
964
965
966static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
967{
968 if (ctx->pending_irq || ctx->pending_fault)
969 return true;
970
971 return false;
972}
973
974
975
976
977
978
979
980
/**
 * afu_poll() - poll the AFU for events on the context
 * @file:	File associated with the adapter context.
 * @poll:	Poll structure from the user.
 *
 * Return: poll mask — POLLIN|POLLRDNORM when an event is pending,
 * POLLERR once the context has been closed
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	/* Sample pending flags and state atomically w.r.t. the irq path. */
	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset — must be zero, seeking is not supported.
 *
 * Blocks (unless O_NONBLOCK) until an AFU interrupt or translation fault
 * is pending, then builds a struct cxl_event describing it and copies it
 * to userspace. Interrupt events take priority over fault events.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	/* Classic wait loop: drop the lock only while sleeping so pending
	 * flags are always examined consistently.
	 */
	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		/* Report the lowest pending interrupt; userspace irq numbers
		 * are 1-based, bitmap bits are 0-based.
		 */
		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
1097
1098
1099
1100
1101
1102
1103
1104
1105static int afu_release(struct inode *inode, struct file *file)
1106{
1107 struct ocxlflash_context *ctx = file->private_data;
1108 int i;
1109
1110
1111 for (i = ctx->num_irqs; i >= 0; i--)
1112 afu_unmap_irq(0, ctx, i, ctx);
1113 free_afu_irqs(ctx);
1114
1115 return ocxlflash_release_context(ctx);
1116}
1117
1118
1119
1120
1121
1122
1123
/**
 * ocxlflash_mmap_fault() - mmap fault handler for the context MMIO area
 * @vmf:	VM fault associated with current fault.
 *
 * Inserts the PFN of the faulting offset within the context's problem
 * state area. SIGBUS is raised for offsets beyond the area or when the
 * context is not STARTED.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}
1149
/* VM operations for context MMIO mappings — faults are filled on demand. */
static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};
1153
1154
1155
1156
1157
1158
1159
1160
/**
 * afu_mmap() - map the fault handler operations
 * @file:	File associated with the context.
 * @vma:	VM area associated with mapping.
 *
 * Rejects mappings extending past the context's problem state area and
 * installs the PFN-map fault handler with uncached page protection.
 *
 * Return: 0 on success, -EINVAL on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}
1174
/* Default file operations for adapter context files. */
static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};
1182
/* Fill in the default file operation for any entry point left NULL in a
 * caller-supplied file_operations template.
 */
#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
1185
1186
1187
1188
1189
1190
1191
1192
1193
/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated (NULL for defaults; any
 *		NULL entries are patched from ocxl_afu_fops).
 * @fd:	File descriptor to be returned back.
 *
 * Reserves an fd, builds the pseudo file for the context and records the
 * file's mapping on the context. The fd is reserved but NOT installed
 * here — the caller is expected to fd_install() it.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only one fd per context. */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	/* NOTE(review): kasprintf() failure (NULL name) is not checked —
	 * confirm ocxlflash_getfile()/alloc_file_pseudo tolerate a NULL name.
	 */
	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}
1252
1253
1254
1255
1256
1257
1258
/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file:	File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}
1263
1264
1265
1266
1267
1268
1269
1270
1271static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
1272{
1273 struct ocxlflash_context *ctx = data;
1274 struct device *dev = ctx->hw_afu->dev;
1275 int i;
1276
1277 dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
1278 __func__, ctx->pe, irq);
1279
1280 for (i = 0; i < ctx->num_irqs; i++) {
1281 if (ctx->irqs[i].virq == irq)
1282 break;
1283 }
1284 if (unlikely(i >= ctx->num_irqs)) {
1285 dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
1286 goto out;
1287 }
1288
1289 spin_lock(&ctx->slock);
1290 set_bit(i - 1, &ctx->irq_bitmap);
1291 ctx->pending_irq = true;
1292 spin_unlock(&ctx->slock);
1293
1294 wake_up_all(&ctx->wq);
1295out:
1296 return IRQ_HANDLED;
1297}
1298
1299
1300
1301
1302
1303
1304
1305
1306static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
1307{
1308 struct ocxlflash_context *ctx = ctx_cookie;
1309 struct ocxl_hw_afu *afu = ctx->hw_afu;
1310 struct device *dev = afu->dev;
1311 char *name;
1312 int rc = 0;
1313 int i;
1314
1315 rc = alloc_afu_irqs(ctx, num_irqs);
1316 if (unlikely(rc < 0)) {
1317 dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
1318 goto out;
1319 }
1320
1321 for (i = 0; i < num_irqs; i++) {
1322 name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
1323 dev_name(dev), ctx->pe, i);
1324 rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
1325 kfree(name);
1326 if (unlikely(rc < 0)) {
1327 dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
1328 __func__, rc);
1329 goto err;
1330 }
1331 }
1332
1333 rc = start_context(ctx);
1334 if (unlikely(rc)) {
1335 dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
1336 goto err;
1337 }
1338out:
1339 return rc;
1340err:
1341 for (i = i-1; i >= 0; i--)
1342 afu_unmap_irq(0, ctx, i, ctx);
1343 free_afu_irqs(ctx);
1344 goto out;
1345};
1346
1347
1348
1349
1350
1351
1352
1353
/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}
1358
1359
1360
1361
1362
1363
1364
1365
/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode:	File inode pointer.
 * @file:	File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
1370
1371
/* Backend ops vector exported to the cxlflash common code for OCXL hosts. */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};
1399