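/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
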
#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>

#include <asm/tlb.h>
#include <asm/powernv.h>
#include <asm/reg.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>

#include "powernv.h"
#include "pci.h"

#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
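
/* Given a device-tree node, return the PCI device it describes. */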
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	return PCI_DN(dn)->pcidev;
}
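
/* Given an NPU device get the associated PCI device. */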
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
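
/* Given the real PCI device get a linked NPU device. */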
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get associated PCI device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);

#define NPU_DMA_OP_UNSUPPORTED()	\
	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
		__func__)

static void *dma_npu_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag,
			   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return NULL;
}

static void dma_npu_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
}

static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
			  int nelems, enum dma_data_direction direction,
			  unsigned long attrs)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static int dma_npu_dma_supported(struct device *dev, u64 mask)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static u64 dma_npu_get_required_mask(struct device *dev)
{
	NPU_DMA_OP_UNSUPPORTED();
	return 0;
}

static const struct dma_map_ops dma_npu_ops = {
	.map_page = dma_npu_map_page,
	.map_sg = dma_npu_map_sg,
	.alloc = dma_npu_alloc,
	.free = dma_npu_free,
	.dma_supported = dma_npu_dma_supported,
	.get_required_mask = dma_npu_get_required_mask,
};
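
/*
 * Returns the PE of the GPU device attached to the given NPU PE and,
 * optionally, the GPU pci_dev itself.
 */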
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}
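
/* Set up a TCE (DMA) window on the NPU, pointing at the supplied table. */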
long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
		struct iommu_table *tbl)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}

long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0 /* levels */, 0 /* table address */,
			0 /* table size */, 0 /* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}
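
/*
 * Enables 32 bit DMA on NPU.
 */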
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

	/*
	 * We don't initialise npu_pe->tce32_table as we always use
	 * dma_npu_ops which are nops.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
}
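
/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike for a PHB3 bypass and non-bypass modes can't be
 * active at the same time.
 */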
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(npe, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}

void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}
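
/* Switch ownership from platform code to external user (e.g. VFIO) */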
void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	/*
	 * Note: NPU has just a single TVE in the hardware which means that
	 * while used by the kernel, it can have either 32bit window or
	 * DMA bypass but never both. So we deconfigure 32bit window only
	 * if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(npe, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}

struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	struct pci_bus *pbus = phb->hose->bus;
	struct pci_dev *npdev, *gpdev = NULL, *gptmp;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);

	if (!gpe || !gpdev)
		return NULL;

	list_for_each_entry(npdev, &pbus->devices, bus_list) {
		gptmp = pnv_pci_get_gpu_dev(npdev);

		if (gptmp != gpdev)
			continue;

		pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
		iommu_group_add_device(gpe->table_group.group, &npdev->dev);
	}

	return gpe;
}
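
/* Maximum number of nvlinks per npu */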
#define NV_MAX_LINKS 6

/* Highest NPU index assigned so far; set by pnv_npu2_init() */
static int max_npu2_index;

struct npu_context {
	struct mm_struct *mm;
	struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
	struct mmu_notifier mn;
	struct kref kref;

	/* Callback to stop translation requests on a given gpu */
	struct npu_context *(*release_cb)(struct npu_context *, void *);

	/*
	 * Private pointer passed to the above callback for usage by
	 * device drivers.
	 */
	void *priv;
};
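
/*
 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
 * if none are available.
 */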
static int get_mmio_atsd_reg(struct npu *npu)
{
	int i;

	for (i = 0; i < npu->mmio_atsd_count; i++) {
		if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
			return i;
	}

	return -ENOSPC;
}

static void put_mmio_atsd_reg(struct npu *npu, int reg)
{
	clear_bit(reg, &npu->mmio_atsd_usage);
}

/* MMIO ATSD register offsets */
#define XTS_ATSD_AVA	1
#define XTS_ATSD_STAT	2

static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
				unsigned long va)
{
	int mmio_atsd_reg;

	/* Spin until an ATSD register becomes available */
	do {
		mmio_atsd_reg = get_mmio_atsd_reg(npu);
		cpu_relax();
	} while (mmio_atsd_reg < 0);

	__raw_writeq(cpu_to_be64(va),
		npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
	eieio();
	__raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);

	return mmio_atsd_reg;
}

static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
{
	unsigned long launch;

	/* IS set, invalidate matching PID */
	launch = PPC_BIT(12);

	/* PRS set to process-scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* No flush */
	launch |= !flush << PPC_BITLSHIFT(39);

	/* Invalidating the entire process doesn't use a va */
	return mmio_launch_invalidate(npu, launch, 0);
}

static int mmio_invalidate_va(struct npu *npu, unsigned long va,
			unsigned long pid, bool flush)
{
	unsigned long launch;

	/* IS set, invalidate matching VA */
	launch = 0;

	/* PRS set to process-scoped */
	launch |= PPC_BIT(13);

	/* AP */
	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);

	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);

	/* No flush */
	launch |= !flush << PPC_BITLSHIFT(39);

	return mmio_launch_invalidate(npu, launch, va);
}

#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)

struct mmio_atsd_reg {
	struct npu *npu;
	int reg;
};

static void mmio_invalidate_wait(
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
{
	struct npu *npu;
	int i, reg;

	/* Wait for all invalidations to complete */
	for (i = 0; i <= max_npu2_index; i++) {
		if (mmio_atsd_reg[i].reg < 0)
			continue;

		/* Wait for completion */
		npu = mmio_atsd_reg[i].npu;
		reg = mmio_atsd_reg[i].reg;
		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
			cpu_relax();

		put_mmio_atsd_reg(npu, reg);

		/*
		 * The GPU requires two flush ATSDs to ensure all entries have
		 * been flushed. We use PID 0 as it will never be used for a
		 * process on the GPU.
		 */
		if (flush)
			mmio_invalidate_pid(npu, 0, true);
	}
}
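
/*
 * Invalidate either a single address or an entire PID depending on
 * the value of va.
 */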
static void mmio_invalidate(struct npu_context *npu_context, int va,
			unsigned long address, bool flush)
{
	int i, j;
	struct npu *npu;
	struct pnv_phb *nphb;
	struct pci_dev *npdev;
	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;

	/*
	 * Unfortunately the nest mmu does not support flushing specific
	 * addresses so we have to flush the whole mm.
	 */
	flush_tlb_mm(npu_context->mm);

	/*
	 * Loop over all the NPUs this process is active on and launch
	 * an invalidate.
	 */
	for (i = 0; i <= max_npu2_index; i++) {
		mmio_atsd_reg[i].reg = -1;
		for (j = 0; j < NV_MAX_LINKS; j++) {
			npdev = npu_context->npdev[i][j];
			if (!npdev)
				continue;

			nphb = pci_bus_to_host(npdev->bus)->private_data;
			npu = &nphb->npu;
			mmio_atsd_reg[i].npu = npu;

			if (va)
				mmio_atsd_reg[i].reg =
					mmio_invalidate_va(npu, address, pid,
							flush);
			else
				mmio_atsd_reg[i].reg =
					mmio_invalidate_pid(npu, pid, flush);

			/*
			 * The NPU hardware forwards the shootdown to all GPUs
			 * so we only have to launch one shootdown per NPU.
			 */
			break;
		}
	}

	mmio_invalidate_wait(mmio_atsd_reg, flush);
	if (flush)
		/* Wait for the flush to complete */
		mmio_invalidate_wait(mmio_atsd_reg, false);
}

static void pnv_npu2_mn_release(struct mmu_notifier *mn,
				struct mm_struct *mm)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	/* Call into device driver to stop requests to the NMMU */
	if (npu_context->release_cb)
		npu_context->release_cb(npu_context, npu_context->priv);

	/*
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
	mmio_invalidate(npu_context, 0, 0, true);
}

static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address,
				pte_t pte)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);

	mmio_invalidate(npu_context, 1, address, true);
}

static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct npu_context *npu_context = mn_to_npu_context(mn);
	unsigned long address;

	for (address = start; address < end; address += PAGE_SIZE)
		mmio_invalidate(npu_context, 1, address, false);

	/* Do the flush only on the final address == end */
	mmio_invalidate(npu_context, 1, address, true);
}

static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
	.release = pnv_npu2_mn_release,
	.change_pte = pnv_npu2_mn_change_pte,
	.invalidate_range = pnv_npu2_mn_invalidate_range,
};
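
/*
 * Call into OPAL to set up the nmmu context for the current task in
 * the NPU. This must be called to set up the context tables before the
 * GPU issues ATRs. gpdev should point to the PCIe GPU device.
 *
 * A release callback should be registered to allow a device driver to
 * be notified that it should not launch any new translation requests
 * as the final TLB invalidate is about to occur.
 *
 * Returns an error if no contexts are currently available or an
 * npu_context which should be passed to pnv_npu2_handle_fault().
 *
 * mmap_sem must be held in write mode.
 */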
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
			unsigned long flags,
			struct npu_context *(*cb)(struct npu_context *, void *),
			void *priv)
{
	int rc;
	u32 nvlink_index;
	struct device_node *nvlink_dn;
	struct mm_struct *mm = current->mm;
	struct pnv_phb *nphb;
	struct npu *npu;
	struct npu_context *npu_context;

	/*
	 * At present we don't support GPUs connected to multiple NPUs and I'm
	 * not sure the hardware does either.
	 */
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return ERR_PTR(-ENODEV);

	if (!npdev)
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);

	if (!mm || mm->context.id == 0) {
		/*
		 * Kernel thread contexts are not supported and context id 0 is
		 * reserved on the GPU.
		 */
		return ERR_PTR(-EINVAL);
	}

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;

	/*
	 * Setup the NPU context table for a particular GPU. These need to be
	 * per-GPU as we need the tables to filter ATSDs when there are no
	 * active contexts on a particular GPU.
	 */
	rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	if (rc < 0)
		return ERR_PTR(-ENOSPC);

	/*
	 * We store the npu pci device so we can more easily get at the
	 * associated npus.
	 */
	npu_context = mm->context.npu_context;
	if (!npu_context) {
		npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
		if (!npu_context)
			return ERR_PTR(-ENOMEM);

		mm->context.npu_context = npu_context;
		npu_context->mm = mm;
		npu_context->mn.ops = &nv_nmmu_notifier_ops;
		__mmu_notifier_register(&npu_context->mn, mm);
		kref_init(&npu_context->kref);
	} else {
		kref_get(&npu_context->kref);
	}

	npu_context->release_cb = cb;
	npu_context->priv = priv;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	rc = of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
				&nvlink_index);
	/* Drop the reference taken by of_parse_phandle() */
	of_node_put(nvlink_dn);
	if (WARN_ON(rc))
		return ERR_PTR(-ENODEV);
	npu_context->npdev[npu->index][nvlink_index] = npdev;

	return npu_context;
}
EXPORT_SYMBOL(pnv_npu2_init_context);

static void pnv_npu2_release_context(struct kref *kref)
{
	struct npu_context *npu_context =
		container_of(kref, struct npu_context, kref);

	npu_context->mm->context.npu_context = NULL;
	mmu_notifier_unregister(&npu_context->mn,
				npu_context->mm);

	kfree(npu_context);
}

void pnv_npu2_destroy_context(struct npu_context *npu_context,
			struct pci_dev *gpdev)
{
	struct pnv_phb *nphb;
	struct npu *npu;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct device_node *nvlink_dn;
	u32 nvlink_index;
	int rc;

	if (WARN_ON(!npdev))
		return;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	nphb = pci_bus_to_host(npdev->bus)->private_data;
	npu = &nphb->npu;
	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
	rc = of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
				&nvlink_index);
	/* Drop the reference taken by of_parse_phandle() */
	of_node_put(nvlink_dn);
	if (WARN_ON(rc))
		return;
	npu_context->npdev[npu->index][nvlink_index] = NULL;
	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
	kref_put(&npu_context->kref, pnv_npu2_release_context);
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);
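
/*
 * Assumes mmap_sem is held for the context's associated mm.
 */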
int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
			unsigned long *flags, unsigned long *status, int count)
{
	u64 rc = 0, result = 0;
	int i, is_write;
	struct page *page[1];

	/* mmap_sem should be held so the struct_mm must be present */
	struct mm_struct *mm = context->mm;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	WARN_ON(!rwsem_is_locked(&mm->mmap_sem));

	for (i = 0; i < count; i++) {
		is_write = flags[i] & NPU2_WRITE;
		rc = get_user_pages_remote(NULL, mm, ea[i], 1,
					is_write ? FOLL_WRITE : 0,
					page, NULL, NULL);

		/*
		 * To support virtualised environments we will have to do an
		 * access to the page to ensure it gets faulted into the
		 * hypervisor. For the moment virtualisation is not supported
		 * in other areas so leave the access out.
		 */
		if (rc != 1) {
			status[i] = rc;
			result = -EFAULT;
			continue;
		}

		status[i] = 0;
		put_page(page[0]);
	}

	return result;
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);

int pnv_npu2_init(struct pnv_phb *phb)
{
	unsigned int i;
	u64 mmio_atsd;
	struct device_node *dn;
	struct pci_dev *gpdev;
	static int npu_index;
	uint64_t rc = 0;

	/* Tell OPAL which LPAR owns each GPU device behind this NPU */
	for_each_child_of_node(phb->hose->dn, dn) {
		gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
		if (gpdev) {
			rc = opal_npu_map_lpar(phb->opal_id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn),
				0, 0);
			if (rc)
				dev_err(&gpdev->dev,
					"Error %lld mapping device to LPAR\n",
					rc);
		}
	}

	for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
							i, &mmio_atsd); i++)
		phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);

	pr_info("NPU%lld: Found %d MMIO ATSD registers\n", phb->opal_id, i);
	phb->npu.mmio_atsd_count = i;
	phb->npu.mmio_atsd_usage = 0;
	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS))
		return -ENOSPC;
	max_npu2_index = npu_index;
	phb->npu.index = npu_index;

	return 0;
}