// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */
#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>

enum pci_p2pdma_map_type {
	PCI_P2PDMA_MAP_UNKNOWN = 0,		/* not yet determined (xarray default) */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,		/* no usable P2P path between the devices */
	PCI_P2PDMA_MAP_BUS_ADDR,		/* map directly with PCI bus addresses */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,	/* route via host bridge; use dma_map_sg() */
};

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
	struct xarray map_types;
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct pci_dev *provider;
	u64 bus_offset;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t size = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		size = gen_pool_size(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	size_t avail = 0;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma && p2pdma->pool)
		avail = gen_pool_avail(p2pdma->pool);
	rcu_read_unlock();

	return sysfs_emit(buf, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_p2pdma *p2pdma;
	bool published = false;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		published = p2pdma->p2pmem_published;
	rcu_read_unlock();

	return sysfs_emit(buf, "%d\n", published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
	xa_destroy(&p2pdma->map_types);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	xa_init(&p2p->map_types);

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	rcu_assign_pointer(pdev->p2pdma, p2p);
	return 0;

out_pool_destroy:
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct dev_pagemap *pgmap;
	struct pci_p2pdma *p2pdma;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->range.start = pci_resource_start(pdev, bar) + offset;
	pgmap->range.end = pgmap->range.start + size - 1;
	pgmap->nr_range = 1;
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;

	p2p_pgmap->provider = pdev;
	p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			range_len(&pgmap->range), dev_to_node(&pdev->dev),
			&pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
		 pgmap->range.start, pgmap->range.end);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
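
/*
 * Example (illustrative sketch, not part of this file): a provider driver
 * might expose part of one of its BARs as p2p memory from its probe
 * routine. The "foo" names, BAR number and size below are hypothetical,
 * chosen only to show the calling convention.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int error;
 *
 *		error = pcim_enable_device(pdev);
 *		if (error)
 *			return error;
 *
 *		// Expose the first 2 MB of BAR 4 as p2p memory.
 *		return pci_p2pdma_add_resource(pdev, 4, SZ_2M, 0);
 *	}
 *
 * A zero @size would instead expose the whole BAR. Cleanup is handled by
 * the devm actions registered above, so no explicit teardown call is
 * needed in the driver's remove path.
 */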

/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pdev->acs_cap;
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

static bool cpu_supports_p2pdma(void)
{
#ifdef CONFIG_X86
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Any AMD CPU whose family ID is Zen or newer supports p2pdma */
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17)
		return true;
#endif

	return false;
}

static const struct pci_p2pdma_whitelist_entry {
	unsigned short vendor;
	unsigned short device;
	enum {
		REQ_SAME_HOST_BRIDGE	= 1 << 0,
	} flags;
} pci_p2pdma_whitelist[] = {
	/* Intel Xeon E5/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x3c00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x3c01, REQ_SAME_HOST_BRIDGE},
	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
	{PCI_VENDOR_ID_INTEL,	0x2f00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL,	0x2f01, REQ_SAME_HOST_BRIDGE},
	/* Intel Sky Lake-E */
	{PCI_VENDOR_ID_INTEL,	0x2030, 0},
	{PCI_VENDOR_ID_INTEL,	0x2031, 0},
	{PCI_VENDOR_ID_INTEL,	0x2032, 0},
	{PCI_VENDOR_ID_INTEL,	0x2033, 0},
	{PCI_VENDOR_ID_INTEL,	0x2020, 0},
	{PCI_VENDOR_ID_INTEL,	0x09a2, 0},
	{}
};

/*
 * If the first device on the host's root bus is devfn 00.0, return it;
 * otherwise return NULL. The device at devfn 00.0 carries the vendor and
 * device IDs used to match the host bridge against pci_p2pdma_whitelist[].
 *
 * This is similar to pci_get_slot(host->bus, 0), but it avoids taking
 * the pci_bus_sem lock, so it is usable from contexts that cannot sleep.
 *
 * For this to be safe, the caller should hold a reference to a device on
 * the root bus, which ensures the host bridge device is not freed or
 * removed from the head of the devices list.
 */
static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host)
{
	struct pci_dev *root;

	root = list_first_entry_or_null(&host->bus->devices,
					struct pci_dev, bus_list);

	if (!root)
		return NULL;
	if (root->devfn != PCI_DEVFN(0, 0))
		return NULL;

	return root;
}

static bool __host_bridge_whitelist(struct pci_host_bridge *host,
				    bool same_host_bridge, bool warn)
{
	struct pci_dev *root = pci_host_bridge_dev(host);
	const struct pci_p2pdma_whitelist_entry *entry;
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;

	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
		if (vendor != entry->vendor || device != entry->device)
			continue;
		if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
			return false;

		return true;
	}

	if (warn)
		pci_warn(root, "Host bridge not in P2PDMA whitelist: %04x:%04x\n",
			 vendor, device);

	return false;
}

/*
 * If we can't find a common upstream bridge take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */
static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b,
				  bool warn)
{
	struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
	struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);

	if (host_a == host_b)
		return __host_bridge_whitelist(host_a, true, warn);

	if (__host_bridge_whitelist(host_a, false, warn) &&
	    __host_bridge_whitelist(host_b, false, warn))
		return true;

	return false;
}

/* Pack the client's domain, bus and devfn into a single xarray index */
static unsigned long map_types_idx(struct pci_dev *client)
{
	return (pci_domain_nr(client->bus) << 16) |
		(client->bus->number << 8) | client->devfn;
}
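
/*
 * For illustration: a client at the hypothetical address 0000:65:00.1
 * yields (0 << 16) | (0x65 << 8) | 0x01 == 0x6501, so each provider's
 * map_types xarray caches one map type per client domain/bus/devfn.
 */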

/*
 * Calculate the P2PDMA mapping type and distance between two devices.
 *
 * If the two devices are the same PCI function, return
 * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 0.
 *
 * If they are two functions of the same device, return
 * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 2 (one hop up to the bridge,
 * then one hop back down to another function of the same device).
 *
 * In the case where two devices are connected to the same PCIe switch,
 * return a distance of 4. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port 0
 *       + \- Device A
 *       \-+ Switch Downstream Port 1
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the switch
 * upstream port, back down to Device B. The mapping type returned depends
 * on the ACS redirection setting of the ports along the path.
 *
 * If ACS redirect is set on any port in the path, traffic between the
 * devices will go through the host bridge, so return
 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; otherwise return
 * PCI_P2PDMA_MAP_BUS_ADDR.
 *
 * Any two devices that have a data path that goes through the host bridge
 * will consult a whitelist. If the host bridge is on the whitelist, return
 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE with the distance set to the number of
 * ports per above. If the device is not on the whitelist, return
 * PCI_P2PDMA_MAP_NOT_SUPPORTED.
 */
static enum pci_p2pdma_map_type
calc_map_type_and_dist(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool verbose)
{
	enum pci_p2pdma_map_type map_type = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	struct pci_dev *a = provider, *b = client, *bb;
	bool acs_redirects = false;
	struct pci_p2pdma *p2pdma;
	struct seq_buf acs_list;
	int acs_cnt = 0;
	int dist_a = 0;
	int dist_b = 0;
	char buf[128];

	seq_buf_init(&acs_list, buf, sizeof(buf));

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() seeing we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */
	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(&acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	*dist = dist_a + dist_b;
	goto map_through_host_bridge;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(&acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	*dist = dist_a + dist_b;

	if (!acs_cnt) {
		map_type = PCI_P2PDMA_MAP_BUS_ADDR;
		goto done;
	}

	if (verbose) {
		acs_list.buffer[acs_list.len-1] = 0; /* drop final semicolon */
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}
	acs_redirects = true;

map_through_host_bridge:
	if (!cpu_supports_p2pdma() &&
	    !host_bridge_whitelist(provider, client, acs_redirects)) {
		if (verbose)
			pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
				 pci_name(provider));
		map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}
done:
	rcu_read_lock();
	p2pdma = rcu_dereference(provider->p2pdma);
	if (p2pdma)
		xa_store(&p2pdma->map_types, map_types_idx(client),
			 xa_mk_value(map_type), GFP_KERNEL);
	rcu_read_unlock();
	return map_type;
}

/**
 * pci_p2pdma_distance_many - determine the cumulative distance between
 *	a p2pdma provider and the clients in use
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible, otherwise returns a
 * positive number where a lower number is the preferable choice. (If there's
 * one client that's the same device as the provider it will return 0, which
 * is the best choice.)
 *
 * "compatible" means the provider and the clients are either all behind
 * the same PCI root port or the host bridges connected to each of the
 * devices are listed in the 'pci_p2pdma_whitelist'.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	enum pci_p2pdma_map_type map;
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int i, distance;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		map = calc_map_type_and_dist(provider, pci_client, &distance,
					     verbose);

		pci_dev_put(pci_client);

		if (map == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
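
/*
 * Example (illustrative, with hypothetical devices): a driver that holds
 * two DMA-capable client devices could reject a provider that has no
 * usable path to both of them:
 *
 *	struct device *clients[] = { &foo_dev->dev, &bar_dev->dev };
 *
 *	if (pci_p2pdma_distance_many(provider, clients, 2, true) < 0)
 *		return -ENXIO;
 */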

/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	struct pci_p2pdma *p2pdma;
	bool res;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	res = p2pdma && p2pdma->p2pmem_published;
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible
 *	with the specified list of clients and shortest distance
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated.) If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use
 * pci_dev_put() to return the reference) or NULL if no compatible device is
 * found.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
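
/*
 * Example (illustrative): picking the closest published provider for a set
 * of clients. "clients" is the hypothetical array from the previous
 * example; the returned reference must be dropped with pci_dev_put() when
 * the provider is no longer needed.
 *
 *	struct pci_dev *p2p_dev;
 *
 *	p2p_dev = pci_p2pmem_find_many(clients, 2);
 *	if (!p2p_dev)
 *		return -ENODEV;
 *	...
 *	pci_dev_put(p2p_dev);
 */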

/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (unlikely(!p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(p2pdma->pool, size, (void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
		gen_pool_free(p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
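
/*
 * Example (illustrative): allocating a 4 KB buffer from a provider found
 * above and releasing it again. The allocation can fail at any time (e.g.
 * while the provider is unbinding), so callers must always check for NULL.
 *
 *	void *buf = pci_alloc_p2pmem(p2p_dev, SZ_4K);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	pci_free_p2pmem(p2p_dev, buf, SZ_4K);
 */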

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;
	struct pci_p2pdma *p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);

	gen_pool_free_owner(p2pdma->pool, (uintptr_t)addr, size,
			    (void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	struct pci_p2pdma *p2pdma;

	if (!addr)
		return 0;

	p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
	if (!p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
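
/*
 * Example (illustrative): programming a peer device's DMA engine with the
 * bus address of a p2pmem buffer. "foo_write_dma_addr()" is a hypothetical
 * helper for the peer's register interface, shown only to indicate where
 * the returned address would typically be consumed.
 *
 *	pci_bus_addr_t bus_addr = pci_p2pmem_virt_to_bus(p2p_dev, buf);
 *
 *	foo_write_dma_addr(peer, bus_addr);
 */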

/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Return: %NULL on error or &struct scatterlist pointer and @nents on success
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);
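
/*
 * Example (illustrative): building a single-entry scatterlist backed by
 * p2pmem, e.g. for a 64 KB transfer buffer:
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = pci_p2pmem_alloc_sgl(p2p_dev, &nents, SZ_64K);
 *	if (!sgl)
 *		return -ENOMEM;
 */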

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by
 *	pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find_many()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-2-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	struct pci_p2pdma *p2pdma;

	rcu_read_lock();
	p2pdma = rcu_dereference(pdev->p2pdma);
	if (p2pdma)
		p2pdma->p2pmem_published = publish;
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
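
/*
 * Example (illustrative): a provider driver typically publishes its memory
 * once its resources are set up and unpublishes on the way out. The "foo"
 * callbacks are hypothetical:
 *
 *	pci_p2pmem_publish(pdev, true);		// in foo_probe()
 *	...
 *	pci_p2pmem_publish(pdev, false);	// in foo_remove()
 *
 * Unpublishing only stops new users from finding the device via
 * pci_p2pmem_find_many(); outstanding allocations are drained by the devm
 * release action registered in pci_p2pdma_setup().
 */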

static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
						    struct device *dev)
{
	enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
	struct pci_dev *client;
	struct pci_p2pdma *p2pdma;

	if (!provider->p2pdma)
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	if (!dev_is_pci(dev))
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	client = to_pci_dev(dev);

	rcu_read_lock();
	p2pdma = rcu_dereference(provider->p2pdma);

	if (p2pdma)
		type = xa_to_value(xa_load(&p2pdma->map_types,
					   map_types_idx(client)));
	rcu_read_unlock();
	return type;
}

static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
		struct device *dev, struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	/*
	 * Bus-address mappings bypass the IOMMU: translate each segment's
	 * CPU physical address to the PCI bus address by adding bus_offset
	 * (see pci_p2pdma_add_resource()).
	 */
	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s) + p2p_pgmap->bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}

/**
 * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing DMA to/from the p2pdma memory
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_map_sg() (if called)
 *
 * Scatterlists mapped with this function should be unmapped using
 * pci_p2pdma_unmap_sg_attrs().
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_pagemap *p2p_pgmap =
		to_p2p_pgmap(sg_page(sg)->pgmap);

	switch (pci_p2pdma_map_type(sg_page(sg)->pgmap, dev)) {
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	case PCI_P2PDMA_MAP_BUS_ADDR:
		return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);
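
/*
 * Example (illustrative): mapping the scatterlist allocated with
 * pci_p2pmem_alloc_sgl() above for a DMA engine on the hypothetical
 * "dma_dev", using the pci_p2pdma_map_sg()/pci_p2pdma_unmap_sg() wrappers
 * from <linux/pci-p2pdma.h>:
 *
 *	if (!pci_p2pdma_map_sg(dma_dev, sgl, nents, DMA_BIDIRECTIONAL))
 *		return -EIO;
 *	...
 *	pci_p2pdma_unmap_sg(dma_dev, sgl, nents, DMA_BIDIRECTIONAL);
 */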

/**
 * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
 *	mapped with pci_p2pdma_map_sg()
 * @dev: device doing DMA to/from the p2pdma memory
 * @sg: scatter list to unmap
 * @nents: number of elements returned by pci_p2pdma_map_sg()
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_unmap_sg() (if called)
 */
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	enum pci_p2pdma_map_type map_type;

	map_type = pci_p2pdma_map_type(sg_page(sg)->pgmap, dev);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
		dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);

/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *	to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *	(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format kstrtobool() accepts). A false
 * value disables p2pdma, a true value expects the caller
 * to automatically find a compatible device and specifying a PCI device
 * expects the caller to use the specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist, like
		 * "0000:01:00.1", kstrtobool() would silently accept the
		 * leading '0' or '1' as a boolean. Catch that case here
		 * and fall through to the "No such PCI device" error
		 * below instead.
		 */
	} else if (!kstrtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);
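
/*
 * Example (illustrative): a configfs attribute built on the store/show
 * helpers. "foo" is a hypothetical subsystem keeping a (p2p_dev,
 * use_p2pdma) pair per item:
 *
 *	static ssize_t foo_p2pmem_store(struct config_item *item,
 *					const char *page, size_t count)
 *	{
 *		struct foo *foo = to_foo(item);
 *		struct pci_dev *p2p_dev = NULL;
 *		bool use_p2pdma = false;
 *		int error;
 *
 *		error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pdma);
 *		if (error)
 *			return error;
 *
 *		pci_dev_put(foo->p2p_dev);	// drop any previous device
 *		foo->p2p_dev = p2p_dev;
 *		foo->use_p2pdma = use_p2pdma;
 *		return count;
 *	}
 */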

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *	whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns 0 on success
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);