// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3hot_delay;

	if (delay < pci_pm_d3hot_delay)
		delay = pci_pm_d3hot_delay;

	if (delay)
		msleep(delay);
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbiosize=nn, pci=cbmemsize=nn can override these */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000
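
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */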
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
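
/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */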
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
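
/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path consists of a bus address in the root port, followed by one or
 * more devfn steps for each bridge down to the device itself.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */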
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}
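
/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which may
 * change if new hardware is inserted, removed, or the kernel changes how it
 * enumerates devices.  The second format matches devices by vendor/device
 * IDs, where either ID may be zero to match any value.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */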
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				   unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}
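
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */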
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);
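
/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */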
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
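
/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */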
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						     PCI_EXT_CAP_ID_VNDR))) {
		if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
					  &header) == PCIBIOS_SUCCESSFUL &&
		    PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
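
/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */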
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively decoded window and a
			 * subtractively decoded region that contain the BAR.
			 * We want the positively decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);
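
/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */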
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/*
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found; proceed below */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect, P2P Egress Control */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled in the
	 * firmware. Therefore, it's best to call it here because it is only
	 * dependent on the disable_acs_redir parameter.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}
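
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */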
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		fallthrough;	/* Force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which is inaccessible in
 * D3cold.  The platform firmware is therefore queried first to detect
 * accessibility of the register.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (PCI_POSSIBLE_ERROR(id)) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in
	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
	 * corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_resume_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
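
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */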
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(dev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
#endif
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
	pci_bridge_reconfigure_ltr(dev);

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u32 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	/* Some broken devices only support dword access to LTR */
	cap = &save_state->cap.data[0];
	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}
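
/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */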
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	pci_save_ptm_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = pci_rebar_bytes_to_size(resource_size(res));
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}
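
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */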
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);
	pci_restore_ptm_state(dev);

	pci_aer_clear_status(dev);
	pci_restore_aer_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[];
};
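
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */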
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);
static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	pci_update_current_state(dev, dev->current_state);

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/*
 * pcibios_device_add - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_device_add(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This is the
 * default implementation. Architecture implementations can override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
#ifdef CONFIG_PCIEAER
void pcie_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
#endif

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep.  The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set.  The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */
	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);
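
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */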
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.
	 */
	if (enable) {
		int error;

		/*
		 * Enable PME signaling if the device can signal PME from
		 * D3cold regardless of whether or not it can signal PME from
		 * the current target state, because that will allow it to
		 * signal PME when the hierarchy above it goes into D3cold and
		 * the device itself ends up in D3cold as a result of that.
		 */
		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);
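
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */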
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			return PCI_D3hot;

		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				return PCI_D3hot;
		}

		return state;
	}

	/*
	 * If the device is in D3cold, leave it in that state; a device
	 * without the PM capability can only be in D0.
	 */
	if (dev->current_state == PCI_D3cold)
		return PCI_D3cold;
	else if (!dev->pm_cap)
		return PCI_D0;

	if (wakeup && dev->pme_support) {
		pci_power_t state = PCI_D3hot;

		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		while (state && !(dev->pme_support & (1 << state)))
			state--;

		if (state)
			return state;
		else if (dev->pme_support & 1)
			return PCI_D0;
	}

	return PCI_D3hot;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/*
	 * There are systems (for example, Intel mobile chips since Coffee
	 * Lake) where the power drawn while suspended can be significantly
	 * reduced by disabling PTM on PCIe root ports as this allows the
	 * port to enter a lower-power PM state and the SoC to reach a
	 * lower-power idle state as a whole.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		pci_disable_ptm(dev);

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		pci_restore_ptm_state(dev);
	}

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	int ret = pci_set_power_state(dev, PCI_D0);

	if (ret)
		return ret;

	pci_enable_wake(dev, PCI_D0, false);
	return 0;
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	/*
	 * There are systems (for example, Intel mobile chips since Coffee
	 * Lake) where the power drawn while suspended can be significantly
	 * reduced by disabling PTM on PCIe root ports as this allows the
	 * port to enter a lower-power PM state and the SoC to reach a
	 * lower-power idle state as a whole.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		pci_disable_ptm(dev);

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		pci_restore_ptm_state(dev);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device
 * supports PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended or it has to be
 * reconfigured due to wakeup settings difference between system and runtime
 * suspend, or the current power state of it is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
	return target_state != pci_dev->current_state &&
		target_state != PCI_D3cold &&
		pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to handle.
 *
 * If the device is suspended and it is not configured for system wakeup,
 * disable PME for it to prevent it from waking up the system unnecessarily.
 *
 * Note that if the device's power state is D3cold and the platform check in
 * pci_dev_need_resume() has not triggered, the device's configuration need not
 * be changed.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend if
 * the device was not configured for system wakeup.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_choose_state - Choose the power state of a PCI device.
 * @dev: Target PCI device.
 * @state: Target state for the whole system.
 *
 * Returns PCI power state suitable for @dev and @state.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	if (state.event == PM_EVENT_ON)
		return PCI_D0;

	return pci_target_state(dev, false);
}
EXPORT_SYMBOL(pci_choose_state);

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}
static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it.  However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
	{
		/*
		 * Downstream device is not accessible after putting a root
		 * port into D3cold and back into D0 on Elo i2.
		 */
		.ident = "Elo i2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
		},
	},
#endif
	{ }
};
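
/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */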
2945bool pci_bridge_d3_possible(struct pci_dev *bridge)
2946{
2947 if (!pci_is_pcie(bridge))
2948 return false;
2949
2950 switch (pci_pcie_type(bridge)) {
2951 case PCI_EXP_TYPE_ROOT_PORT:
2952 case PCI_EXP_TYPE_UPSTREAM:
2953 case PCI_EXP_TYPE_DOWNSTREAM:
2954 if (pci_bridge_d3_disable)
2955 return false;
2956
2957
2958
2959
2960
2961 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2962 return false;
2963
2964 if (pci_bridge_d3_force)
2965 return true;
2966
2967
2968 if (bridge->is_thunderbolt)
2969 return true;
2970
2971
2972 if (platform_pci_bridge_d3(bridge))
2973 return true;
2974
2975
2976
2977
2978
2979
2980 if (bridge->is_hotplug_bridge)
2981 return false;
2982
2983 if (dmi_check_system(bridge_d3_blacklist))
2984 return false;
2985
2986
2987
2988
2989
2990 if (dmi_get_bios_year() >= 2015)
2991 return true;
2992 break;
2993 }
2994
2995 return false;
2996}
2997
2998static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2999{
3000 bool *d3cold_ok = data;
3001
	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;
3013
3014 return !*d3cold_ok;
3015}
3016
/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate the change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}
3069
/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold for the device they
 * handle.  It also updates upstream PCI bridge PM capabilities accordingly.
 */
3078void pci_d3cold_enable(struct pci_dev *dev)
3079{
3080 if (dev->no_d3cold) {
3081 dev->no_d3cold = false;
3082 pci_bridge_d3_update(dev);
3083 }
3084}
3085EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3086
/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold for the device they
 * handle.  It also updates upstream PCI bridge PM capabilities accordingly.
 */
3095void pci_d3cold_disable(struct pci_dev *dev)
3096{
3097 if (!dev->no_d3cold) {
3098 dev->no_d3cold = true;
3099 pci_bridge_d3_update(dev);
3100 }
3101}
3102EXPORT_SYMBOL_GPL(pci_d3cold_disable);
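
/*
 * Illustrative use only: a driver whose device loses state across a
 * D3cold power cycle and cannot restore it might opt out at probe time
 * (hypothetical "foo" driver):
 *
 *	pci_d3cold_disable(pdev);
 *
 * which keeps runtime PM at D3hot or shallower until a later
 * pci_d3cold_enable() call.
 */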
3103
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
3108void pci_pm_init(struct pci_dev *dev)
3109{
3110 int pm;
3111 u16 status;
3112 u16 pmc;
3113
3114 pm_runtime_forbid(&dev->dev);
3115 pm_runtime_set_active(&dev->dev);
3116 pm_runtime_enable(&dev->dev);
3117 device_enable_async_suspend(&dev->dev);
3118 dev->wakeup_prepared = false;
3119
3120 dev->pm_cap = 0;
3121 dev->pme_support = 0;
3122
3123
3124 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3125 if (!pm)
3126 return;
3127
3128 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3129
3130 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3131 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3132 pmc & PCI_PM_CAP_VER_MASK);
3133 return;
3134 }
3135
3136 dev->pm_cap = pm;
3137 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3138 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3139 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3140 dev->d3cold_allowed = true;
3141
3142 dev->d1_support = false;
3143 dev->d2_support = false;
3144 if (!pci_no_d1d2(dev)) {
3145 if (pmc & PCI_PM_CAP_D1)
3146 dev->d1_support = true;
3147 if (pmc & PCI_PM_CAP_D2)
3148 dev->d2_support = true;
3149
3150 if (dev->d1_support || dev->d2_support)
3151 pci_info(dev, "supports%s%s\n",
3152 dev->d1_support ? " D1" : "",
3153 dev->d2_support ? " D2" : "");
3154 }
3155
3156 pmc &= PCI_PM_CAP_PME_MASK;
3157 if (pmc) {
3158 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3159 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3160 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3161 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3162 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3163 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3164 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3165 dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
3173 }
3174
3175 pci_read_config_word(dev, PCI_STATUS, &status);
3176 if (status & PCI_STATUS_IMM_READY)
3177 dev->imm_ready = 1;
3178}
3179
3180static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3181{
3182 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3183
3184 switch (prop) {
3185 case PCI_EA_P_MEM:
3186 case PCI_EA_P_VF_MEM:
3187 flags |= IORESOURCE_MEM;
3188 break;
3189 case PCI_EA_P_MEM_PREFETCH:
3190 case PCI_EA_P_VF_MEM_PREFETCH:
3191 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3192 break;
3193 case PCI_EA_P_IO:
3194 flags |= IORESOURCE_IO;
3195 break;
3196 default:
3197 return 0;
3198 }
3199
3200 return flags;
3201}
3202
3203static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3204 u8 prop)
3205{
3206 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3207 return &dev->resource[bei];
3208#ifdef CONFIG_PCI_IOV
3209 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3210 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3211 return &dev->resource[PCI_IOV_RESOURCES +
3212 bei - PCI_EA_BEI_VF_BAR0];
3213#endif
3214 else if (bei == PCI_EA_BEI_ROM)
3215 return &dev->resource[PCI_ROM_RESOURCE];
3216 else
3217 return NULL;
3218}
3219
3220
3221static int pci_ea_read(struct pci_dev *dev, int offset)
3222{
3223 struct resource *res;
3224 int ent_size, ent_offset = offset;
3225 resource_size_t start, end;
3226 unsigned long flags;
3227 u32 dw0, bei, base, max_offset;
3228 u8 prop;
3229 bool support_64 = (sizeof(resource_size_t) >= 8);
3230
3231 pci_read_config_dword(dev, ent_offset, &dw0);
3232 ent_offset += 4;
3233
3234
3235 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3236
3237 if (!(dw0 & PCI_EA_ENABLE))
3238 goto out;
3239
3240 bei = (dw0 & PCI_EA_BEI) >> 4;
3241 prop = (dw0 & PCI_EA_PP) >> 8;
3242
3243
3244
3245
3246
3247 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3248 prop = (dw0 & PCI_EA_SP) >> 16;
3249 if (prop > PCI_EA_P_BRIDGE_IO)
3250 goto out;
3251
3252 res = pci_ea_get_resource(dev, bei, prop);
3253 if (!res) {
3254 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3255 goto out;
3256 }
3257
3258 flags = pci_ea_flags(dev, prop);
3259 if (!flags) {
3260 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3261 goto out;
3262 }
3263
3264
3265 pci_read_config_dword(dev, ent_offset, &base);
3266 start = (base & PCI_EA_FIELD_MASK);
3267 ent_offset += 4;
3268
3269
3270 pci_read_config_dword(dev, ent_offset, &max_offset);
3271 ent_offset += 4;
3272
3273
3274 if (base & PCI_EA_IS_64) {
3275 u32 base_upper;
3276
3277 pci_read_config_dword(dev, ent_offset, &base_upper);
3278 ent_offset += 4;
3279
3280 flags |= IORESOURCE_MEM_64;
3281
3282
3283 if (!support_64 && base_upper)
3284 goto out;
3285
3286 if (support_64)
3287 start |= ((u64)base_upper << 32);
3288 }
3289
3290 end = start + (max_offset | 0x03);
3291
3292
3293 if (max_offset & PCI_EA_IS_64) {
3294 u32 max_offset_upper;
3295
3296 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3297 ent_offset += 4;
3298
3299 flags |= IORESOURCE_MEM_64;
3300
3301
3302 if (!support_64 && max_offset_upper)
3303 goto out;
3304
3305 if (support_64)
3306 end += ((u64)max_offset_upper << 32);
3307 }
3308
3309 if (end < start) {
3310 pci_err(dev, "EA Entry crosses address boundary\n");
3311 goto out;
3312 }
3313
3314 if (ent_size != ent_offset - offset) {
3315 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3316 ent_size, ent_offset - offset);
3317 goto out;
3318 }
3319
3320 res->name = pci_name(dev);
3321 res->start = start;
3322 res->end = end;
3323 res->flags = flags;
3324
3325 if (bei <= PCI_EA_BEI_BAR5)
3326 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3327 bei, res, prop);
3328 else if (bei == PCI_EA_BEI_ROM)
3329 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3330 res, prop);
3331 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3332 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3333 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3334 else
3335 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3336 bei, res, prop);
3337
3338out:
3339 return offset + ent_size;
3340}
3341
3342
3343void pci_ea_init(struct pci_dev *dev)
3344{
3345 int ea;
3346 u8 num_ent;
3347 int offset;
3348 int i;
3349
3350
3351 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3352 if (!ea)
3353 return;
3354
3355
3356 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3357 &num_ent);
3358 num_ent &= PCI_EA_NUM_ENT_MASK;
3359
3360 offset = ea + PCI_EA_FIRST_ENT;
3361
3362
3363 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3364 offset += 4;
3365
3366
3367 for (i = 0; i < num_ent; ++i)
3368 offset = pci_ea_read(dev, offset);
3369}
3370
3371static void pci_add_saved_cap(struct pci_dev *pci_dev,
3372 struct pci_cap_saved_state *new_cap)
3373{
3374 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3375}
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3386 bool extended, unsigned int size)
3387{
3388 int pos;
3389 struct pci_cap_saved_state *save_state;
3390
3391 if (extended)
3392 pos = pci_find_ext_capability(dev, cap);
3393 else
3394 pos = pci_find_capability(dev, cap);
3395
3396 if (!pos)
3397 return 0;
3398
3399 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3400 if (!save_state)
3401 return -ENOMEM;
3402
3403 save_state->cap.cap_nr = cap;
3404 save_state->cap.cap_extended = extended;
3405 save_state->cap.size = size;
3406 pci_add_saved_cap(dev, save_state);
3407
3408 return 0;
3409}
3410
3411int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3412{
3413 return _pci_add_cap_save_buffer(dev, cap, false, size);
3414}
3415
3416int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3417{
3418 return _pci_add_cap_save_buffer(dev, cap, true, size);
3419}
3420
3421
3422
3423
3424
3425void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3426{
3427 int error;
3428
3429 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3430 PCI_EXP_SAVE_REGS * sizeof(u16));
3431 if (error)
3432 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3433
3434 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3435 if (error)
3436 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3437
3438 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3439 2 * sizeof(u16));
3440 if (error)
3441 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3442
3443 pci_allocate_vc_save_buffers(dev);
3444}
3445
3446void pci_free_cap_save_buffers(struct pci_dev *dev)
3447{
3448 struct pci_cap_saved_state *tmp;
3449 struct hlist_node *n;
3450
3451 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3452 kfree(tmp);
3453}
3454
3455
3456
3457
3458
3459
3460
3461
3462void pci_configure_ari(struct pci_dev *dev)
3463{
3464 u32 cap;
3465 struct pci_dev *bridge;
3466
3467 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3468 return;
3469
3470 bridge = dev->bus->self;
3471 if (!bridge)
3472 return;
3473
3474 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3475 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3476 return;
3477
3478 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3479 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3480 PCI_EXP_DEVCTL2_ARI);
3481 bridge->ari_enabled = 1;
3482 } else {
3483 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3484 PCI_EXP_DEVCTL2_ARI);
3485 bridge->ari_enabled = 0;
3486 }
3487}
3488
3489static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3490{
3491 int pos;
3492 u16 cap, ctrl;
3493
3494 pos = pdev->acs_cap;
3495 if (!pos)
3496 return false;
3497
3498
3499
3500
3501
3502
3503 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3504 acs_flags &= (cap | PCI_ACS_EC);
3505
3506 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3507 return (ctrl & acs_flags) == acs_flags;
3508}
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3527{
3528 int ret;
3529
3530 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3531 if (ret >= 0)
3532 return ret > 0;
3533
3534
3535
3536
3537
3538
3539 if (!pci_is_pcie(pdev))
3540 return false;
3541
3542 switch (pci_pcie_type(pdev)) {
3543
3544
3545
3546
3547
3548 case PCI_EXP_TYPE_PCIE_BRIDGE:
3549
3550
3551
3552
3553
3554
3555 case PCI_EXP_TYPE_PCI_BRIDGE:
3556 case PCI_EXP_TYPE_RC_EC:
3557 return false;
3558
3559
3560
3561
3562
3563 case PCI_EXP_TYPE_DOWNSTREAM:
3564 case PCI_EXP_TYPE_ROOT_PORT:
3565 return pci_acs_flags_enabled(pdev, acs_flags);
3566
3567
3568
3569
3570
3571
3572
3573 case PCI_EXP_TYPE_ENDPOINT:
3574 case PCI_EXP_TYPE_UPSTREAM:
3575 case PCI_EXP_TYPE_LEG_END:
3576 case PCI_EXP_TYPE_RC_END:
3577 if (!pdev->multifunction)
3578 break;
3579
3580 return pci_acs_flags_enabled(pdev, acs_flags);
3581 }
3582
3583
3584
3585
3586
3587 return true;
3588}
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599bool pci_acs_path_enabled(struct pci_dev *start,
3600 struct pci_dev *end, u16 acs_flags)
3601{
3602 struct pci_dev *pdev, *parent = start;
3603
3604 do {
3605 pdev = parent;
3606
3607 if (!pci_acs_enabled(pdev, acs_flags))
3608 return false;
3609
3610 if (pci_is_root_bus(pdev->bus))
3611 return (end == NULL);
3612
3613 parent = pdev->bus->self;
3614 } while (pdev != end);
3615
3616 return true;
3617}
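
/*
 * Illustrative use only: an IOMMU-style isolation check might verify
 * that peer-to-peer TLPs from a device are redirected upstream all the
 * way to the root, using the common redirect flags (a sketch, not a
 * definitive isolation policy):
 *
 *	u16 flags = PCI_ACS_RR | PCI_ACS_CF | PCI_ACS_UF;
 *	bool isolated = pci_acs_path_enabled(pdev, NULL, flags);
 */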
3618
3619
3620
3621
3622
3623void pci_acs_init(struct pci_dev *dev)
3624{
3625 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3626
3627
3628
3629
3630
3631
3632
3633 pci_enable_acs(dev);
3634}
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3646{
3647 unsigned int pos, nbars, i;
3648 u32 ctrl;
3649
3650 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3651 if (!pos)
3652 return -ENOTSUPP;
3653
3654 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3655 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3656 PCI_REBAR_CTRL_NBAR_SHIFT;
3657
3658 for (i = 0; i < nbars; i++, pos += 8) {
3659 int bar_idx;
3660
3661 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3662 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3663 if (bar_idx == bar)
3664 return pos;
3665 }
3666
3667 return -ENOENT;
3668}
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3679{
3680 int pos;
3681 u32 cap;
3682
3683 pos = pci_rebar_find_pos(pdev, bar);
3684 if (pos < 0)
3685 return 0;
3686
3687 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3688 cap &= PCI_REBAR_CAP_SIZES;
3689
3690
3691 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3692 bar == 0 && cap == 0x7000)
3693 cap = 0x3f000;
3694
3695 return cap >> 4;
3696}
3697EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
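
/*
 * Illustrative decoding (assuming the caller wants the largest size):
 * bit n of the returned mask means 2^n MB is a supported BAR size, so
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *
 *	if (sizes) {
 *		int max = __fls(sizes);		// size encoding for set_size()
 *		u64 bytes = 1ULL << (max + 20);	// 2^max MB
 *	}
 */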
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3708{
3709 int pos;
3710 u32 ctrl;
3711
3712 pos = pci_rebar_find_pos(pdev, bar);
3713 if (pos < 0)
3714 return pos;
3715
3716 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3717 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3718}
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3730{
3731 int pos;
3732 u32 ctrl;
3733
3734 pos = pci_rebar_find_pos(pdev, bar);
3735 if (pos < 0)
3736 return pos;
3737
3738 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3739 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3740 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3741 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3742 return 0;
3743}
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3759{
3760 struct pci_bus *bus = dev->bus;
3761 struct pci_dev *bridge;
3762 u32 cap, ctl2;
3763
3764
3765
3766
3767
3768
3769 if (dev->is_virtfn)
3770 return -EINVAL;
3771
3772 if (!pci_is_pcie(dev))
3773 return -EINVAL;
3774
3775
3776
3777
3778
3779
3780
3781
3782 switch (pci_pcie_type(dev)) {
3783 case PCI_EXP_TYPE_ENDPOINT:
3784 case PCI_EXP_TYPE_LEG_END:
3785 case PCI_EXP_TYPE_RC_END:
3786 break;
3787 default:
3788 return -EINVAL;
3789 }
3790
3791 while (bus->parent) {
3792 bridge = bus->self;
3793
3794 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3795
3796 switch (pci_pcie_type(bridge)) {
3797
3798 case PCI_EXP_TYPE_UPSTREAM:
3799 case PCI_EXP_TYPE_DOWNSTREAM:
3800 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3801 return -EINVAL;
3802 break;
3803
3804
3805 case PCI_EXP_TYPE_ROOT_PORT:
3806 if ((cap & cap_mask) != cap_mask)
3807 return -EINVAL;
3808 break;
3809 }
3810
3811
3812 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3813 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3814 &ctl2);
3815 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3816 return -EINVAL;
3817 }
3818
3819 bus = bus->parent;
3820 }
3821
3822 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3823 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3824 return 0;
3825}
3826EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
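
/*
 * Illustrative use only: an endpoint driver that wants 64-bit AtomicOp
 * completion at the Root Port might request routing in probe
 * (hypothetical fallback handling):
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "AtomicOps not routed on this path\n");
 */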
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3840{
3841 int slot;
3842
3843 if (pci_ari_enabled(dev->bus))
3844 slot = 0;
3845 else
3846 slot = PCI_SLOT(dev->devfn);
3847
3848 return (((pin - 1) + slot) % 4) + 1;
3849}
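
/*
 * Worked example: INTB (pin 2) on a device in slot 3 swizzles to
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA at the bridge above it.
 */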
3850
3851int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3852{
3853 u8 pin;
3854
3855 pin = dev->pin;
3856 if (!pin)
3857 return -1;
3858
3859 while (!pci_is_root_bus(dev->bus)) {
3860 pin = pci_swizzle_interrupt_pin(dev, pin);
3861 dev = dev->bus->self;
3862 }
3863 *bridge = dev;
3864 return pin;
3865}
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3876{
3877 u8 pin = *pinp;
3878
3879 while (!pci_is_root_bus(dev->bus)) {
3880 pin = pci_swizzle_interrupt_pin(dev, pin);
3881 dev = dev->bus->self;
3882 }
3883 *pinp = pin;
3884 return PCI_SLOT(dev->devfn);
3885}
3886EXPORT_SYMBOL_GPL(pci_common_swizzle);
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898void pci_release_region(struct pci_dev *pdev, int bar)
3899{
3900 struct pci_devres *dr;
3901
3902 if (pci_resource_len(pdev, bar) == 0)
3903 return;
3904 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3905 release_region(pci_resource_start(pdev, bar),
3906 pci_resource_len(pdev, bar));
3907 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3908 release_mem_region(pci_resource_start(pdev, bar),
3909 pci_resource_len(pdev, bar));
3910
3911 dr = find_pci_dr(pdev);
3912 if (dr)
3913 dr->region_mask &= ~(1 << bar);
3914}
3915EXPORT_SYMBOL(pci_release_region);
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936static int __pci_request_region(struct pci_dev *pdev, int bar,
3937 const char *res_name, int exclusive)
3938{
3939 struct pci_devres *dr;
3940
3941 if (pci_resource_len(pdev, bar) == 0)
3942 return 0;
3943
3944 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3945 if (!request_region(pci_resource_start(pdev, bar),
3946 pci_resource_len(pdev, bar), res_name))
3947 goto err_out;
3948 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3949 if (!__request_mem_region(pci_resource_start(pdev, bar),
3950 pci_resource_len(pdev, bar), res_name,
3951 exclusive))
3952 goto err_out;
3953 }
3954
3955 dr = find_pci_dr(pdev);
3956 if (dr)
3957 dr->region_mask |= 1 << bar;
3958
3959 return 0;
3960
3961err_out:
3962 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3963 &pdev->resource[bar]);
3964 return -EBUSY;
3965}
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3982{
3983 return __pci_request_region(pdev, bar, res_name, 0);
3984}
3985EXPORT_SYMBOL(pci_request_region);
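
/*
 * Illustrative use only (hypothetical "foo" driver): claim BAR 0
 * before mapping it, and release it again on teardown:
 *
 *	err = pci_request_region(pdev, 0, "foo");
 *	if (err)
 *		return err;
 *	regs = pci_iomap(pdev, 0, 0);
 *	...
 *	pci_iounmap(pdev, regs);
 *	pci_release_region(pdev, 0);
 */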
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3996{
3997 int i;
3998
3999 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4000 if (bars & (1 << i))
4001 pci_release_region(pdev, i);
4002}
4003EXPORT_SYMBOL(pci_release_selected_regions);
4004
4005static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4006 const char *res_name, int excl)
4007{
4008 int i;
4009
4010 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4011 if (bars & (1 << i))
4012 if (__pci_request_region(pdev, i, res_name, excl))
4013 goto err_out;
4014 return 0;
4015
4016err_out:
4017 while (--i >= 0)
4018 if (bars & (1 << i))
4019 pci_release_region(pdev, i);
4020
4021 return -EBUSY;
4022}
4023
4024
4025
4026
4027
4028
4029
4030
4031int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4032 const char *res_name)
4033{
4034 return __pci_request_selected_regions(pdev, bars, res_name, 0);
4035}
4036EXPORT_SYMBOL(pci_request_selected_regions);
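
/*
 * Illustrative use only: the bars argument is a bitmask of BAR
 * indices, so a hypothetical driver that touches only BARs 0 and 2
 * would request:
 *
 *	err = pci_request_selected_regions(pdev, BIT(0) | BIT(2), "foo");
 */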
4037
4038int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4039 const char *res_name)
4040{
4041 return __pci_request_selected_regions(pdev, bars, res_name,
4042 IORESOURCE_EXCLUSIVE);
4043}
4044EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056void pci_release_regions(struct pci_dev *pdev)
4057{
4058 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4059}
4060EXPORT_SYMBOL(pci_release_regions);
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4076{
4077 return pci_request_selected_regions(pdev,
4078 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4079}
4080EXPORT_SYMBOL(pci_request_regions);
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4098{
4099 return pci_request_selected_regions_exclusive(pdev,
4100 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4101}
4102EXPORT_SYMBOL(pci_request_regions_exclusive);
4103
4104
4105
4106
4107
4108int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4109 resource_size_t size)
4110{
4111 int ret = 0;
4112#ifdef PCI_IOBASE
4113 struct logic_pio_hwaddr *range;
4114
4115 if (!size || addr + size < addr)
4116 return -EINVAL;
4117
4118 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4119 if (!range)
4120 return -ENOMEM;
4121
4122 range->fwnode = fwnode;
4123 range->size = size;
4124 range->hw_start = addr;
4125 range->flags = LOGIC_PIO_CPU_MMIO;
4126
4127 ret = logic_pio_register_range(range);
4128 if (ret)
4129 kfree(range);
4130
4131
4132 if (ret == -EEXIST)
4133 ret = 0;
4134#endif
4135
4136 return ret;
4137}
4138
4139phys_addr_t pci_pio_to_address(unsigned long pio)
4140{
4141 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4142
4143#ifdef PCI_IOBASE
4144 if (pio >= MMIO_UPPER_LIMIT)
4145 return address;
4146
4147 address = logic_pio_to_hwaddr(pio);
4148#endif
4149
4150 return address;
4151}
4152EXPORT_SYMBOL_GPL(pci_pio_to_address);
4153
4154unsigned long __weak pci_address_to_pio(phys_addr_t address)
4155{
4156#ifdef PCI_IOBASE
4157 return logic_pio_trans_cpuaddr(address);
4158#else
4159 if (address > IO_SPACE_LIMIT)
4160 return (unsigned long)-1;
4161
4162 return (unsigned long) address;
4163#endif
4164}
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176#ifndef pci_remap_iospace
4177int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4178{
4179#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4180 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4181
4182 if (!(res->flags & IORESOURCE_IO))
4183 return -EINVAL;
4184
4185 if (res->end > IO_SPACE_LIMIT)
4186 return -EINVAL;
4187
4188 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4189 pgprot_device(PAGE_KERNEL));
4190#else
4191
4192
4193
4194
4195 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4196 return -ENODEV;
4197#endif
4198}
4199EXPORT_SYMBOL(pci_remap_iospace);
4200#endif
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210void pci_unmap_iospace(struct resource *res)
4211{
4212#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4213 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4214
4215 vunmap_range(vaddr, vaddr + resource_size(res));
4216#endif
4217}
4218EXPORT_SYMBOL(pci_unmap_iospace);
4219
4220static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4221{
4222 struct resource **res = ptr;
4223
4224 pci_unmap_iospace(*res);
4225}
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4237 phys_addr_t phys_addr)
4238{
4239 const struct resource **ptr;
4240 int error;
4241
4242 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4243 if (!ptr)
4244 return -ENOMEM;
4245
4246 error = pci_remap_iospace(res, phys_addr);
4247 if (error) {
4248 devres_free(ptr);
4249 } else {
4250 *ptr = res;
4251 devres_add(dev, ptr);
4252 }
4253
4254 return error;
4255}
4256EXPORT_SYMBOL(devm_pci_remap_iospace);
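
/*
 * Illustrative use only: a host bridge driver would typically map its
 * I/O window (a logical PIO resource "res") with:
 *
 *	err = devm_pci_remap_iospace(dev, res,
 *				     pci_pio_to_address(res->start));
 *
 * (a sketch; assumes the range was registered via
 * pci_register_io_range() first).
 */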
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4268 resource_size_t offset,
4269 resource_size_t size)
4270{
4271 void __iomem **ptr, *addr;
4272
4273 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4274 if (!ptr)
4275 return NULL;
4276
4277 addr = pci_remap_cfgspace(offset, size);
4278 if (addr) {
4279 *ptr = addr;
4280 devres_add(dev, ptr);
4281 } else
4282 devres_free(ptr);
4283
4284 return addr;
4285}
4286EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4308 struct resource *res)
4309{
4310 resource_size_t size;
4311 const char *name;
4312 void __iomem *dest_ptr;
4313
4314 BUG_ON(!dev);
4315
4316 if (!res || resource_type(res) != IORESOURCE_MEM) {
4317 dev_err(dev, "invalid resource\n");
4318 return IOMEM_ERR_PTR(-EINVAL);
4319 }
4320
4321 size = resource_size(res);
4322
4323 if (res->name)
4324 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4325 res->name);
4326 else
4327 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4328 if (!name)
4329 return IOMEM_ERR_PTR(-ENOMEM);
4330
4331 if (!devm_request_mem_region(dev, res->start, size, name)) {
4332 dev_err(dev, "can't request region for resource %pR\n", res);
4333 return IOMEM_ERR_PTR(-EBUSY);
4334 }
4335
4336 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4337 if (!dest_ptr) {
4338 dev_err(dev, "ioremap failed for resource %pR\n", res);
4339 devm_release_mem_region(dev, res->start, size);
4340 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4341 }
4342
4343 return dest_ptr;
4344}
4345EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4346
4347static void __pci_set_master(struct pci_dev *dev, bool enable)
4348{
4349 u16 old_cmd, cmd;
4350
4351 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4352 if (enable)
4353 cmd = old_cmd | PCI_COMMAND_MASTER;
4354 else
4355 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4356 if (cmd != old_cmd) {
4357 pci_dbg(dev, "%s bus mastering\n",
4358 enable ? "enabling" : "disabling");
4359 pci_write_config_word(dev, PCI_COMMAND, cmd);
4360 }
4361 dev->is_busmaster = enable;
4362}
4363
4364
4365
4366
4367
4368
4369
4370
4371char * __weak __init pcibios_setup(char *str)
4372{
4373 return str;
4374}
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384void __weak pcibios_set_master(struct pci_dev *dev)
4385{
4386 u8 lat;
4387
4388
4389 if (pci_is_pcie(dev))
4390 return;
4391
4392 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4393 if (lat < 16)
4394 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4395 else if (lat > pcibios_max_latency)
4396 lat = pcibios_max_latency;
4397 else
4398 return;
4399
4400 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4401}
4402
4403
4404
4405
4406
4407
4408
4409
4410void pci_set_master(struct pci_dev *dev)
4411{
4412 __pci_set_master(dev, true);
4413 pcibios_set_master(dev);
4414}
4415EXPORT_SYMBOL(pci_set_master);
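
/*
 * Illustrative probe-time sequence (hypothetical driver): enable the
 * device before touching its BARs, then allow it to master the bus so
 * it can DMA:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */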
4416
4417
4418
4419
4420
4421void pci_clear_master(struct pci_dev *dev)
4422{
4423 __pci_set_master(dev, false);
4424}
4425EXPORT_SYMBOL(pci_clear_master);
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437int pci_set_cacheline_size(struct pci_dev *dev)
4438{
4439 u8 cacheline_size;
4440
4441 if (!pci_cache_line_size)
4442 return -EINVAL;
4443
4444
4445
4446 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4447 if (cacheline_size >= pci_cache_line_size &&
4448 (cacheline_size % pci_cache_line_size) == 0)
4449 return 0;
4450
4451
4452 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4453
4454 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4455 if (cacheline_size == pci_cache_line_size)
4456 return 0;
4457
4458 pci_dbg(dev, "cache line size of %d is not supported\n",
4459 pci_cache_line_size << 2);
4460
4461 return -EINVAL;
4462}
4463EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473int pci_set_mwi(struct pci_dev *dev)
4474{
4475#ifdef PCI_DISABLE_MWI
4476 return 0;
4477#else
4478 int rc;
4479 u16 cmd;
4480
4481 rc = pci_set_cacheline_size(dev);
4482 if (rc)
4483 return rc;
4484
4485 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4486 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4487 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4488 cmd |= PCI_COMMAND_INVALIDATE;
4489 pci_write_config_word(dev, PCI_COMMAND, cmd);
4490 }
4491 return 0;
4492#endif
4493}
4494EXPORT_SYMBOL(pci_set_mwi);
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504int pcim_set_mwi(struct pci_dev *dev)
4505{
4506 struct pci_devres *dr;
4507
4508 dr = find_pci_dr(dev);
4509 if (!dr)
4510 return -ENOMEM;
4511
4512 dr->mwi = 1;
4513 return pci_set_mwi(dev);
4514}
4515EXPORT_SYMBOL(pcim_set_mwi);
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526int pci_try_set_mwi(struct pci_dev *dev)
4527{
4528#ifdef PCI_DISABLE_MWI
4529 return 0;
4530#else
4531 return pci_set_mwi(dev);
4532#endif
4533}
4534EXPORT_SYMBOL(pci_try_set_mwi);
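
/*
 * Illustrative use only: since MWI is purely an optimization, callers
 * of the "try" variant typically ignore the result:
 *
 *	pci_try_set_mwi(pdev);
 */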
4535
4536
4537
4538
4539
4540
4541
4542void pci_clear_mwi(struct pci_dev *dev)
4543{
4544#ifndef PCI_DISABLE_MWI
4545 u16 cmd;
4546
4547 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4548 if (cmd & PCI_COMMAND_INVALIDATE) {
4549 cmd &= ~PCI_COMMAND_INVALIDATE;
4550 pci_write_config_word(dev, PCI_COMMAND, cmd);
4551 }
4552#endif
4553}
4554EXPORT_SYMBOL(pci_clear_mwi);
4555
4556
4557
4558
4559
4560
4561
4562void pci_disable_parity(struct pci_dev *dev)
4563{
4564 u16 cmd;
4565
4566 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4567 if (cmd & PCI_COMMAND_PARITY) {
4568 cmd &= ~PCI_COMMAND_PARITY;
4569 pci_write_config_word(dev, PCI_COMMAND, cmd);
4570 }
4571}
4572
4573
4574
4575
4576
4577
4578
4579
4580void pci_intx(struct pci_dev *pdev, int enable)
4581{
4582 u16 pci_command, new;
4583
4584 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4585
4586 if (enable)
4587 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4588 else
4589 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4590
4591 if (new != pci_command) {
4592 struct pci_devres *dr;
4593
4594 pci_write_config_word(pdev, PCI_COMMAND, new);
4595
4596 dr = find_pci_dr(pdev);
4597 if (dr && !dr->restore_intx) {
4598 dr->restore_intx = 1;
4599 dr->orig_intx = !enable;
4600 }
4601 }
4602}
4603EXPORT_SYMBOL_GPL(pci_intx);
4604
4605static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4606{
4607 struct pci_bus *bus = dev->bus;
4608 bool mask_updated = true;
4609 u32 cmd_status_dword;
4610 u16 origcmd, newcmd;
4611 unsigned long flags;
4612 bool irq_pending;
4613
4614
4615
4616
4617
4618 BUILD_BUG_ON(PCI_COMMAND % 4);
4619 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4620
4621 raw_spin_lock_irqsave(&pci_lock, flags);
4622
4623 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4624
4625 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4626
4627
4628
4629
4630
4631
4632 if (mask != irq_pending) {
4633 mask_updated = false;
4634 goto done;
4635 }
4636
4637 origcmd = cmd_status_dword;
4638 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4639 if (mask)
4640 newcmd |= PCI_COMMAND_INTX_DISABLE;
4641 if (newcmd != origcmd)
4642 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4643
4644done:
4645 raw_spin_unlock_irqrestore(&pci_lock, flags);
4646
4647 return mask_updated;
4648}
4649
4650
4651
4652
4653
4654
4655
4656
4657bool pci_check_and_mask_intx(struct pci_dev *dev)
4658{
4659 return pci_check_and_set_intx_mask(dev, true);
4660}
4661EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
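
/*
 * Illustrative use only: a handler for a shared INTx line on a device
 * without usable per-device status (the vfio/uio style) can use this
 * both to test ownership and to quiesce the line (hypothetical "foo"):
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *foo = data;
 *
 *		if (!pci_check_and_mask_intx(foo->pdev))
 *			return IRQ_NONE;
 *		// Handle the event; unmask again later via
 *		// pci_check_and_unmask_intx().
 *		return IRQ_HANDLED;
 *	}
 */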
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671bool pci_check_and_unmask_intx(struct pci_dev *dev)
4672{
4673 return pci_check_and_set_intx_mask(dev, false);
4674}
4675EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4676
4677
4678
4679
4680
4681
4682
4683int pci_wait_for_pending_transaction(struct pci_dev *dev)
4684{
4685 if (!pci_is_pcie(dev))
4686 return 1;
4687
4688 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4689 PCI_EXP_DEVSTA_TRPND);
4690}
4691EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4692
4693
4694
4695
4696
4697
4698
4699
4700int pcie_flr(struct pci_dev *dev)
4701{
4702 if (!pci_wait_for_pending_transaction(dev))
4703 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4704
4705 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4706
4707 if (dev->imm_ready)
4708 return 0;
4709
4710
4711
4712
4713
4714
4715 msleep(100);
4716
4717 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4718}
4719EXPORT_SYMBOL_GPL(pcie_flr);
4720
4721
4722
4723
4724
4725
4726
4727
4728int pcie_reset_flr(struct pci_dev *dev, bool probe)
4729{
4730 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4731 return -ENOTTY;
4732
4733 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4734 return -ENOTTY;
4735
4736 if (probe)
4737 return 0;
4738
4739 return pcie_flr(dev);
4740}
4741EXPORT_SYMBOL_GPL(pcie_reset_flr);
4742
4743static int pci_af_flr(struct pci_dev *dev, bool probe)
4744{
4745 int pos;
4746 u8 cap;
4747
4748 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4749 if (!pos)
4750 return -ENOTTY;
4751
4752 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4753 return -ENOTTY;
4754
4755 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4756 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4757 return -ENOTTY;
4758
4759 if (probe)
4760 return 0;
4761
4762
4763
4764
4765
4766
4767 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4768 PCI_AF_STATUS_TP << 8))
4769 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4770
4771 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4772
4773 if (dev->imm_ready)
4774 return 0;
4775
4776
4777
4778
4779
4780
4781
4782 msleep(100);
4783
4784 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4785}
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802static int pci_pm_reset(struct pci_dev *dev, bool probe)
4803{
4804 u16 csr;
4805
4806 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4807 return -ENOTTY;
4808
4809 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4810 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4811 return -ENOTTY;
4812
4813 if (probe)
4814 return 0;
4815
4816 if (dev->current_state != PCI_D0)
4817 return -EINVAL;
4818
4819 csr &= ~PCI_PM_CTRL_STATE_MASK;
4820 csr |= PCI_D3hot;
4821 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4822 pci_dev_d3_sleep(dev);
4823
4824 csr &= ~PCI_PM_CTRL_STATE_MASK;
4825 csr |= PCI_D0;
4826 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4827 pci_dev_d3_sleep(dev);
4828
4829 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4830}
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4841 int delay)
4842{
4843 int timeout = 1000;
4844 bool ret;
4845 u16 lnk_status;
4846
4847
4848
4849
4850
4851 if (!pdev->link_active_reporting) {
4852 msleep(timeout + delay);
4853 return true;
4854 }
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865 if (active)
4866 msleep(20);
4867 for (;;) {
4868 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4869 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4870 if (ret == active)
4871 break;
4872 if (timeout <= 0)
4873 break;
4874 msleep(10);
4875 timeout -= 10;
4876 }
4877 if (active && ret)
4878 msleep(delay);
4879
4880 return ret == active;
4881}
4882
4883
4884
4885
4886
4887
4888
4889
4890bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4891{
4892 return pcie_wait_for_link_delay(pdev, active, 100);
4893}
4894
4895
4896
4897
4898
4899
4900
4901
4902static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4903{
4904 const struct pci_dev *pdev;
4905 int min_delay = 100;
4906 int max_delay = 0;
4907
4908 list_for_each_entry(pdev, &bus->devices, bus_list) {
4909 if (pdev->d3cold_delay < min_delay)
4910 min_delay = pdev->d3cold_delay;
4911 if (pdev->d3cold_delay > max_delay)
4912 max_delay = pdev->d3cold_delay;
4913 }
4914
4915 return max(min_delay, max_delay);
4916}
4917
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4930{
4931 struct pci_dev *child;
4932 int delay;
4933
4934 if (pci_dev_is_disconnected(dev))
4935 return;
4936
4937 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4938 return;
4939
4940 down_read(&pci_bus_sem);
4941
4942
4943
4944
4945
4946
4947
4948 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4949 up_read(&pci_bus_sem);
4950 return;
4951 }
4952
4953
4954 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4955 if (!delay) {
4956 up_read(&pci_bus_sem);
4957 return;
4958 }
4959
4960 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4961 bus_list);
4962 up_read(&pci_bus_sem);
4963
4964
4965
4966
4967
4968
4969
4970 if (!pci_is_pcie(dev)) {
4971 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4972 msleep(1000 + delay);
4973 return;
4974 }
4975
4976
4977
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993 if (!pcie_downstream_port(dev))
4994 return;
4995
4996 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4997 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4998 msleep(delay);
4999 } else {
5000 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
5001 delay);
5002 if (!pcie_wait_for_link_delay(dev, true, delay)) {
5003
5004 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
5005 return;
5006 }
5007 }
5008
5009 if (!pci_device_is_present(child)) {
5010 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
5011 msleep(delay);
5012 }
5013}
5014
5015void pci_reset_secondary_bus(struct pci_dev *dev)
5016{
5017 u16 ctrl;
5018
5019 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
5020 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
5021 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5022
5023
5024
5025
5026
5027 msleep(2);
5028
5029 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
5030 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5031
5032
5033
5034
5035
5036
5037
5038
5039 ssleep(1);
5040}
5041
5042void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
5043{
5044 pci_reset_secondary_bus(dev);
5045}
5046
5047
5048
5049
5050
5051
5052
5053
5054int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5055{
5056 pcibios_reset_secondary_bus(dev);
5057
5058 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
5059}
5060EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5061
5062static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5063{
5064 struct pci_dev *pdev;
5065
5066 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5067 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5068 return -ENOTTY;
5069
5070 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5071 if (pdev != dev)
5072 return -ENOTTY;
5073
5074 if (probe)
5075 return 0;
5076
5077 return pci_bridge_secondary_bus_reset(dev->bus->self);
5078}
5079
5080static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5081{
5082 int rc = -ENOTTY;
5083
5084 if (!hotplug || !try_module_get(hotplug->owner))
5085 return rc;
5086
5087 if (hotplug->ops->reset_slot)
5088 rc = hotplug->ops->reset_slot(hotplug, probe);
5089
5090 module_put(hotplug->owner);
5091
5092 return rc;
5093}
5094
5095static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5096{
5097 if (dev->multifunction || dev->subordinate || !dev->slot ||
5098 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5099 return -ENOTTY;
5100
5101 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5102}
5103
5104static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5105{
5106 int rc;
5107
5108 rc = pci_dev_reset_slot_function(dev, probe);
5109 if (rc != -ENOTTY)
5110 return rc;
5111 return pci_parent_bus_reset(dev, probe);
5112}
5113
5114void pci_dev_lock(struct pci_dev *dev)
5115{
5116 pci_cfg_access_lock(dev);
5117
5118 device_lock(&dev->dev);
5119}
5120EXPORT_SYMBOL_GPL(pci_dev_lock);
5121
5122
5123int pci_dev_trylock(struct pci_dev *dev)
5124{
5125 if (pci_cfg_access_trylock(dev)) {
5126 if (device_trylock(&dev->dev))
5127 return 1;
5128 pci_cfg_access_unlock(dev);
5129 }
5130
5131 return 0;
5132}
5133EXPORT_SYMBOL_GPL(pci_dev_trylock);
5134
5135void pci_dev_unlock(struct pci_dev *dev)
5136{
5137 device_unlock(&dev->dev);
5138 pci_cfg_access_unlock(dev);
5139}
5140EXPORT_SYMBOL_GPL(pci_dev_unlock);
5141
5142static void pci_dev_save_and_disable(struct pci_dev *dev)
5143{
5144 const struct pci_error_handlers *err_handler =
5145 dev->driver ? dev->driver->err_handler : NULL;
5146
5147
5148
5149
5150
5151
5152 if (err_handler && err_handler->reset_prepare)
5153 err_handler->reset_prepare(dev);
5154
5155
5156
5157
5158
5159
5160 pci_set_power_state(dev, PCI_D0);
5161
5162 pci_save_state(dev);
5163
5164
5165
5166
5167
5168
5169
5170 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5171}
5172
5173static void pci_dev_restore(struct pci_dev *dev)
5174{
5175 const struct pci_error_handlers *err_handler =
5176 dev->driver ? dev->driver->err_handler : NULL;
5177
5178 pci_restore_state(dev);
5179
5180
5181
5182
5183
5184
5185 if (err_handler && err_handler->reset_done)
5186 err_handler->reset_done(dev);
5187}
5188

/* dev->reset_methods[] is a 0-terminated list of indices into this array */
5190static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5191 { },
5192 { pci_dev_specific_reset, .name = "device_specific" },
5193 { pci_dev_acpi_reset, .name = "acpi" },
5194 { pcie_reset_flr, .name = "flr" },
5195 { pci_af_flr, .name = "af_flr" },
5196 { pci_pm_reset, .name = "pm" },
5197 { pci_reset_bus_function, .name = "bus" },
5198};
5199
5200static ssize_t reset_method_show(struct device *dev,
5201 struct device_attribute *attr, char *buf)
5202{
5203 struct pci_dev *pdev = to_pci_dev(dev);
5204 ssize_t len = 0;
5205 int i, m;
5206
5207 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5208 m = pdev->reset_methods[i];
5209 if (!m)
5210 break;
5211
5212 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5213 pci_reset_fn_methods[m].name);
5214 }
5215
5216 if (len)
5217 len += sysfs_emit_at(buf, len, "\n");
5218
5219 return len;
5220}
5221
5222static int reset_method_lookup(const char *name)
5223{
5224 int m;
5225
5226 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5227 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5228 return m;
5229 }
5230
5231 return 0;
5232}
5233
5234static ssize_t reset_method_store(struct device *dev,
5235 struct device_attribute *attr,
5236 const char *buf, size_t count)
5237{
5238 struct pci_dev *pdev = to_pci_dev(dev);
5239 char *options, *name;
5240 int m, n;
5241 u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5242
5243 if (sysfs_streq(buf, "")) {
5244 pdev->reset_methods[0] = 0;
5245 pci_warn(pdev, "All device reset methods disabled by user");
5246 return count;
5247 }
5248
5249 if (sysfs_streq(buf, "default")) {
5250 pci_init_reset_methods(pdev);
5251 return count;
5252 }
5253
5254 options = kstrndup(buf, count, GFP_KERNEL);
5255 if (!options)
5256 return -ENOMEM;
5257
5258 n = 0;
5259 while ((name = strsep(&options, " ")) != NULL) {
5260 if (sysfs_streq(name, ""))
5261 continue;
5262
5263 name = strim(name);
5264
5265 m = reset_method_lookup(name);
5266 if (!m) {
5267 pci_err(pdev, "Invalid reset method '%s'", name);
5268 goto error;
5269 }
5270
5271 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5272 pci_err(pdev, "Unsupported reset method '%s'", name);
5273 goto error;
5274 }
5275
5276 if (n == PCI_NUM_RESET_METHODS - 1) {
5277 pci_err(pdev, "Too many reset methods\n");
5278 goto error;
5279 }
5280
5281 reset_methods[n++] = m;
5282 }
5283
5284 reset_methods[n] = 0;
5285
	/* Warn if dev-specific reset is supported but not highest priority */
5287 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5288 reset_methods[0] != 1)
5289 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5290 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5291 kfree(options);
5292 return count;
5293
5294error:
	/* Leave previous methods unchanged */
5296 kfree(options);
5297 return -EINVAL;
5298}
5299static DEVICE_ATTR_RW(reset_method);
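
/*
 * Illustrative sysfs usage (shell, with a made-up device address):
 *
 *	# cat /sys/bus/pci/devices/0000:00:1f.6/reset_method
 *	flr bus
 *	# echo bus > /sys/bus/pci/devices/0000:00:1f.6/reset_method
 *	# echo default > /sys/bus/pci/devices/0000:00:1f.6/reset_method
 */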
5300
5301static struct attribute *pci_dev_reset_method_attrs[] = {
5302 &dev_attr_reset_method.attr,
5303 NULL,
5304};
5305
5306static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5307 struct attribute *a, int n)
5308{
5309 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5310
5311 if (!pci_reset_supported(pdev))
5312 return 0;
5313
5314 return a->mode;
5315}
5316
5317const struct attribute_group pci_dev_reset_method_attr_group = {
5318 .attrs = pci_dev_reset_method_attrs,
5319 .is_visible = pci_dev_reset_method_attr_is_visible,
5320};
5321
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340
5341
5342int __pci_reset_function_locked(struct pci_dev *dev)
5343{
5344 int i, m, rc;
5345
5346 might_sleep();
5347
	/*
	 * A reset method returns -ENOTTY if it doesn't support this device
	 * and we should try the next method.
	 *
	 * If it returns 0 (success), we're finished.  If it returns any
	 * other error, we're also finished: this indicates that further
	 * reset mechanisms might be broken on the device.
	 */
5356 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5357 m = dev->reset_methods[i];
5358 if (!m)
5359 return -ENOTTY;
5360
5361 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5362 if (!rc)
5363 return 0;
5364 if (rc != -ENOTTY)
5365 return rc;
5366 }
5367
5368 return -ENOTTY;
5369}
5370EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5371
5372
5373
5374
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384void pci_init_reset_methods(struct pci_dev *dev)
5385{
5386 int m, i, rc;
5387
5388 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5389
5390 might_sleep();
5391
5392 i = 0;
5393 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5394 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5395 if (!rc)
5396 dev->reset_methods[i++] = m;
5397 else if (rc != -ENOTTY)
5398 break;
5399 }
5400
5401 dev->reset_methods[i] = 0;
5402}
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419
5420int pci_reset_function(struct pci_dev *dev)
5421{
5422 int rc;
5423
5424 if (!pci_reset_supported(dev))
5425 return -ENOTTY;
5426
5427 pci_dev_lock(dev);
5428 pci_dev_save_and_disable(dev);
5429
5430 rc = __pci_reset_function_locked(dev);
5431
5432 pci_dev_restore(dev);
5433 pci_dev_unlock(dev);
5434
5435 return rc;
5436}
5437EXPORT_SYMBOL_GPL(pci_reset_function);
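
/*
 * Illustrative use only: a driver recovering a wedged device from an
 * error path (hypothetical):
 *
 *	if (pci_reset_function(pdev))
 *		dev_err(&pdev->dev, "device reset failed\n");
 */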
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456int pci_reset_function_locked(struct pci_dev *dev)
5457{
5458 int rc;
5459
5460 if (!pci_reset_supported(dev))
5461 return -ENOTTY;
5462
5463 pci_dev_save_and_disable(dev);
5464
5465 rc = __pci_reset_function_locked(dev);
5466
5467 pci_dev_restore(dev);
5468
5469 return rc;
5470}
5471EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5472
5473
5474
5475
5476
5477
5478
5479int pci_try_reset_function(struct pci_dev *dev)
5480{
5481 int rc;
5482
5483 if (!pci_reset_supported(dev))
5484 return -ENOTTY;
5485
5486 if (!pci_dev_trylock(dev))
5487 return -EAGAIN;
5488
5489 pci_dev_save_and_disable(dev);
5490 rc = __pci_reset_function_locked(dev);
5491 pci_dev_restore(dev);
5492 pci_dev_unlock(dev);
5493
5494 return rc;
5495}
5496EXPORT_SYMBOL_GPL(pci_try_reset_function);
5497
5498
5499static bool pci_bus_resetable(struct pci_bus *bus)
5500{
5501 struct pci_dev *dev;
5502
5503
5504 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5505 return false;
5506
5507 list_for_each_entry(dev, &bus->devices, bus_list) {
5508 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5509 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5510 return false;
5511 }
5512
5513 return true;
5514}
5515
5516
5517static void pci_bus_lock(struct pci_bus *bus)
5518{
5519 struct pci_dev *dev;
5520
5521 list_for_each_entry(dev, &bus->devices, bus_list) {
5522 pci_dev_lock(dev);
5523 if (dev->subordinate)
5524 pci_bus_lock(dev->subordinate);
5525 }
5526}
5527
5528
5529static void pci_bus_unlock(struct pci_bus *bus)
5530{
5531 struct pci_dev *dev;
5532
5533 list_for_each_entry(dev, &bus->devices, bus_list) {
5534 if (dev->subordinate)
5535 pci_bus_unlock(dev->subordinate);
5536 pci_dev_unlock(dev);
5537 }
5538}
5539
5540
5541static int pci_bus_trylock(struct pci_bus *bus)
5542{
5543 struct pci_dev *dev;
5544
5545 list_for_each_entry(dev, &bus->devices, bus_list) {
5546 if (!pci_dev_trylock(dev))
5547 goto unlock;
5548 if (dev->subordinate) {
5549 if (!pci_bus_trylock(dev->subordinate)) {
5550 pci_dev_unlock(dev);
5551 goto unlock;
5552 }
5553 }
5554 }
5555 return 1;
5556
5557unlock:
5558 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5559 if (dev->subordinate)
5560 pci_bus_unlock(dev->subordinate);
5561 pci_dev_unlock(dev);
5562 }
5563 return 0;
5564}
5565
5566
5567static bool pci_slot_resetable(struct pci_slot *slot)
5568{
5569 struct pci_dev *dev;
5570
5571 if (slot->bus->self &&
5572 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5573 return false;
5574
5575 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5576 if (!dev->slot || dev->slot != slot)
5577 continue;
5578 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5579 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5580 return false;
5581 }
5582
5583 return true;
5584}
5585
5586
5587static void pci_slot_lock(struct pci_slot *slot)
5588{
5589 struct pci_dev *dev;
5590
5591 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5592 if (!dev->slot || dev->slot != slot)
5593 continue;
5594 pci_dev_lock(dev);
5595 if (dev->subordinate)
5596 pci_bus_lock(dev->subordinate);
5597 }
5598}
5599
5600
5601static void pci_slot_unlock(struct pci_slot *slot)
5602{
5603 struct pci_dev *dev;
5604
5605 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5606 if (!dev->slot || dev->slot != slot)
5607 continue;
5608 if (dev->subordinate)
5609 pci_bus_unlock(dev->subordinate);
5610 pci_dev_unlock(dev);
5611 }
5612}
5613
5614
5615static int pci_slot_trylock(struct pci_slot *slot)
5616{
5617 struct pci_dev *dev;
5618
5619 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5620 if (!dev->slot || dev->slot != slot)
5621 continue;
5622 if (!pci_dev_trylock(dev))
5623 goto unlock;
5624 if (dev->subordinate) {
5625 if (!pci_bus_trylock(dev->subordinate)) {
5626 pci_dev_unlock(dev);
5627 goto unlock;
5628 }
5629 }
5630 }
5631 return 1;
5632
5633unlock:
5634 list_for_each_entry_continue_reverse(dev,
5635 &slot->bus->devices, bus_list) {
5636 if (!dev->slot || dev->slot != slot)
5637 continue;
5638 if (dev->subordinate)
5639 pci_bus_unlock(dev->subordinate);
5640 pci_dev_unlock(dev);
5641 }
5642 return 0;
5643}
5644
5645
5646
5647
5648
5649static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5650{
5651 struct pci_dev *dev;
5652
5653 list_for_each_entry(dev, &bus->devices, bus_list) {
5654 pci_dev_save_and_disable(dev);
5655 if (dev->subordinate)
5656 pci_bus_save_and_disable_locked(dev->subordinate);
5657 }
5658}
5659
5660
5661
5662
5663
5664
5665static void pci_bus_restore_locked(struct pci_bus *bus)
5666{
5667 struct pci_dev *dev;
5668
5669 list_for_each_entry(dev, &bus->devices, bus_list) {
5670 pci_dev_restore(dev);
5671 if (dev->subordinate)
5672 pci_bus_restore_locked(dev->subordinate);
5673 }
5674}
5675
5676
5677
5678
5679
5680static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5681{
5682 struct pci_dev *dev;
5683
5684 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5685 if (!dev->slot || dev->slot != slot)
5686 continue;
5687 pci_dev_save_and_disable(dev);
5688 if (dev->subordinate)
5689 pci_bus_save_and_disable_locked(dev->subordinate);
5690 }
5691}
5692
5693
5694
5695
5696
5697
5698static void pci_slot_restore_locked(struct pci_slot *slot)
5699{
5700 struct pci_dev *dev;
5701
5702 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5703 if (!dev->slot || dev->slot != slot)
5704 continue;
5705 pci_dev_restore(dev);
5706 if (dev->subordinate)
5707 pci_bus_restore_locked(dev->subordinate);
5708 }
5709}
5710
5711static int pci_slot_reset(struct pci_slot *slot, bool probe)
5712{
5713 int rc;
5714
5715 if (!slot || !pci_slot_resetable(slot))
5716 return -ENOTTY;
5717
5718 if (!probe)
5719 pci_slot_lock(slot);
5720
5721 might_sleep();
5722
5723 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5724
5725 if (!probe)
5726 pci_slot_unlock(slot);
5727
5728 return rc;
5729}
5730
5731
5732
5733
5734
5735
5736
5737int pci_probe_reset_slot(struct pci_slot *slot)
5738{
5739 return pci_slot_reset(slot, PCI_RESET_PROBE);
5740}
5741EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5742
5743
5744
5745
5746
5747
5748
5749
5750
5751
5752
5753
5754
5755
5756
5757
5758static int __pci_reset_slot(struct pci_slot *slot)
5759{
5760 int rc;
5761
5762 rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5763 if (rc)
5764 return rc;
5765
5766 if (pci_slot_trylock(slot)) {
5767 pci_slot_save_and_disable_locked(slot);
5768 might_sleep();
5769 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5770 pci_slot_restore_locked(slot);
5771 pci_slot_unlock(slot);
5772 } else
5773 rc = -EAGAIN;
5774
5775 return rc;
5776}
5777
5778static int pci_bus_reset(struct pci_bus *bus, bool probe)
5779{
5780 int ret;
5781
5782 if (!bus->self || !pci_bus_resetable(bus))
5783 return -ENOTTY;
5784
5785 if (probe)
5786 return 0;
5787
5788 pci_bus_lock(bus);
5789
5790 might_sleep();
5791
5792 ret = pci_bridge_secondary_bus_reset(bus->self);
5793
5794 pci_bus_unlock(bus);
5795
5796 return ret;
5797}
5798
5799
5800
5801
5802
5803
5804
5805
5806
5807int pci_bus_error_reset(struct pci_dev *bridge)
5808{
5809 struct pci_bus *bus = bridge->subordinate;
5810 struct pci_slot *slot;
5811
5812 if (!bus)
5813 return -ENOTTY;
5814
5815 mutex_lock(&pci_slot_mutex);
5816 if (list_empty(&bus->slots))
5817 goto bus_reset;
5818
5819 list_for_each_entry(slot, &bus->slots, list)
5820 if (pci_probe_reset_slot(slot))
5821 goto bus_reset;
5822
5823 list_for_each_entry(slot, &bus->slots, list)
5824 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5825 goto bus_reset;
5826
5827 mutex_unlock(&pci_slot_mutex);
5828 return 0;
5829bus_reset:
5830 mutex_unlock(&pci_slot_mutex);
5831 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5832}
5833
5834
5835
5836
5837
5838
5839
5840int pci_probe_reset_bus(struct pci_bus *bus)
5841{
5842 return pci_bus_reset(bus, PCI_RESET_PROBE);
5843}
5844EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5845
5846
5847
5848
5849
5850
5851
5852static int __pci_reset_bus(struct pci_bus *bus)
5853{
5854 int rc;
5855
5856 rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5857 if (rc)
5858 return rc;
5859
5860 if (pci_bus_trylock(bus)) {
5861 pci_bus_save_and_disable_locked(bus);
5862 might_sleep();
5863 rc = pci_bridge_secondary_bus_reset(bus->self);
5864 pci_bus_restore_locked(bus);
5865 pci_bus_unlock(bus);
5866 } else
5867 rc = -EAGAIN;
5868
5869 return rc;
5870}
5871
5872
5873
5874
5875
5876
5877
5878int pci_reset_bus(struct pci_dev *pdev)
5879{
5880 return (!pci_probe_reset_slot(pdev->slot)) ?
5881 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5882}
5883EXPORT_SYMBOL_GPL(pci_reset_bus);
5884
5885
5886
5887
5888
5889
5890
5891
5892int pcix_get_max_mmrbc(struct pci_dev *dev)
5893{
5894 int cap;
5895 u32 stat;
5896
5897 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5898 if (!cap)
5899 return -EINVAL;
5900
5901 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5902 return -EINVAL;
5903
5904 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5905}
5906EXPORT_SYMBOL(pcix_get_max_mmrbc);
5907
5908
5909
5910
5911
5912
5913
5914
5915int pcix_get_mmrbc(struct pci_dev *dev)
5916{
5917 int cap;
5918 u16 cmd;
5919
5920 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5921 if (!cap)
5922 return -EINVAL;
5923
5924 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5925 return -EINVAL;
5926
5927 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5928}
5929EXPORT_SYMBOL(pcix_get_mmrbc);

/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count; some bridges have
 * errata that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
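
/*
 * Worked example of the MMRBC encoding used above (explanatory comment
 * only): the PCI-X command register stores the maximum memory read byte
 * count as a 2-bit code c in PCI_X_CMD_MAX_READ, where mmrbc = 512 << c.
 * For mmrbc = 2048, ffs(2048) = 12, so c = 12 - 10 = 2, and decoding
 * gives 512 << 2 = 2048.  The -E2BIG check compares this code against
 * the device's designed maximum from PCI_X_STATUS.
 */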

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;
	int ret;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to keep the host bridge from
	 * generating requests larger than we can cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_READRQ, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_readrq);
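
/*
 * Usage sketch (illustrative comment only): a driver streaming large DMA
 * reads might raise its Max_Read_Request_Size during probe.  The
 * 4096-byte value is an assumption for the example; real drivers should
 * pick what their hardware and workload actually benefit from.
 *
 *	if (pcie_get_readrq(pdev) < 4096)
 *		pcie_set_readrq(pdev, 4096);	// may be clamped to MPS
 */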

/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
EXPORT_SYMBOL(pcie_get_mps);

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;
	int ret;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_PAYLOAD, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_mps);
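
/*
 * Worked example of the MPS encoding used above (explanatory comment
 * only): Device Control stores the payload size as a 3-bit code c in
 * PCI_EXP_DEVCTL_PAYLOAD (bits 7:5), where mps = 128 << c.  For
 * mps = 256, ffs(256) = 9, so c = 9 - 8 = 1; the code is rejected with
 * -EINVAL if it exceeds the device's pcie_mpss capability.
 */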

/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	u16 lnksta;
	enum pci_bus_speed next_speed;
	enum pcie_link_width next_width;
	u32 bw, next_bw;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = PCIE_LNK_WIDTH_UNKNOWN;

	bw = 0;

	while (dev) {
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);

		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			PCI_EXP_LNKSTA_NLW_SHIFT;

		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

		/* Check if current device limits the total bandwidth */
		if (!bw || next_bw <= bw) {
			bw = next_bw;

			if (limiting_dev)
				*limiting_dev = dev;
			if (speed)
				*speed = next_speed;
			if (width)
				*width = next_width;
		}

		dev = pci_upstream_bridge(dev);
	}

	return bw;
}
EXPORT_SYMBOL(pcie_bandwidth_available);
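
/*
 * Usage sketch (illustrative comment only): query the bottleneck link on
 * the path to the root and log it.  The message wording is an assumption
 * for the example, not output this file produces.
 *
 *	struct pci_dev *limit = NULL;
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 *	pci_info(pdev, "%u Mb/s available, limited at %s\n",
 *		 bw, limit ? pci_name(limit) : "unknown");
 */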

/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.  Return the maximum link speed
 * supported by the device.
 */
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
{
	u32 lnkcap2, lnkcap;

	/*
	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
	 * implementation note there recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * must use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);

	/* PCIe r3.0-compliant */
	if (lnkcap2)
		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
		return PCIE_SPEED_5_0GT;
	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
		return PCIE_SPEED_2_5GT;

	return PCI_SPEED_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_speed_cap);

/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability.  Return the maximum link width
 * supported by the device.
 */
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
{
	u32 lnkcap;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	return PCIE_LNK_WIDTH_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_width_cap);

/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead.  The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
			   enum pcie_link_width *width)
{
	*speed = pcie_get_speed_cap(dev);
	*width = pcie_get_width_cap(dev);

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return 0;

	return *width * PCIE_SPEED2MBS_ENC(*speed);
}

/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance.  If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 pci_speed_string(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);
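
/*
 * Usage sketch (illustrative comment only): network and storage drivers
 * commonly call this once from probe so that a slotting mistake (for
 * example, a x16-capable NIC in a x4 slot) shows up in the kernel log:
 *
 *	static int mynic_probe(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		...
 *		pcie_print_link_status(pdev);
 *		...
 *	}
 */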

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);
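
/*
 * Usage sketch (illustrative comment only): select all MMIO BARs and
 * request just those regions.  "mydrv" is a placeholder name.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int rc = pci_request_selected_regions(pdev, bars, "mydrv");
 *
 *	if (rc)
 *		return rc;
 */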

/* Arch-specific hook for controlling VGA decode state; NULL if unused */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
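
/*
 * Usage sketch (illustrative comment only): DMA aliases are normally
 * added from a header fixup quirk so they exist before IOMMU groups are
 * built.  The vendor/device IDs below are hypothetical placeholders.
 *
 *	static void quirk_alias_devfn(struct pci_dev *dev)
 *	{
 *		// Device also emits DMA as function 1 of its slot
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_alias_devfn);
 */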

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;

	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another PCI device on another PCI bus.
 * For example, platforms that route all DMA through a companion device can
 * return that device here; the default is the device itself.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			if (align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = PAGE_SHIFT;
			}
		} else {
			align_order = PAGE_SHIFT;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
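
/*
 * Parameter format accepted by the parser above: a list of entries, each
 * an optional "<order>@" prefix followed by a device match string handled
 * by pci_dev_str_match(), separated by ';' or ','.  For example (the
 * address is a hypothetical placeholder):
 *
 *	pci=resource_alignment=12@0000:01:00.0
 *
 * requests 2^12 = 4096-byte alignment for device 0000:01:00.0; omitting
 * "<order>@" defaults the order to PAGE_SHIFT.
 */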

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * resources and we use the second.
	 */

	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There's no way to influence their alignment here.
	 */
	if (dev->is_virtfn)
		return;

	/* Check if specified in command line with pci=resource_alignment= */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param, *old, *end;

	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	param = kstrndup(buf, count, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	end = strchr(param, '\n');
	if (end)
		*end = '\0';

	spin_lock(&resource_alignment_lock);
	old = resource_alignment_param;
	if (strlen(param)) {
		resource_alignment_param = param;
	} else {
		kfree(param);
		resource_alignment_param = NULL;
	}
	spin_unlock(&resource_alignment_lock);

	kfree(old);

	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (i.e., domain < 0), and
	 * we have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by using
	 * pci_get_new_domain_nr() and update the use_dt_domains value to
	 * keep track of the method we are using to assign domain numbers
	 * (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
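
/*
 * Example command line handled by pci_setup() above (illustrative; the
 * sizes are arbitrary):
 *
 *	pci=realloc,hpiosize=4k,hpmemsize=128M,pcie_bus_safe
 *
 * Options are comma-separated; unrecognized ones are reported with
 * "PCI: Unknown option" rather than rejected.
 */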

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete. We can't allocate memory
 * in pci_setup() because some architectures do not have any memory allocation
 * service available during an early_param() call. So we allocate memory and
 * copy the variable here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);