// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
        struct list_head list;
        struct pci_dev *dev;
};

#define PME_TIMEOUT 1000

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
        unsigned int delay = dev->d3hot_delay;

        if (delay < pci_pm_d3hot_delay)
                delay = pci_pm_d3hot_delay;

        if (delay)
                msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)

unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)

unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;

unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

unsigned int pcibios_max_latency = 255;

static bool pcie_ari_disabled;

static bool pcie_ats_disabled;

bool pci_early_dump;

bool pci_ats_disabled(void)
{
        return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

static bool pci_bridge_d3_disable;

static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
        if (!strcmp(str, "off"))
                pci_bridge_d3_disable = true;
        else if (!strcmp(str, "force"))
                pci_bridge_d3_force = true;
        return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

#define PCIE_RESET_READY_POLL_MS 60000

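/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */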
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
        struct pci_bus *tmp;
        unsigned char max, n;

        max = bus->busn_res.end;
        list_for_each_entry(tmp, &bus->children, node) {
                n = pci_bus_max_busnr(tmp);
                if (n > max)
                        max = n;
        }
        return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
        u16 status;
        int ret;

        ret = pci_read_config_word(pdev, PCI_STATUS, &status);
        if (ret != PCIBIOS_SUCCESSFUL)
                return -EIO;

        status &= PCI_STATUS_ERROR_BITS;
        if (status)
                pci_write_config_word(pdev, PCI_STATUS, status);

        return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
        struct resource *res = &pdev->resource[bar];

        if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
                pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
                return NULL;
        }
        return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
        if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
                WARN_ON(1);
                return NULL;
        }
        return ioremap_wc(pci_resource_start(pdev, bar),
                          pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

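/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against device renumbering than a single bus, device
 * and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */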
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
                                  const char **endptr)
{
        int ret;
        int seg, bus, slot, func;
        char *wpath, *p;
        char end;

        *endptr = strchrnul(path, ';');

        wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
        if (!wpath)
                return -ENOMEM;

        while (1) {
                p = strrchr(wpath, '/');
                if (!p)
                        break;
                ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
                if (ret != 2) {
                        ret = -EINVAL;
                        goto free_and_exit;
                }

                if (dev->devfn != PCI_DEVFN(slot, func)) {
                        ret = 0;
                        goto free_and_exit;
                }

                dev = pci_upstream_bridge(dev);
                if (!dev) {
                        ret = 0;
                        goto free_and_exit;
                }

                *p = 0;
        }

        ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
                     &func, &end);
        if (ret != 4) {
                seg = 0;
                ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
                if (ret != 3) {
                        ret = -EINVAL;
                        goto free_and_exit;
                }
        }

        ret = (seg == pci_domain_nr(dev->bus) &&
               bus == dev->bus->number &&
               dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
        kfree(wpath);
        return ret;
}

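/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address, optionally
 * followed by a path of device/function numbers.  The second format matches
 * devices using IDs in the configuration space, which may match multiple
 * devices in the system; a value of 0 for any field matches all devices.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */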
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
                             const char **endptr)
{
        int ret;
        int count;
        unsigned short vendor, device, subsystem_vendor, subsystem_device;

        if (strncmp(p, "pci:", 4) == 0) {
                p += 4;
                ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
                             &subsystem_vendor, &subsystem_device, &count);
                if (ret != 4) {
                        ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
                        if (ret != 2)
                                return -EINVAL;

                        subsystem_vendor = 0;
                        subsystem_device = 0;
                }

                p += count;

                if ((!vendor || vendor == dev->vendor) &&
                    (!device || device == dev->device) &&
                    (!subsystem_vendor ||
                            subsystem_vendor == dev->subsystem_vendor) &&
                    (!subsystem_device ||
                            subsystem_device == dev->subsystem_device))
                        goto found;
        } else {
                ret = pci_dev_str_match_path(dev, p, &p);
                if (ret < 0)
                        return ret;
                else if (ret)
                        goto found;
        }

        *endptr = p;
        return 0;

found:
        *endptr = p;
        return 1;
}

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
                                  u8 pos, int cap, int *ttl)
{
        u8 id;
        u16 ent;

        pci_bus_read_config_byte(bus, devfn, pos, &pos);

        while ((*ttl)--) {
                if (pos < 0x40)
                        break;
                pos &= ~3;
                pci_bus_read_config_word(bus, devfn, pos, &ent);

                id = ent & 0xff;
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos = (ent >> 8);
        }
        return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
                              u8 pos, int cap)
{
        int ttl = PCI_FIND_CAP_TTL;

        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
        return __pci_find_next_cap(dev->bus, dev->devfn,
                                   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
                                   unsigned int devfn, u8 hdr_type)
{
        u16 status;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                return PCI_CAPABILITY_LIST;
        case PCI_HEADER_TYPE_CARDBUS:
                return PCI_CB_CAPABILITY_LIST;
        }

        return 0;
}

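/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include %PCI_CAP_ID_PM,
 * %PCI_CAP_ID_MSI, %PCI_CAP_ID_PCIX and %PCI_CAP_ID_EXP.
 */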
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
        u8 pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

        return pos;
}
EXPORT_SYMBOL(pci_find_capability);

u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        u8 hdr_type, pos;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
        if (pos)
                pos = __pci_find_next_cap(bus, devfn, pos, cap);

        return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

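/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */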
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
        u32 header;
        int ttl;
        u16 pos = PCI_CFG_SPACE_SIZE;

        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
                return 0;

        if (start)
                pos = start;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

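/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.  The serial number is two dwords starting 4 bytes past the
 * capability position; the first dword is the lower half and the second
 * dword is the upper half.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */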
u64 pci_get_dsn(struct pci_dev *dev)
{
        u32 dword;
        u64 dsn;
        int pos;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
        if (!pos)
                return 0;

        pos += 4;
        pci_read_config_dword(dev, pos, &dword);
        dsn = (u64)dword;
        pci_read_config_dword(dev, pos + 4, &dword);
        dsn |= ((u64)dword) << 32;

        return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
        int rc, ttl = PCI_FIND_CAP_TTL;
        u8 cap, mask;

        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
                mask = HT_3BIT_CAP_MASK;
        else
                mask = HT_5BIT_CAP_MASK;

        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
                                      PCI_CAP_ID_HT, &ttl);
        while (pos) {
                rc = pci_read_config_byte(dev, pos + 3, &cap);
                if (rc != PCIBIOS_SUCCESSFUL)
                        return 0;

                if ((cap & mask) == ht_cap)
                        return pos;

                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
                                              pos + PCI_CAP_LIST_NEXT,
                                              PCI_CAP_ID_HT, &ttl);
        }

        return 0;
}

u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
        u8 pos;

        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
        if (pos)
                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

        return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

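/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */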
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
        u16 vsec = 0;
        u32 header;

        if (vendor != dev->vendor)
                return 0;

        while ((vsec = pci_find_next_ext_capability(dev, vsec,
                                                     PCI_EXT_CAP_ID_VNDR))) {
                if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
                                          &header) == PCIBIOS_SUCCESSFUL &&
                    PCI_VNDR_HEADER_ID(header) == cap)
                        return vsec;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

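/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */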
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
                                          struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        struct resource *r;
        int i;

        pci_bus_for_each_resource(bus, r, i) {
                if (!r)
                        continue;
                if (resource_contains(r, res)) {

                        if (r->flags & IORESOURCE_PREFETCH &&
                            !(res->flags & IORESOURCE_PREFETCH))
                                return NULL;

                        return r;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct resource *r = &dev->resource[i];

                if (r->start && resource_contains(r, res))
                        return r;
        }

        return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

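/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */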
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
        int i;

        for (i = 0; i < 4; i++) {
                u16 status;
                if (i)
                        msleep((1 << (i - 1)) * 100);

                pci_read_config_word(dev, pos, &status);
                if (!(status & mask))
                        return 1;
        }

        return 0;
}

static int pci_acs_enable;

void pci_request_acs(void)
{
        pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

static void pci_disable_acs_redir(struct pci_dev *dev)
{
        int ret = 0;
        const char *p;
        int pos;
        u16 ctrl;

        if (!disable_acs_redir_param)
                return;

        p = disable_acs_redir_param;
        while (*p) {
                ret = pci_dev_str_match(dev, p, &p);
                if (ret < 0) {
                        pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
                                     disable_acs_redir_param);
                        break;
                } else if (ret == 1) {
                        break;
                }

                if (*p != ';' && *p != ',') {
                        break;
                }
                p++;
        }

        if (ret != 1)
                return;

        if (!pci_dev_specific_disable_acs_redir(dev))
                return;

        pos = dev->acs_cap;
        if (!pos) {
                pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
                return;
        }

        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

        ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

        pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
        int pos;
        u16 cap;
        u16 ctrl;

        pos = dev->acs_cap;
        if (!pos)
                return;

        pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

        /* Source Validation */
        ctrl |= (cap & PCI_ACS_SV);

        /* P2P Request Redirect */
        ctrl |= (cap & PCI_ACS_RR);

        /* P2P Completion Redirect */
        ctrl |= (cap & PCI_ACS_CR);

        /* Upstream Forwarding */
        ctrl |= (cap & PCI_ACS_UF);

        /* Enable Translation Blocking for external-facing devices */
        if (dev->external_facing || dev->untrusted)
                ctrl |= (cap & PCI_ACS_TB);

        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
        if (!pci_acs_enable)
                goto disable_acs_redir;

        if (!pci_dev_specific_enable_acs(dev))
                goto disable_acs_redir;

        pci_std_enable_acs(dev);

disable_acs_redir:
        /*
         * Note: pci_disable_acs_redir() must be called even if ACS was not
         * enabled by the kernel because it may have been enabled by
         * platform firmware.  So if we are told to disable it, we should
         * always disable it after setting the kernel's default
         * preferences.
         */
        pci_disable_acs_redir(dev);
}

static void pci_restore_bars(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
                pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
        if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
            !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
                return -EINVAL;
        pci_platform_pm = ops;
        return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
                                               pci_power_t t)
{
        return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
        if (pci_platform_pm && pci_platform_pm->refresh_state)
                pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
        return pci_platform_pm ?
                        pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
        return pci_platform_pm ?
                        pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
        return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
        if (pci_platform_pm && pci_platform_pm->bridge_d3)
                return pci_platform_pm->bridge_d3(dev);
        return false;
}

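/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */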
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        u16 pmcsr;
        bool need_restore = false;

        if (dev->current_state == state)
                return 0;

        if (!dev->pm_cap)
                return -EIO;

        if (state < PCI_D0 || state > PCI_D3hot)
                return -EINVAL;

        if (state != PCI_D0 && dev->current_state <= PCI_D3cold
            && dev->current_state > state) {
                pci_err(dev, "invalid power transition (from %s to %s)\n",
                        pci_power_name(dev->current_state),
                        pci_power_name(state));
                return -EINVAL;
        }

        if ((state == PCI_D1 && !dev->d1_support)
           || (state == PCI_D2 && !dev->d2_support))
                return -EIO;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (pmcsr == (u16) ~0) {
                pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
                        pci_power_name(dev->current_state),
                        pci_power_name(state));
                return -EIO;
        }

        switch (dev->current_state) {
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= state;
                break;
        case PCI_D3hot:
        case PCI_D3cold:
        case PCI_UNKNOWN:
                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
                 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
                        need_restore = true;
                fallthrough;
        default:
                pmcsr = 0;
                break;
        }

        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(PCI_PM_D2_DELAY);

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        if (dev->current_state != state)
                pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
                                     pci_power_name(dev->current_state),
                                     pci_power_name(state));

        if (need_restore)
                pci_restore_bars(dev);

        if (dev->bus->self)
                pcie_aspm_pm_state_change(dev->bus->self);

        return 0;
}

void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
        if (platform_pci_get_power_state(dev) == PCI_D3cold ||
            !pci_device_is_present(dev)) {
                dev->current_state = PCI_D3cold;
        } else if (dev->pm_cap) {
                u16 pmcsr;

                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        } else {
                dev->current_state = state;
        }
}

void pci_refresh_power_state(struct pci_dev *dev)
{
        if (platform_pci_power_manageable(dev))
                platform_pci_refresh_power_state(dev);

        pci_update_current_state(dev, dev->current_state);
}

int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
        int error;

        if (platform_pci_power_manageable(dev)) {
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
        } else
                error = -ENODEV;

        if (error && !dev->pm_cap)
                dev->current_state = PCI_D0;

        return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
        pm_request_resume(&pci_dev->dev);
        return 0;
}

void pci_resume_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
        int delay = 1;
        u32 id;

        pci_read_config_dword(dev, PCI_COMMAND, &id);
        while (id == ~0) {
                if (delay > timeout) {
                        pci_warn(dev, "not ready %dms after %s; giving up\n",
                                 delay - 1, reset_type);
                        return -ENOTTY;
                }

                if (delay > 1000)
                        pci_info(dev, "not ready %dms after %s; waiting\n",
                                 delay - 1, reset_type);

                msleep(delay);
                delay *= 2;
                pci_read_config_dword(dev, PCI_COMMAND, &id);
        }

        if (delay > 1000)
                pci_info(dev, "ready %dms after %s\n", delay - 1,
                         reset_type);

        return 0;
}

int pci_power_up(struct pci_dev *dev)
{
        pci_platform_power_transition(dev, PCI_D0);

        if (dev->runtime_d3cold) {
                pci_resume_bus(dev->subordinate);
        }

        return pci_raw_set_power_state(dev, PCI_D0);
}

static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
        pci_power_t state = *(pci_power_t *)data;

        dev->current_state = state;
        return 0;
}

void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
        if (bus)
                pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

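/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware
 * and/or the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */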
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        int error;

        /* Bound the state we're entering */
        if (state > PCI_D3cold)
                state = PCI_D3cold;
        else if (state < PCI_D0)
                state = PCI_D0;
        else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
                return 0;

        if (dev->current_state == state)
                return 0;

        if (state == PCI_D0)
                return pci_power_up(dev);

        if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
                return 0;

        error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
                                        PCI_D3hot : state);

        if (pci_platform_power_transition(dev, state))
                return error;

        if (state == PCI_D3cold)
                pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

        return 0;
}
EXPORT_SYMBOL(pci_set_power_state);

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
        pci_power_t ret;

        if (!dev->pm_cap)
                return PCI_D0;

        ret = platform_pci_choose_state(dev);
        if (ret != PCI_POWER_ERROR)
                return ret;

        switch (state.event) {
        case PM_EVENT_ON:
                return PCI_D0;
        case PM_EVENT_FREEZE:
        case PM_EVENT_PRETHAW:
        case PM_EVENT_SUSPEND:
        case PM_EVENT_HIBERNATE:
                return PCI_D3hot;
        default:
                pci_info(dev, "unrecognized suspend event %d\n",
                         state.event);
                BUG();
        }
        return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
                                                       u16 cap, bool extended)
{
        struct pci_cap_saved_state *tmp;

        hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
                if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
                        return tmp;
        }
        return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
        return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
        return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state) {
                pci_err(dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
        pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

        return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
        int i = 0;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
        pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
        int pos;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!pos)
                return 0;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        if (!save_state) {
                pci_err(dev, "buffer not found in %s\n", __func__);
                return -ENOMEM;
        }

        pci_read_config_word(dev, pos + PCI_X_CMD,
                             (u16 *)save_state->cap.data);

        return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
        int i = 0, pos;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!save_state || !pos)
                return;
        cap = (u16 *)&save_state->cap.data[0];

        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
        int ltr;
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return;

        ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
        if (!ltr)
                return;

        save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
        if (!save_state) {
                pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
                return;
        }

        cap = (u16 *)&save_state->cap.data[0];
        pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
        pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
        struct pci_cap_saved_state *save_state;
        int ltr;
        u16 *cap;

        save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
        ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
        if (!save_state || !ltr)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
        pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

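/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 *
 * A typical (hypothetical) caller pairs this with pci_restore_state(),
 * e.g. in a driver's suspend hook:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */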
int pci_save_state(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < 16; i++) {
                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
                pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
                        i * 4, dev->saved_config_space[i]);
        }
        dev->state_saved = true;

        i = pci_save_pcie_state(dev);
        if (i != 0)
                return i;

        i = pci_save_pcix_state(dev);
        if (i != 0)
                return i;

        pci_save_ltr_state(dev);
        pci_save_dpc_state(dev);
        pci_save_aer_state(dev);
        pci_save_ptm_state(dev);
        return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
                                     u32 saved_val, int retry, bool force)
{
        u32 val;

        pci_read_config_dword(pdev, offset, &val);
        if (!force && val == saved_val)
                return;

        for (;;) {
                pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
                        offset, val, saved_val);
                pci_write_config_dword(pdev, offset, saved_val);
                if (retry-- <= 0)
                        return;

                pci_read_config_dword(pdev, offset, &val);
                if (val == saved_val)
                        return;

                mdelay(1);
        }
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
                                           int start, int end, int retry,
                                           bool force)
{
        int index;

        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
                                         retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
                pci_restore_config_space_range(pdev, 10, 15, 0, false);

                pci_restore_config_space_range(pdev, 4, 9, 10, false);
                pci_restore_config_space_range(pdev, 0, 3, 0, false);
        } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
                pci_restore_config_space_range(pdev, 12, 15, 0, false);

                pci_restore_config_space_range(pdev, 9, 11, 0, true);
                pci_restore_config_space_range(pdev, 0, 8, 0, false);
        } else {
                pci_restore_config_space_range(pdev, 0, 15, 0, false);
        }
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
        unsigned int pos, nbars, i;
        u32 ctrl;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
        if (!pos)
                return;

        pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
        nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
                    PCI_REBAR_CTRL_NBAR_SHIFT;

        for (i = 0; i < nbars; i++, pos += 8) {
                struct resource *res;
                int bar_idx, size;

                pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
                bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
                res = pdev->resource + bar_idx;
                size = pci_rebar_bytes_to_size(resource_size(res));
                ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
                ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
                pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
        }
}

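/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */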
void pci_restore_state(struct pci_dev *dev)
{
        if (!dev->state_saved)
                return;

        pci_restore_ltr_state(dev);

        pci_restore_pcie_state(dev);
        pci_restore_pasid_state(dev);
        pci_restore_pri_state(dev);
        pci_restore_ats_state(dev);
        pci_restore_vc_state(dev);
        pci_restore_rebar_state(dev);
        pci_restore_dpc_state(dev);
        pci_restore_ptm_state(dev);

        pci_aer_clear_status(dev);
        pci_restore_aer_state(dev);

        pci_restore_config_space(dev);

        pci_restore_pcix_state(dev);
        pci_restore_msi_state(dev);

        pci_enable_acs(dev);
        pci_restore_iov_state(dev);

        dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
        u32 config_space[16];
        struct pci_cap_saved_data cap[];
};

struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
        struct pci_saved_state *state;
        struct pci_cap_saved_state *tmp;
        struct pci_cap_saved_data *cap;
        size_t size;

        if (!dev->state_saved)
                return NULL;

        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

        hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

        state = kzalloc(size, GFP_KERNEL);
        if (!state)
                return NULL;

        memcpy(state->config_space, dev->saved_config_space,
               sizeof(state->config_space));

        cap = state->cap;
        hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                memcpy(cap, &tmp->cap, len);
                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
        }

        return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

int pci_load_saved_state(struct pci_dev *dev,
                         struct pci_saved_state *state)
{
        struct pci_cap_saved_data *cap;

        dev->state_saved = false;

        if (!state)
                return 0;

        memcpy(dev->saved_config_space, state->config_space,
               sizeof(state->config_space));

        cap = state->cap;
        while (cap->size) {
                struct pci_cap_saved_state *tmp;

                tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
                if (!tmp || tmp->cap.size != cap->size)
                        return -EINVAL;

                memcpy(tmp->cap.data, cap->data, tmp->cap.size);
                cap = (struct pci_cap_saved_data *)((u8 *)cap +
                       sizeof(struct pci_cap_saved_data) + cap->size);
        }

        dev->state_saved = true;
        return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

int pci_load_and_free_saved_state(struct pci_dev *dev,
                                  struct pci_saved_state **state)
{
        int ret = pci_load_saved_state(dev, *state);
        kfree(*state);
        *state = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
        return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
        int err;
        struct pci_dev *bridge;
        u16 cmd;
        u8 pin;

        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)
                return err;

        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pcie_aspm_powersave_config_link(bridge);

        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
        pci_fixup_device(pci_fixup_enable, dev);

        if (dev->msi_enabled || dev->msix_enabled)
                return 0;

        pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
        if (pin) {
                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                if (cmd & PCI_COMMAND_INTX_DISABLE)
                        pci_write_config_word(dev, PCI_COMMAND,
                                              cmd & ~PCI_COMMAND_INTX_DISABLE);
        }

        return 0;
}

int pci_reenable_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
        return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
        struct pci_dev *bridge;
        int retval;

        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pci_enable_bridge(bridge);

        if (pci_is_enabled(dev)) {
                if (!dev->is_busmaster)
                        pci_set_master(dev);
                return;
        }

        retval = pci_enable_device(dev);
        if (retval)
                pci_err(dev, "Error enabling bridge (%d), continuing\n",
                        retval);
        pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
        struct pci_dev *bridge;
        int err;
        int i, bars = 0;

        if (dev->pm_cap) {
                u16 pmcsr;
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }

        if (atomic_inc_return(&dev->enable_cnt) > 1)
                return 0;

        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pci_enable_bridge(bridge);

        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                atomic_dec(&dev->enable_cnt);
        return err;
}

int pci_enable_device_io(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

int pci_enable_device_mem(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

int pci_enable_device(struct pci_dev *dev)
{
        return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);

struct pci_devres {
        unsigned int enabled:1;
        unsigned int pinned:1;
        unsigned int orig_intx:1;
        unsigned int restore_intx:1;
        unsigned int mwi:1;
        u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = to_pci_dev(gendev);
        struct pci_devres *this = res;
        int i;

        if (dev->msi_enabled)
                pci_disable_msi(dev);
        if (dev->msix_enabled)
                pci_disable_msix(dev);

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
                if (this->region_mask & (1 << i))
                        pci_release_region(dev, i);

        if (this->mwi)
                pci_clear_mwi(dev);

        if (this->restore_intx)
                pci_intx(dev, this->orig_intx);

        if (this->enabled && !this->pinned)
                pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
        struct pci_devres *dr, *new_dr;

        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
        if (dr)
                return dr;

        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
        if (!new_dr)
                return NULL;
        return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
        if (pci_is_managed(pdev))
                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
        return NULL;
}

int pcim_enable_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;
        int rc;

        dr = get_pci_dr(pdev);
        if (unlikely(!dr))
                return -ENOMEM;
        if (dr->enabled)
                return 0;

        rc = pci_enable_device(pdev);
        if (!rc) {
                pdev->is_managed = 1;
                dr->enabled = 1;
        }
        return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

void pcim_pin_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(pdev);
        WARN_ON(!dr || !dr->enabled);
        if (dr)
                dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

int __weak pcibios_add_device(struct pci_dev *dev)
{
        return 0;
}

void __weak pcibios_release_device(struct pci_dev *dev) {}

void __weak pcibios_disable_device(struct pci_dev *dev) {}

void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
        u16 pci_command;

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        if (pci_command & PCI_COMMAND_MASTER) {
                pci_command &= ~PCI_COMMAND_MASTER;
                pci_write_config_word(dev, PCI_COMMAND, pci_command);
        }

        pcibios_disable_device(dev);
}

void pci_disable_enabled_device(struct pci_dev *dev)
{
        if (pci_is_enabled(dev))
                do_pci_disable_device(dev);
}

void pci_disable_device(struct pci_dev *dev)
{
        struct pci_devres *dr;

        dr = find_pci_dr(dev);
        if (dr)
                dr->enabled = 0;

        dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
                      "disabling already-disabled device");

        if (atomic_dec_return(&dev->enable_cnt) != 0)
                return;

        do_pci_disable_device(dev);

        dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
                                        enum pcie_reset_state state)
{
        return -EINVAL;
}

int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
        return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

void pcie_clear_device_status(struct pci_dev *dev)
{
        u16 sta;

        pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
        pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}

void pcie_clear_root_pme_status(struct pci_dev *dev)
{
        pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

bool pci_check_pme_status(struct pci_dev *dev)
{
        int pmcsr_pos;
        u16 pmcsr;
        bool ret = false;

        if (!dev->pm_cap)
                return false;

        pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
        pci_read_config_word(dev, pmcsr_pos, &pmcsr);
        if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
                return false;

        pmcsr |= PCI_PM_CTRL_PME_STATUS;
        if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
                ret = true;
        }

        pci_write_config_word(dev, pmcsr_pos, pmcsr);

        return ret;
}

static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
        if (pme_poll_reset && dev->pme_poll)
                dev->pme_poll = false;

        if (pci_check_pme_status(dev)) {
                pci_wakeup_event(dev);
                pm_request_resume(&dev->dev);
        }
        return 0;
}

void pci_pme_wakeup_bus(struct pci_bus *bus)
{
        if (bus)
                pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
        if (!dev->pm_cap)
                return false;

        return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
        struct pci_pme_device *pme_dev, *n;

        mutex_lock(&pci_pme_list_mutex);
        list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
                if (pme_dev->dev->pme_poll) {
                        struct pci_dev *bridge;

                        bridge = pme_dev->dev->bus->self;

                        if (bridge && bridge->current_state != PCI_D0)
                                continue;

                        if (pme_dev->dev->current_state == PCI_D3cold)
                                continue;

                        pci_pme_wakeup(pme_dev->dev, NULL);
                } else {
                        list_del(&pme_dev->list);
                        kfree(pme_dev);
                }
        }
        if (!list_empty(&pci_pme_list))
                queue_delayed_work(system_freezable_wq, &pci_pme_work,
                                   msecs_to_jiffies(PME_TIMEOUT));
        mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
        u16 pmcsr;

        if (!dev->pme_support)
                return;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

        pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
        if (!enable)
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

void pci_pme_restore(struct pci_dev *dev)
{
        u16 pmcsr;

        if (!dev->pme_support)
                return;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        if (dev->wakeup_prepared) {
                pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
        } else {
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
                pmcsr |= PCI_PM_CTRL_PME_STATUS;
        }
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

void pci_pme_active(struct pci_dev *dev, bool enable)
{
        __pci_pme_active(dev, enable);

        if (dev->pme_poll) {
                struct pci_pme_device *pme_dev;
                if (enable) {
                        pme_dev = kmalloc(sizeof(struct pci_pme_device),
                                          GFP_KERNEL);
                        if (!pme_dev) {
                                pci_warn(dev, "can't enable PME#\n");
                                return;
                        }
                        pme_dev->dev = dev;
                        mutex_lock(&pci_pme_list_mutex);
                        list_add(&pme_dev->list, &pci_pme_list);
                        if (list_is_singular(&pci_pme_list))
                                queue_delayed_work(system_freezable_wq,
                                                   &pci_pme_work,
                                                   msecs_to_jiffies(PME_TIMEOUT));
                        mutex_unlock(&pci_pme_list_mutex);
                } else {
                        mutex_lock(&pci_pme_list_mutex);
                        list_for_each_entry(pme_dev, &pci_pme_list, list) {
                                if (pme_dev->dev == dev) {
                                        list_del(&pme_dev->list);
                                        kfree(pme_dev);
                                        break;
                                }
                        }
                        mutex_unlock(&pci_pme_list_mutex);
                }
        }

        pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

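/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * RETURN VALUE:
 * 0 is returned on success.
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events.
 */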
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
        int ret = 0;

        if (!pci_power_manageable(dev))
                return 0;

        /* Don't do the same thing twice in a row for one device. */
        if (!!enable == !!dev->wakeup_prepared)
                return 0;

        if (enable) {
                int error;

                if (pci_pme_capable(dev, state))
                        pci_pme_active(dev, true);
                else
                        ret = 1;
                error = platform_pci_set_wakeup(dev, true);
                if (ret)
                        ret = error;
                if (!ret)
                        dev->wakeup_prepared = true;
        } else {
                platform_pci_set_wakeup(dev, false);
                pci_pme_active(dev, false);
                dev->wakeup_prepared = false;
        }

        return ret;
}

int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
        if (enable && !device_may_wakeup(&pci_dev->dev))
                return -EINVAL;

        return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
        return pci_pme_capable(dev, PCI_D3cold) ?
                        pci_enable_wake(dev, PCI_D3cold, enable) :
                        pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

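/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */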
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
        pci_power_t target_state = PCI_D3hot;

        if (platform_pci_power_manageable(dev)) {
                pci_power_t state = platform_pci_choose_state(dev);

                switch (state) {
                case PCI_POWER_ERROR:
                case PCI_UNKNOWN:
                        break;
                case PCI_D1:
                case PCI_D2:
                        if (pci_no_d1d2(dev))
                                break;
                        fallthrough;
                default:
                        target_state = state;
                }

                return target_state;
        }

        if (!dev->pm_cap)
                target_state = PCI_D0;

        if (dev->current_state == PCI_D3cold)
                target_state = PCI_D3cold;

        if (wakeup) {
                if (dev->pme_support) {
                        while (target_state
                              && !(dev->pme_support & (1 << target_state)))
                                target_state--;
                }
        }

        return target_state;
}

int pci_prepare_to_sleep(struct pci_dev *dev)
{
        bool wakeup = device_may_wakeup(&dev->dev);
        pci_power_t target_state = pci_target_state(dev, wakeup);
        int error;

        if (target_state == PCI_POWER_ERROR)
                return -EIO;

        if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
                pci_disable_ptm(dev);

        pci_enable_wake(dev, target_state, wakeup);

        error = pci_set_power_state(dev, target_state);

        if (error) {
                pci_enable_wake(dev, target_state, false);
                pci_restore_ptm_state(dev);
        }

        return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

int pci_back_from_sleep(struct pci_dev *dev)
{
        pci_enable_wake(dev, PCI_D0, false);
        return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

int pci_finish_runtime_suspend(struct pci_dev *dev)
{
        pci_power_t target_state;
        int error;

        target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
        if (target_state == PCI_POWER_ERROR)
                return -EIO;

        dev->runtime_d3cold = target_state == PCI_D3cold;

        if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
                pci_disable_ptm(dev);

        __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

        error = pci_set_power_state(dev, target_state);

        if (error) {
                pci_enable_wake(dev, target_state, false);
                pci_restore_ptm_state(dev);
                dev->runtime_d3cold = false;
        }

        return error;
}

bool pci_dev_run_wake(struct pci_dev *dev)
{
        struct pci_bus *bus = dev->bus;

        if (!dev->pme_support)
                return false;

        /* PME-capable in principle, but not from the target power state */
        if (!pci_pme_capable(dev, pci_target_state(dev, true)))
                return false;

        if (device_can_wakeup(&dev->dev))
                return true;

        while (bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (device_can_wakeup(&bridge->dev))
                        return true;

                bus = bus->parent;
        }

        /* We have reached the root bus, so check the platform device */
        if (bus->bridge)
                return device_can_wakeup(bus->bridge);

        return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
        struct device *dev = &pci_dev->dev;
        pci_power_t target_state;

        if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
                return true;

        target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

        return target_state != pci_dev->current_state &&
                target_state != PCI_D3cold &&
                pci_dev->current_state != PCI_D3hot;
}

void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
        struct device *dev = &pci_dev->dev;

        spin_lock_irq(&dev->power.lock);

        if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
            pci_dev->current_state < PCI_D3cold)
                __pci_pme_active(pci_dev, false);

        spin_unlock_irq(&dev->power.lock);
}

void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
        struct device *dev = &pci_dev->dev;

        if (!pci_dev_run_wake(pci_dev))
                return;

        spin_lock_irq(&dev->power.lock);

        if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
                __pci_pme_active(pci_dev, true);

        spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct device *parent = dev->parent;

        if (parent)
                pm_runtime_get_sync(parent);
        pm_runtime_get_noresume(dev);

        pm_runtime_barrier(dev);

        if (pdev->current_state == PCI_D3cold)
                pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct device *parent = dev->parent;

        pm_runtime_put(dev);
        if (parent)
                pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
        {
                .ident = "X299 DESIGNARE EX-CF",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
                        DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
                },
        },
#endif
        { }
};

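/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports.
 */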
2886bool pci_bridge_d3_possible(struct pci_dev *bridge)
2887{
2888 if (!pci_is_pcie(bridge))
2889 return false;
2890
2891 switch (pci_pcie_type(bridge)) {
2892 case PCI_EXP_TYPE_ROOT_PORT:
2893 case PCI_EXP_TYPE_UPSTREAM:
2894 case PCI_EXP_TYPE_DOWNSTREAM:
2895 if (pci_bridge_d3_disable)
2896 return false;
2897
2898
2899
2900
2901
2902 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2903 return false;
2904
2905 if (pci_bridge_d3_force)
2906 return true;
2907
2908
2909 if (bridge->is_thunderbolt)
2910 return true;
2911
2912
2913 if (platform_pci_bridge_d3(bridge))
2914 return true;
2915
2916
2917
2918
2919
2920
2921 if (bridge->is_hotplug_bridge)
2922 return false;
2923
2924 if (dmi_check_system(bridge_d3_blacklist))
2925 return false;
2926
2927
2928
2929
2930
2931 if (dmi_get_bios_year() >= 2015)
2932 return true;
2933 break;
2934 }
2935
2936 return false;
2937}
2938
2939static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2940{
2941 bool *d3cold_ok = data;
2942
2943 if (
2944 dev->no_d3cold || !dev->d3cold_allowed ||
2945
2946
2947 (device_may_wakeup(&dev->dev) &&
2948 !pci_pme_capable(dev, PCI_D3cold)) ||
2949
2950
2951 !pci_power_manageable(dev))
2952
2953 *d3cold_ok = false;
2954
2955 return !*d3cold_ok;
2956}
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966void pci_bridge_d3_update(struct pci_dev *dev)
2967{
2968 bool remove = !device_is_registered(&dev->dev);
2969 struct pci_dev *bridge;
2970 bool d3cold_ok = true;
2971
2972 bridge = pci_upstream_bridge(dev);
2973 if (!bridge || !pci_bridge_d3_possible(bridge))
2974 return;
2975
2976
2977
2978
2979
2980 if (remove && bridge->bridge_d3)
2981 return;
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991 if (!remove)
2992 pci_dev_check_d3cold(dev, &d3cold_ok);
2993
2994
2995
2996
2997
2998
2999
3000 if (d3cold_ok && !bridge->bridge_d3)
3001 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3002 &d3cold_ok);
3003
3004 if (bridge->bridge_d3 != d3cold_ok) {
3005 bridge->bridge_d3 = d3cold_ok;
3006
3007 pci_bridge_d3_update(bridge);
3008 }
3009}
3010
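/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */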
3019void pci_d3cold_enable(struct pci_dev *dev)
3020{
3021 if (dev->no_d3cold) {
3022 dev->no_d3cold = false;
3023 pci_bridge_d3_update(dev);
3024 }
3025}
3026EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3027
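/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */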
3036void pci_d3cold_disable(struct pci_dev *dev)
3037{
3038 if (!dev->no_d3cold) {
3039 dev->no_d3cold = true;
3040 pci_bridge_d3_update(dev);
3041 }
3042}
3043EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3044
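/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle
 */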
3049void pci_pm_init(struct pci_dev *dev)
3050{
3051 int pm;
3052 u16 status;
3053 u16 pmc;
3054
3055 pm_runtime_forbid(&dev->dev);
3056 pm_runtime_set_active(&dev->dev);
3057 pm_runtime_enable(&dev->dev);
3058 device_enable_async_suspend(&dev->dev);
3059 dev->wakeup_prepared = false;
3060
3061 dev->pm_cap = 0;
3062 dev->pme_support = 0;
3063
3064
3065 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3066 if (!pm)
3067 return;
3068
3069 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3070
3071 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3072 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3073 pmc & PCI_PM_CAP_VER_MASK);
3074 return;
3075 }
3076
3077 dev->pm_cap = pm;
3078 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3079 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3080 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3081 dev->d3cold_allowed = true;
3082
3083 dev->d1_support = false;
3084 dev->d2_support = false;
3085 if (!pci_no_d1d2(dev)) {
3086 if (pmc & PCI_PM_CAP_D1)
3087 dev->d1_support = true;
3088 if (pmc & PCI_PM_CAP_D2)
3089 dev->d2_support = true;
3090
3091 if (dev->d1_support || dev->d2_support)
3092 pci_info(dev, "supports%s%s\n",
3093 dev->d1_support ? " D1" : "",
3094 dev->d2_support ? " D2" : "");
3095 }
3096
3097 pmc &= PCI_PM_CAP_PME_MASK;
3098 if (pmc) {
3099 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3100 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3101 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3102 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3103 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3104 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3105 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3106 dev->pme_poll = true;
3107
3108
3109
3110
3111 device_set_wakeup_capable(&dev->dev, true);
3112
3113 pci_pme_active(dev, false);
3114 }
3115
3116 pci_read_config_word(dev, PCI_STATUS, &status);
3117 if (status & PCI_STATUS_IMM_READY)
3118 dev->imm_ready = 1;
3119}
3120
3121static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3122{
3123 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3124
3125 switch (prop) {
3126 case PCI_EA_P_MEM:
3127 case PCI_EA_P_VF_MEM:
3128 flags |= IORESOURCE_MEM;
3129 break;
3130 case PCI_EA_P_MEM_PREFETCH:
3131 case PCI_EA_P_VF_MEM_PREFETCH:
3132 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3133 break;
3134 case PCI_EA_P_IO:
3135 flags |= IORESOURCE_IO;
3136 break;
3137 default:
3138 return 0;
3139 }
3140
3141 return flags;
3142}
3143
3144static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3145 u8 prop)
3146{
3147 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3148 return &dev->resource[bei];
3149#ifdef CONFIG_PCI_IOV
3150 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3151 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3152 return &dev->resource[PCI_IOV_RESOURCES +
3153 bei - PCI_EA_BEI_VF_BAR0];
3154#endif
3155 else if (bei == PCI_EA_BEI_ROM)
3156 return &dev->resource[PCI_ROM_RESOURCE];
3157 else
3158 return NULL;
3159}
3160
3161
3162static int pci_ea_read(struct pci_dev *dev, int offset)
3163{
3164 struct resource *res;
3165 int ent_size, ent_offset = offset;
3166 resource_size_t start, end;
3167 unsigned long flags;
3168 u32 dw0, bei, base, max_offset;
3169 u8 prop;
3170 bool support_64 = (sizeof(resource_size_t) >= 8);
3171
3172 pci_read_config_dword(dev, ent_offset, &dw0);
3173 ent_offset += 4;
3174
3175
3176 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3177
3178 if (!(dw0 & PCI_EA_ENABLE))
3179 goto out;
3180
3181 bei = (dw0 & PCI_EA_BEI) >> 4;
3182 prop = (dw0 & PCI_EA_PP) >> 8;
3183
3184
3185
3186
3187
3188 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3189 prop = (dw0 & PCI_EA_SP) >> 16;
3190 if (prop > PCI_EA_P_BRIDGE_IO)
3191 goto out;
3192
3193 res = pci_ea_get_resource(dev, bei, prop);
3194 if (!res) {
3195 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3196 goto out;
3197 }
3198
3199 flags = pci_ea_flags(dev, prop);
3200 if (!flags) {
3201 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3202 goto out;
3203 }
3204
3205
3206 pci_read_config_dword(dev, ent_offset, &base);
3207 start = (base & PCI_EA_FIELD_MASK);
3208 ent_offset += 4;
3209
3210
3211 pci_read_config_dword(dev, ent_offset, &max_offset);
3212 ent_offset += 4;
3213
3214
3215 if (base & PCI_EA_IS_64) {
3216 u32 base_upper;
3217
3218 pci_read_config_dword(dev, ent_offset, &base_upper);
3219 ent_offset += 4;
3220
3221 flags |= IORESOURCE_MEM_64;
3222
3223
3224 if (!support_64 && base_upper)
3225 goto out;
3226
3227 if (support_64)
3228 start |= ((u64)base_upper << 32);
3229 }
3230
3231 end = start + (max_offset | 0x03);
3232
3233
3234 if (max_offset & PCI_EA_IS_64) {
3235 u32 max_offset_upper;
3236
3237 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3238 ent_offset += 4;
3239
3240 flags |= IORESOURCE_MEM_64;
3241
3242
3243 if (!support_64 && max_offset_upper)
3244 goto out;
3245
3246 if (support_64)
3247 end += ((u64)max_offset_upper << 32);
3248 }
3249
3250 if (end < start) {
3251 pci_err(dev, "EA Entry crosses address boundary\n");
3252 goto out;
3253 }
3254
3255 if (ent_size != ent_offset - offset) {
3256 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3257 ent_size, ent_offset - offset);
3258 goto out;
3259 }
3260
3261 res->name = pci_name(dev);
3262 res->start = start;
3263 res->end = end;
3264 res->flags = flags;
3265
3266 if (bei <= PCI_EA_BEI_BAR5)
3267 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3268 bei, res, prop);
3269 else if (bei == PCI_EA_BEI_ROM)
3270 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3271 res, prop);
3272 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3273 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3274 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3275 else
3276 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3277 bei, res, prop);
3278
3279out:
3280 return offset + ent_size;
3281}
3282
3283
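/* Enhanced Allocation Initialization */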
3284void pci_ea_init(struct pci_dev *dev)
3285{
3286 int ea;
3287 u8 num_ent;
3288 int offset;
3289 int i;
3290
3291
3292 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3293 if (!ea)
3294 return;
3295
3296
3297 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3298 &num_ent);
3299 num_ent &= PCI_EA_NUM_ENT_MASK;
3300
3301 offset = ea + PCI_EA_FIRST_ENT;
3302
3303
3304 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3305 offset += 4;
3306
3307
3308 for (i = 0; i < num_ent; ++i)
3309 offset = pci_ea_read(dev, offset);
3310}
3311
3312static void pci_add_saved_cap(struct pci_dev *pci_dev,
3313 struct pci_cap_saved_state *new_cap)
3314{
3315 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3316}
3317
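/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *			      capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */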
3326static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3327 bool extended, unsigned int size)
3328{
3329 int pos;
3330 struct pci_cap_saved_state *save_state;
3331
3332 if (extended)
3333 pos = pci_find_ext_capability(dev, cap);
3334 else
3335 pos = pci_find_capability(dev, cap);
3336
3337 if (!pos)
3338 return 0;
3339
3340 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3341 if (!save_state)
3342 return -ENOMEM;
3343
3344 save_state->cap.cap_nr = cap;
3345 save_state->cap.cap_extended = extended;
3346 save_state->cap.size = size;
3347 pci_add_saved_cap(dev, save_state);
3348
3349 return 0;
3350}
3351
3352int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3353{
3354 return _pci_add_cap_save_buffer(dev, cap, false, size);
3355}
3356
3357int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3358{
3359 return _pci_add_cap_save_buffer(dev, cap, true, size);
3360}
3361
3362
3363
3364
3365
3366void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3367{
3368 int error;
3369
3370 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3371 PCI_EXP_SAVE_REGS * sizeof(u16));
3372 if (error)
3373 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3374
3375 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3376 if (error)
3377 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3378
3379 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3380 2 * sizeof(u16));
3381 if (error)
3382 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3383
3384 pci_allocate_vc_save_buffers(dev);
3385}
3386
3387void pci_free_cap_save_buffers(struct pci_dev *dev)
3388{
3389 struct pci_cap_saved_state *tmp;
3390 struct hlist_node *n;
3391
3392 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3393 kfree(tmp);
3394}
3395
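/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */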
3403void pci_configure_ari(struct pci_dev *dev)
3404{
3405 u32 cap;
3406 struct pci_dev *bridge;
3407
3408 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3409 return;
3410
3411 bridge = dev->bus->self;
3412 if (!bridge)
3413 return;
3414
3415 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3416 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3417 return;
3418
3419 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3420 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3421 PCI_EXP_DEVCTL2_ARI);
3422 bridge->ari_enabled = 1;
3423 } else {
3424 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3425 PCI_EXP_DEVCTL2_ARI);
3426 bridge->ari_enabled = 0;
3427 }
3428}
3429
3430static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3431{
3432 int pos;
3433 u16 cap, ctrl;
3434
3435 pos = pdev->acs_cap;
3436 if (!pos)
3437 return false;
3438
3439
3440
3441
3442
3443
3444 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3445 acs_flags &= (cap | PCI_ACS_EC);
3446
3447 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3448 return (ctrl & acs_flags) == acs_flags;
3449}
3450
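/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Device-specific
 * quirks are consulted first and take precedence over the generic
 * capability test.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */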
3467bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3468{
3469 int ret;
3470
3471 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3472 if (ret >= 0)
3473 return ret > 0;
3474
3475
3476
3477
3478
3479
3480 if (!pci_is_pcie(pdev))
3481 return false;
3482
3483 switch (pci_pcie_type(pdev)) {
3484
3485
3486
3487
3488
3489 case PCI_EXP_TYPE_PCIE_BRIDGE:
3490
3491
3492
3493
3494
3495
3496 case PCI_EXP_TYPE_PCI_BRIDGE:
3497 case PCI_EXP_TYPE_RC_EC:
3498 return false;
3499
3500
3501
3502
3503
3504 case PCI_EXP_TYPE_DOWNSTREAM:
3505 case PCI_EXP_TYPE_ROOT_PORT:
3506 return pci_acs_flags_enabled(pdev, acs_flags);
3507
3508
3509
3510
3511
3512
3513
3514 case PCI_EXP_TYPE_ENDPOINT:
3515 case PCI_EXP_TYPE_UPSTREAM:
3516 case PCI_EXP_TYPE_LEG_END:
3517 case PCI_EXP_TYPE_RC_END:
3518 if (!pdev->multifunction)
3519 break;
3520
3521 return pci_acs_flags_enabled(pdev, acs_flags);
3522 }
3523
3524
3525
3526
3527
3528 return true;
3529}
3530
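/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root of the hierarchy
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */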
3540bool pci_acs_path_enabled(struct pci_dev *start,
3541 struct pci_dev *end, u16 acs_flags)
3542{
3543 struct pci_dev *pdev, *parent = start;
3544
3545 do {
3546 pdev = parent;
3547
3548 if (!pci_acs_enabled(pdev, acs_flags))
3549 return false;
3550
3551 if (pci_is_root_bus(pdev->bus))
3552 return (end == NULL);
3553
3554 parent = pdev->bus->self;
3555 } while (pdev != end);
3556
3557 return true;
3558}
3559
3560
3561
3562
3563
3564void pci_acs_init(struct pci_dev *dev)
3565{
3566 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3567
3568
3569
3570
3571
3572
3573
3574 pci_enable_acs(dev);
3575}
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3587{
3588 unsigned int pos, nbars, i;
3589 u32 ctrl;
3590
3591 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3592 if (!pos)
3593 return -ENOTSUPP;
3594
3595 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3596 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3597 PCI_REBAR_CTRL_NBAR_SHIFT;
3598
3599 for (i = 0; i < nbars; i++, pos += 8) {
3600 int bar_idx;
3601
3602 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3603 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3604 if (bar_idx == bar)
3605 return pos;
3606 }
3607
3608 return -ENOENT;
3609}
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3620{
3621 int pos;
3622 u32 cap;
3623
3624 pos = pci_rebar_find_pos(pdev, bar);
3625 if (pos < 0)
3626 return 0;
3627
3628 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3629 cap &= PCI_REBAR_CAP_SIZES;
3630
3631
3632 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3633 bar == 0 && cap == 0x7000)
3634 cap = 0x3f000;
3635
3636 return cap >> 4;
3637}
3638EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3649{
3650 int pos;
3651 u32 ctrl;
3652
3653 pos = pci_rebar_find_pos(pdev, bar);
3654 if (pos < 0)
3655 return pos;
3656
3657 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3658 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3659}
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3671{
3672 int pos;
3673 u32 ctrl;
3674
3675 pos = pci_rebar_find_pos(pdev, bar);
3676 if (pos < 0)
3677 return pos;
3678
3679 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3680 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3681 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3682 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3683 return 0;
3684}
3685
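/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */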
3699int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3700{
3701 struct pci_bus *bus = dev->bus;
3702 struct pci_dev *bridge;
3703 u32 cap, ctl2;
3704
3705 if (!pci_is_pcie(dev))
3706 return -EINVAL;
3707
3708
3709
3710
3711
3712
3713
3714
3715 switch (pci_pcie_type(dev)) {
3716 case PCI_EXP_TYPE_ENDPOINT:
3717 case PCI_EXP_TYPE_LEG_END:
3718 case PCI_EXP_TYPE_RC_END:
3719 break;
3720 default:
3721 return -EINVAL;
3722 }
3723
3724 while (bus->parent) {
3725 bridge = bus->self;
3726
3727 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3728
3729 switch (pci_pcie_type(bridge)) {
3730
3731 case PCI_EXP_TYPE_UPSTREAM:
3732 case PCI_EXP_TYPE_DOWNSTREAM:
3733 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3734 return -EINVAL;
3735 break;
3736
3737
3738 case PCI_EXP_TYPE_ROOT_PORT:
3739 if ((cap & cap_mask) != cap_mask)
3740 return -EINVAL;
3741 break;
3742 }
3743
3744
3745 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3746 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3747 &ctl2);
3748 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3749 return -EINVAL;
3750 }
3751
3752 bus = bus->parent;
3753 }
3754
3755 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3756 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3757 return 0;
3758}
3759EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3760
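/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0.
 */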
3772u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3773{
3774 int slot;
3775
3776 if (pci_ari_enabled(dev->bus))
3777 slot = 0;
3778 else
3779 slot = PCI_SLOT(dev->devfn);
3780
3781 return (((pin - 1) + slot) % 4) + 1;
3782}
3783
3784int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3785{
3786 u8 pin;
3787
3788 pin = dev->pin;
3789 if (!pin)
3790 return -1;
3791
3792 while (!pci_is_root_bus(dev->bus)) {
3793 pin = pci_swizzle_interrupt_pin(dev, pin);
3794 dev = dev->bus->self;
3795 }
3796 *bridge = dev;
3797 return pin;
3798}
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3809{
3810 u8 pin = *pinp;
3811
3812 while (!pci_is_root_bus(dev->bus)) {
3813 pin = pci_swizzle_interrupt_pin(dev, pin);
3814 dev = dev->bus->self;
3815 }
3816 *pinp = pin;
3817 return PCI_SLOT(dev->devfn);
3818}
3819EXPORT_SYMBOL_GPL(pci_common_swizzle);
3820
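/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region().  Call this function only
 * after all use of the PCI regions has ceased.
 */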
3831void pci_release_region(struct pci_dev *pdev, int bar)
3832{
3833 struct pci_devres *dr;
3834
3835 if (pci_resource_len(pdev, bar) == 0)
3836 return;
3837 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3838 release_region(pci_resource_start(pdev, bar),
3839 pci_resource_len(pdev, bar));
3840 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3841 release_mem_region(pci_resource_start(pdev, bar),
3842 pci_resource_len(pdev, bar));
3843
3844 dr = find_pci_dr(pdev);
3845 if (dr)
3846 dr->region_mask &= ~(1 << bar);
3847}
3848EXPORT_SYMBOL(pci_release_region);
3849
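/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as being
 * reserved by owner @res_name.  Do not access any address inside the PCI
 * region unless this call returns successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */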
3869static int __pci_request_region(struct pci_dev *pdev, int bar,
3870 const char *res_name, int exclusive)
3871{
3872 struct pci_devres *dr;
3873
3874 if (pci_resource_len(pdev, bar) == 0)
3875 return 0;
3876
3877 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3878 if (!request_region(pci_resource_start(pdev, bar),
3879 pci_resource_len(pdev, bar), res_name))
3880 goto err_out;
3881 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3882 if (!__request_mem_region(pci_resource_start(pdev, bar),
3883 pci_resource_len(pdev, bar), res_name,
3884 exclusive))
3885 goto err_out;
3886 }
3887
3888 dr = find_pci_dr(pdev);
3889 if (dr)
3890 dr->region_mask |= 1 << bar;
3891
3892 return 0;
3893
3894err_out:
3895 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3896 &pdev->resource[bar]);
3897 return -EBUSY;
3898}
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3915{
3916 return __pci_request_region(pdev, bar, res_name, 0);
3917}
3918EXPORT_SYMBOL(pci_request_region);
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3929{
3930 int i;
3931
3932 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3933 if (bars & (1 << i))
3934 pci_release_region(pdev, i);
3935}
3936EXPORT_SYMBOL(pci_release_selected_regions);
3937
3938static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3939 const char *res_name, int excl)
3940{
3941 int i;
3942
3943 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3944 if (bars & (1 << i))
3945 if (__pci_request_region(pdev, i, res_name, excl))
3946 goto err_out;
3947 return 0;
3948
3949err_out:
3950 while (--i >= 0)
3951 if (bars & (1 << i))
3952 pci_release_region(pdev, i);
3953
3954 return -EBUSY;
3955}
3956
3957
3958
3959
3960
3961
3962
3963
3964int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3965 const char *res_name)
3966{
3967 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3968}
3969EXPORT_SYMBOL(pci_request_selected_regions);
3970
3971int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3972 const char *res_name)
3973{
3974 return __pci_request_selected_regions(pdev, bars, res_name,
3975 IORESOURCE_EXCLUSIVE);
3976}
3977EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989void pci_release_regions(struct pci_dev *pdev)
3990{
3991 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3992}
3993EXPORT_SYMBOL(pci_release_regions);
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4009{
4010 return pci_request_selected_regions(pdev,
4011 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4012}
4013EXPORT_SYMBOL(pci_request_regions);
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4031{
4032 return pci_request_selected_regions_exclusive(pdev,
4033 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4034}
4035EXPORT_SYMBOL(pci_request_regions_exclusive);
4036
4037
4038
4039
4040
4041int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4042 resource_size_t size)
4043{
4044 int ret = 0;
4045#ifdef PCI_IOBASE
4046 struct logic_pio_hwaddr *range;
4047
4048 if (!size || addr + size < addr)
4049 return -EINVAL;
4050
4051 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4052 if (!range)
4053 return -ENOMEM;
4054
4055 range->fwnode = fwnode;
4056 range->size = size;
4057 range->hw_start = addr;
4058 range->flags = LOGIC_PIO_CPU_MMIO;
4059
4060 ret = logic_pio_register_range(range);
4061 if (ret)
4062 kfree(range);
4063
4064
4065 if (ret == -EEXIST)
4066 ret = 0;
4067#endif
4068
4069 return ret;
4070}
4071
4072phys_addr_t pci_pio_to_address(unsigned long pio)
4073{
4074 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4075
4076#ifdef PCI_IOBASE
4077 if (pio >= MMIO_UPPER_LIMIT)
4078 return address;
4079
4080 address = logic_pio_to_hwaddr(pio);
4081#endif
4082
4083 return address;
4084}
4085EXPORT_SYMBOL_GPL(pci_pio_to_address);
4086
4087unsigned long __weak pci_address_to_pio(phys_addr_t address)
4088{
4089#ifdef PCI_IOBASE
4090 return logic_pio_trans_cpuaddr(address);
4091#else
4092 if (address > IO_SPACE_LIMIT)
4093 return (unsigned long)-1;
4094
4095 return (unsigned long) address;
4096#endif
4097}
4098
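/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */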
4109int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4110{
4111#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4112 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4113
4114 if (!(res->flags & IORESOURCE_IO))
4115 return -EINVAL;
4116
4117 if (res->end > IO_SPACE_LIMIT)
4118 return -EINVAL;
4119
4120 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4121 pgprot_device(PAGE_KERNEL));
4122#else
4123
4124
4125
4126
4127 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4128 return -ENODEV;
4129#endif
4130}
4131EXPORT_SYMBOL(pci_remap_iospace);
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141void pci_unmap_iospace(struct resource *res)
4142{
4143#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4144 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4145
4146 vunmap_range(vaddr, vaddr + resource_size(res));
4147#endif
4148}
4149EXPORT_SYMBOL(pci_unmap_iospace);
4150
4151static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4152{
4153 struct resource **res = ptr;
4154
4155 pci_unmap_iospace(*res);
4156}
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4168 phys_addr_t phys_addr)
4169{
4170 const struct resource **ptr;
4171 int error;
4172
4173 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4174 if (!ptr)
4175 return -ENOMEM;
4176
4177 error = pci_remap_iospace(res, phys_addr);
4178 if (error) {
4179 devres_free(ptr);
4180 } else {
4181 *ptr = res;
4182 devres_add(dev, ptr);
4183 }
4184
4185 return error;
4186}
4187EXPORT_SYMBOL(devm_pci_remap_iospace);
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4199 resource_size_t offset,
4200 resource_size_t size)
4201{
4202 void __iomem **ptr, *addr;
4203
4204 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4205 if (!ptr)
4206 return NULL;
4207
4208 addr = pci_remap_cfgspace(offset, size);
4209 if (addr) {
4210 *ptr = addr;
4211 devres_add(dev, ptr);
4212 } else
4213 devres_free(ptr);
4214
4215 return addr;
4216}
4217EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4218
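/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it with pci_remap_cfgspace() so that the proper PCI
 * configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
 * code on failure.  Usage sketch (illustrative; @pdev and @res come from
 * the caller's probe path):
 *
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */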
4238void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4239 struct resource *res)
4240{
4241 resource_size_t size;
4242 const char *name;
4243 void __iomem *dest_ptr;
4244
4245 BUG_ON(!dev);
4246
4247 if (!res || resource_type(res) != IORESOURCE_MEM) {
4248 dev_err(dev, "invalid resource\n");
4249 return IOMEM_ERR_PTR(-EINVAL);
4250 }
4251
4252 size = resource_size(res);
4253
4254 if (res->name)
4255 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4256 res->name);
4257 else
4258 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4259 if (!name)
4260 return IOMEM_ERR_PTR(-ENOMEM);
4261
4262 if (!devm_request_mem_region(dev, res->start, size, name)) {
4263 dev_err(dev, "can't request region for resource %pR\n", res);
4264 return IOMEM_ERR_PTR(-EBUSY);
4265 }
4266
4267 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4268 if (!dest_ptr) {
4269 dev_err(dev, "ioremap failed for resource %pR\n", res);
4270 devm_release_mem_region(dev, res->start, size);
4271 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4272 }
4273
4274 return dest_ptr;
4275}
4276EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4277
4278static void __pci_set_master(struct pci_dev *dev, bool enable)
4279{
4280 u16 old_cmd, cmd;
4281
4282 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4283 if (enable)
4284 cmd = old_cmd | PCI_COMMAND_MASTER;
4285 else
4286 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4287 if (cmd != old_cmd) {
4288 pci_dbg(dev, "%s bus mastering\n",
4289 enable ? "enabling" : "disabling");
4290 pci_write_config_word(dev, PCI_COMMAND, cmd);
4291 }
4292 dev->is_busmaster = enable;
4293}
4294
4295
4296
4297
4298
4299
4300
4301
4302char * __weak __init pcibios_setup(char *str)
4303{
4304 return str;
4305}
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315void __weak pcibios_set_master(struct pci_dev *dev)
4316{
4317 u8 lat;
4318
4319
4320 if (pci_is_pcie(dev))
4321 return;
4322
4323 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4324 if (lat < 16)
4325 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4326 else if (lat > pcibios_max_latency)
4327 lat = pcibios_max_latency;
4328 else
4329 return;
4330
4331 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4332}
4333
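/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */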
4341void pci_set_master(struct pci_dev *dev)
4342{
4343 __pci_set_master(dev, true);
4344 pcibios_set_master(dev);
4345}
4346EXPORT_SYMBOL(pci_set_master);
4347
4348
4349
4350
4351
4352void pci_clear_master(struct pci_dev *dev)
4353{
4354 __pci_set_master(dev, false);
4355}
4356EXPORT_SYMBOL(pci_clear_master);
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368int pci_set_cacheline_size(struct pci_dev *dev)
4369{
4370 u8 cacheline_size;
4371
4372 if (!pci_cache_line_size)
4373 return -EINVAL;
4374
4375
4376
4377 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4378 if (cacheline_size >= pci_cache_line_size &&
4379 (cacheline_size % pci_cache_line_size) == 0)
4380 return 0;
4381
4382
4383 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4384
4385 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4386 if (cacheline_size == pci_cache_line_size)
4387 return 0;
4388
4389 pci_dbg(dev, "cache line size of %d is not supported\n",
4390 pci_cache_line_size << 2);
4391
4392 return -EINVAL;
4393}
4394EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4395
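/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */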
4404int pci_set_mwi(struct pci_dev *dev)
4405{
4406#ifdef PCI_DISABLE_MWI
4407 return 0;
4408#else
4409 int rc;
4410 u16 cmd;
4411
4412 rc = pci_set_cacheline_size(dev);
4413 if (rc)
4414 return rc;
4415
4416 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4417 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4418 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4419 cmd |= PCI_COMMAND_INVALIDATE;
4420 pci_write_config_word(dev, PCI_COMMAND, cmd);
4421 }
4422 return 0;
4423#endif
4424}
4425EXPORT_SYMBOL(pci_set_mwi);
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435int pcim_set_mwi(struct pci_dev *dev)
4436{
4437 struct pci_devres *dr;
4438
4439 dr = find_pci_dr(dev);
4440 if (!dr)
4441 return -ENOMEM;
4442
4443 dr->mwi = 1;
4444 return pci_set_mwi(dev);
4445}
4446EXPORT_SYMBOL(pcim_set_mwi);
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457int pci_try_set_mwi(struct pci_dev *dev)
4458{
4459#ifdef PCI_DISABLE_MWI
4460 return 0;
4461#else
4462 return pci_set_mwi(dev);
4463#endif
4464}
4465EXPORT_SYMBOL(pci_try_set_mwi);
4466
4467
4468
4469
4470
4471
4472
4473void pci_clear_mwi(struct pci_dev *dev)
4474{
4475#ifndef PCI_DISABLE_MWI
4476 u16 cmd;
4477
4478 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4479 if (cmd & PCI_COMMAND_INVALIDATE) {
4480 cmd &= ~PCI_COMMAND_INVALIDATE;
4481 pci_write_config_word(dev, PCI_COMMAND, cmd);
4482 }
4483#endif
4484}
4485EXPORT_SYMBOL(pci_clear_mwi);
4486
4487
4488
4489
4490
4491
4492
4493void pci_disable_parity(struct pci_dev *dev)
4494{
4495 u16 cmd;
4496
4497 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4498 if (cmd & PCI_COMMAND_PARITY) {
4499 cmd &= ~PCI_COMMAND_PARITY;
4500 pci_write_config_word(dev, PCI_COMMAND, cmd);
4501 }
4502}
4503
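/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */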
4511void pci_intx(struct pci_dev *pdev, int enable)
4512{
4513 u16 pci_command, new;
4514
4515 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4516
4517 if (enable)
4518 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4519 else
4520 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4521
4522 if (new != pci_command) {
4523 struct pci_devres *dr;
4524
4525 pci_write_config_word(pdev, PCI_COMMAND, new);
4526
4527 dr = find_pci_dr(pdev);
4528 if (dr && !dr->restore_intx) {
4529 dr->restore_intx = 1;
4530 dr->orig_intx = !enable;
4531 }
4532 }
4533}
4534EXPORT_SYMBOL_GPL(pci_intx);
4535
4536static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4537{
4538 struct pci_bus *bus = dev->bus;
4539 bool mask_updated = true;
4540 u32 cmd_status_dword;
4541 u16 origcmd, newcmd;
4542 unsigned long flags;
4543 bool irq_pending;
4544
4545
4546
4547
4548
4549 BUILD_BUG_ON(PCI_COMMAND % 4);
4550 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4551
4552 raw_spin_lock_irqsave(&pci_lock, flags);
4553
4554 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4555
4556 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4557
4558
4559
4560
4561
4562
4563 if (mask != irq_pending) {
4564 mask_updated = false;
4565 goto done;
4566 }
4567
4568 origcmd = cmd_status_dword;
4569 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4570 if (mask)
4571 newcmd |= PCI_COMMAND_INTX_DISABLE;
4572 if (newcmd != origcmd)
4573 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4574
4575done:
4576 raw_spin_unlock_irqrestore(&pci_lock, flags);
4577
4578 return mask_updated;
4579}
4580
4581
4582
4583
4584
4585
4586
4587
4588bool pci_check_and_mask_intx(struct pci_dev *dev)
4589{
4590 return pci_check_and_set_intx_mask(dev, true);
4591}
4592EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602bool pci_check_and_unmask_intx(struct pci_dev *dev)
4603{
4604 return pci_check_and_set_intx_mask(dev, false);
4605}
4606EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4607
4608
4609
4610
4611
4612
4613
4614int pci_wait_for_pending_transaction(struct pci_dev *dev)
4615{
4616 if (!pci_is_pcie(dev))
4617 return 1;
4618
4619 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4620 PCI_EXP_DEVSTA_TRPND);
4621}
4622EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4623
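/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev: device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */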
4631bool pcie_has_flr(struct pci_dev *dev)
4632{
4633 u32 cap;
4634
4635 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4636 return false;
4637
4638 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4639 return cap & PCI_EXP_DEVCAP_FLR;
4640}
4641EXPORT_SYMBOL_GPL(pcie_has_flr);
4642
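/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */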
4651int pcie_flr(struct pci_dev *dev)
4652{
4653 if (!pci_wait_for_pending_transaction(dev))
4654 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4655
4656 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4657
4658 if (dev->imm_ready)
4659 return 0;
4660
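	/*
	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */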
4666 msleep(100);
4667
4668 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4669}
4670EXPORT_SYMBOL_GPL(pcie_flr);
4671
4672static int pci_af_flr(struct pci_dev *dev, int probe)
4673{
4674 int pos;
4675 u8 cap;
4676
4677 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4678 if (!pos)
4679 return -ENOTTY;
4680
4681 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4682 return -ENOTTY;
4683
4684 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4685 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4686 return -ENOTTY;
4687
4688 if (probe)
4689 return 0;
4690
4691
4692
4693
4694
4695
4696 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4697 PCI_AF_STATUS_TP << 8))
4698 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4699
4700 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4701
4702 if (dev->imm_ready)
4703 return 0;
4704
4705
4706
4707
4708
4709
4710
4711 msleep(100);
4712
4713 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4714}
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731static int pci_pm_reset(struct pci_dev *dev, int probe)
4732{
4733 u16 csr;
4734
4735 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4736 return -ENOTTY;
4737
4738 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4739 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4740 return -ENOTTY;
4741
4742 if (probe)
4743 return 0;
4744
4745 if (dev->current_state != PCI_D0)
4746 return -EINVAL;
4747
4748 csr &= ~PCI_PM_CTRL_STATE_MASK;
4749 csr |= PCI_D3hot;
4750 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4751 pci_dev_d3_sleep(dev);
4752
4753 csr &= ~PCI_PM_CTRL_STATE_MASK;
4754 csr |= PCI_D0;
4755 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4756 pci_dev_d3_sleep(dev);
4757
4758 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4759}
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4770 int delay)
4771{
4772 int timeout = 1000;
4773 bool ret;
4774 u16 lnk_status;
4775
4776
4777
4778
4779
4780 if (!pdev->link_active_reporting) {
4781 msleep(timeout + delay);
4782 return true;
4783 }
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794 if (active)
4795 msleep(20);
4796 for (;;) {
4797 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4798 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4799 if (ret == active)
4800 break;
4801 if (timeout <= 0)
4802 break;
4803 msleep(10);
4804 timeout -= 10;
4805 }
4806 if (active && ret)
4807 msleep(delay);
4808
4809 return ret == active;
4810}
4811
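/**
 * pcie_wait_for_link - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 *
 * Use this to wait till link becomes active or inactive.
 */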
4819bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4820{
4821 return pcie_wait_for_link_delay(pdev, active, 100);
4822}
4823
4824
4825
4826
4827
4828
4829
4830
4831static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4832{
4833 const struct pci_dev *pdev;
4834 int min_delay = 100;
4835 int max_delay = 0;
4836
4837 list_for_each_entry(pdev, &bus->devices, bus_list) {
4838 if (pdev->d3cold_delay < min_delay)
4839 min_delay = pdev->d3cold_delay;
4840 if (pdev->d3cold_delay > max_delay)
4841 max_delay = pdev->d3cold_delay;
4842 }
4843
4844 return max(min_delay, max_delay);
4845}
4846
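/**
 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
 * @dev: PCI bridge
 *
 * Handle necessary delays before access to the devices on the secondary
 * side of the bridge are permitted after D3cold to D0 transition.
 *
 * For PCIe this means the delays in PCIe r5.0 sec 6.6.1.  For conventional
 * PCI it means Tpvrh + Trhfa specified in PCI 3.0 sec 4.3.2.
 */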
4858void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4859{
4860 struct pci_dev *child;
4861 int delay;
4862
4863 if (pci_dev_is_disconnected(dev))
4864 return;
4865
4866 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4867 return;
4868
4869 down_read(&pci_bus_sem);
4870
4871
4872
4873
4874
4875
4876
4877 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4878 up_read(&pci_bus_sem);
4879 return;
4880 }
4881
4882
4883 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4884 if (!delay) {
4885 up_read(&pci_bus_sem);
4886 return;
4887 }
4888
4889 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4890 bus_list);
4891 up_read(&pci_bus_sem);
4892
4893
4894
4895
4896
4897
4898
4899 if (!pci_is_pcie(dev)) {
4900 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4901 msleep(1000 + delay);
4902 return;
4903 }
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922 if (!pcie_downstream_port(dev))
4923 return;
4924
4925 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4926 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4927 msleep(delay);
4928 } else {
4929 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4930 delay);
4931 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4932
4933 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4934 return;
4935 }
4936 }
4937
4938 if (!pci_device_is_present(child)) {
4939 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4940 msleep(delay);
4941 }
4942}
4943
4944void pci_reset_secondary_bus(struct pci_dev *dev)
4945{
4946 u16 ctrl;
4947
4948 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4949 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4950 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4951
4952
4953
4954
4955
4956 msleep(2);
4957
4958 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4959 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4960
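	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.  Assuming a
	 * minimum 33MHz clock this results in a 1s delay before we can
	 * consider subordinate devices to be re-initialized.  PCIe has
	 * some ways to shorten this, but we don't make use of them yet.
	 */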
4968 ssleep(1);
4969}
4970
4971void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4972{
4973 pci_reset_secondary_bus(dev);
4974}
4975
4976
4977
4978
4979
4980
4981
4982
4983int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4984{
4985 pcibios_reset_secondary_bus(dev);
4986
4987 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4988}
4989EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4990
4991static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4992{
4993 struct pci_dev *pdev;
4994
4995 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4996 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4997 return -ENOTTY;
4998
4999 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5000 if (pdev != dev)
5001 return -ENOTTY;
5002
5003 if (probe)
5004 return 0;
5005
5006 return pci_bridge_secondary_bus_reset(dev->bus->self);
5007}
5008
5009static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
5010{
5011 int rc = -ENOTTY;
5012
5013 if (!hotplug || !try_module_get(hotplug->owner))
5014 return rc;
5015
5016 if (hotplug->ops->reset_slot)
5017 rc = hotplug->ops->reset_slot(hotplug, probe);
5018
5019 module_put(hotplug->owner);
5020
5021 return rc;
5022}
5023
5024static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
5025{
5026 if (dev->multifunction || dev->subordinate || !dev->slot ||
5027 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5028 return -ENOTTY;
5029
5030 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5031}
5032
5033static void pci_dev_lock(struct pci_dev *dev)
5034{
5035 pci_cfg_access_lock(dev);
5036
5037 device_lock(&dev->dev);
5038}
5039
5040
5041static int pci_dev_trylock(struct pci_dev *dev)
5042{
5043 if (pci_cfg_access_trylock(dev)) {
5044 if (device_trylock(&dev->dev))
5045 return 1;
5046 pci_cfg_access_unlock(dev);
5047 }
5048
5049 return 0;
5050}
5051
5052static void pci_dev_unlock(struct pci_dev *dev)
5053{
5054 device_unlock(&dev->dev);
5055 pci_cfg_access_unlock(dev);
5056}
5057
5058static void pci_dev_save_and_disable(struct pci_dev *dev)
5059{
5060 const struct pci_error_handlers *err_handler =
5061 dev->driver ? dev->driver->err_handler : NULL;
5062
5063
5064
5065
5066
5067
5068 if (err_handler && err_handler->reset_prepare)
5069 err_handler->reset_prepare(dev);
5070
5071
5072
5073
5074
5075
5076 pci_set_power_state(dev, PCI_D0);
5077
5078 pci_save_state(dev);
5079
5080
5081
5082
5083
5084
5085
5086 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5087}
5088
5089static void pci_dev_restore(struct pci_dev *dev)
5090{
5091 const struct pci_error_handlers *err_handler =
5092 dev->driver ? dev->driver->err_handler : NULL;
5093
5094 pci_restore_state(dev);
5095
5096
5097
5098
5099
5100
5101 if (err_handler && err_handler->reset_done)
5102 err_handler->reset_done(dev);
5103}
5104
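/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory
 * spaces, etc.
 *
 * Returns 0 if the device function was successfully reset or negative if
 * the device doesn't support resetting a single function.
 */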
5125int __pci_reset_function_locked(struct pci_dev *dev)
5126{
5127 int rc;
5128
5129 might_sleep();
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139 rc = pci_dev_specific_reset(dev, 0);
5140 if (rc != -ENOTTY)
5141 return rc;
5142 if (pcie_has_flr(dev)) {
5143 rc = pcie_flr(dev);
5144 if (rc != -ENOTTY)
5145 return rc;
5146 }
5147 rc = pci_af_flr(dev, 0);
5148 if (rc != -ENOTTY)
5149 return rc;
5150 rc = pci_pm_reset(dev, 0);
5151 if (rc != -ENOTTY)
5152 return rc;
5153 rc = pci_dev_reset_slot_function(dev, 0);
5154 if (rc != -ENOTTY)
5155 return rc;
5156 return pci_parent_bus_reset(dev, 0);
5157}
5158EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171int pci_probe_reset_function(struct pci_dev *dev)
5172{
5173 int rc;
5174
5175 might_sleep();
5176
5177 rc = pci_dev_specific_reset(dev, 1);
5178 if (rc != -ENOTTY)
5179 return rc;
5180 if (pcie_has_flr(dev))
5181 return 0;
5182 rc = pci_af_flr(dev, 1);
5183 if (rc != -ENOTTY)
5184 return rc;
5185 rc = pci_pm_reset(dev, 1);
5186 if (rc != -ENOTTY)
5187 return rc;
5188 rc = pci_dev_reset_slot_function(dev, 1);
5189 if (rc != -ENOTTY)
5190 return rc;
5191
5192 return pci_parent_bus_reset(dev, 1);
5193}
5194
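/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  It differs from
 * __pci_reset_function_locked() in that it saves and restores device state
 * over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if
 * the device doesn't support resetting a single function.
 */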
5211int pci_reset_function(struct pci_dev *dev)
5212{
5213 int rc;
5214
5215 if (!dev->reset_fn)
5216 return -ENOTTY;
5217
5218 pci_dev_lock(dev);
5219 pci_dev_save_and_disable(dev);
5220
5221 rc = __pci_reset_function_locked(dev);
5222
5223 pci_dev_restore(dev);
5224 pci_dev_unlock(dev);
5225
5226 return rc;
5227}
5228EXPORT_SYMBOL_GPL(pci_reset_function);
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247int pci_reset_function_locked(struct pci_dev *dev)
5248{
5249 int rc;
5250
5251 if (!dev->reset_fn)
5252 return -ENOTTY;
5253
5254 pci_dev_save_and_disable(dev);
5255
5256 rc = __pci_reset_function_locked(dev);
5257
5258 pci_dev_restore(dev);
5259
5260 return rc;
5261}
5262EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5263
5264
5265
5266
5267
5268
5269
5270int pci_try_reset_function(struct pci_dev *dev)
5271{
5272 int rc;
5273
5274 if (!dev->reset_fn)
5275 return -ENOTTY;
5276
5277 if (!pci_dev_trylock(dev))
5278 return -EAGAIN;
5279
5280 pci_dev_save_and_disable(dev);
5281 rc = __pci_reset_function_locked(dev);
5282 pci_dev_restore(dev);
5283 pci_dev_unlock(dev);
5284
5285 return rc;
5286}
5287EXPORT_SYMBOL_GPL(pci_try_reset_function);
5288
5289
5290static bool pci_bus_resetable(struct pci_bus *bus)
5291{
5292 struct pci_dev *dev;
5293
5294
5295 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5296 return false;
5297
5298 list_for_each_entry(dev, &bus->devices, bus_list) {
5299 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5300 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5301 return false;
5302 }
5303
5304 return true;
5305}
5306
5307
5308static void pci_bus_lock(struct pci_bus *bus)
5309{
5310 struct pci_dev *dev;
5311
5312 list_for_each_entry(dev, &bus->devices, bus_list) {
5313 pci_dev_lock(dev);
5314 if (dev->subordinate)
5315 pci_bus_lock(dev->subordinate);
5316 }
5317}
5318
5319
5320static void pci_bus_unlock(struct pci_bus *bus)
5321{
5322 struct pci_dev *dev;
5323
5324 list_for_each_entry(dev, &bus->devices, bus_list) {
5325 if (dev->subordinate)
5326 pci_bus_unlock(dev->subordinate);
5327 pci_dev_unlock(dev);
5328 }
5329}
5330
5331
5332static int pci_bus_trylock(struct pci_bus *bus)
5333{
5334 struct pci_dev *dev;
5335
5336 list_for_each_entry(dev, &bus->devices, bus_list) {
5337 if (!pci_dev_trylock(dev))
5338 goto unlock;
5339 if (dev->subordinate) {
5340 if (!pci_bus_trylock(dev->subordinate)) {
5341 pci_dev_unlock(dev);
5342 goto unlock;
5343 }
5344 }
5345 }
5346 return 1;
5347
5348unlock:
5349 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5350 if (dev->subordinate)
5351 pci_bus_unlock(dev->subordinate);
5352 pci_dev_unlock(dev);
5353 }
5354 return 0;
5355}
5356
5357
5358static bool pci_slot_resetable(struct pci_slot *slot)
5359{
5360 struct pci_dev *dev;
5361
5362 if (slot->bus->self &&
5363 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5364 return false;
5365
5366 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5367 if (!dev->slot || dev->slot != slot)
5368 continue;
5369 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5370 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5371 return false;
5372 }
5373
5374 return true;
5375}
5376
5377
5378static void pci_slot_lock(struct pci_slot *slot)
5379{
5380 struct pci_dev *dev;
5381
5382 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5383 if (!dev->slot || dev->slot != slot)
5384 continue;
5385 pci_dev_lock(dev);
5386 if (dev->subordinate)
5387 pci_bus_lock(dev->subordinate);
5388 }
5389}
5390
5391
5392static void pci_slot_unlock(struct pci_slot *slot)
5393{
5394 struct pci_dev *dev;
5395
5396 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5397 if (!dev->slot || dev->slot != slot)
5398 continue;
5399 if (dev->subordinate)
5400 pci_bus_unlock(dev->subordinate);
5401 pci_dev_unlock(dev);
5402 }
5403}
5404
5405
5406static int pci_slot_trylock(struct pci_slot *slot)
5407{
5408 struct pci_dev *dev;
5409
5410 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5411 if (!dev->slot || dev->slot != slot)
5412 continue;
5413 if (!pci_dev_trylock(dev))
5414 goto unlock;
5415 if (dev->subordinate) {
5416 if (!pci_bus_trylock(dev->subordinate)) {
5417 pci_dev_unlock(dev);
5418 goto unlock;
5419 }
5420 }
5421 }
5422 return 1;
5423
5424unlock:
5425 list_for_each_entry_continue_reverse(dev,
5426 &slot->bus->devices, bus_list) {
5427 if (!dev->slot || dev->slot != slot)
5428 continue;
5429 if (dev->subordinate)
5430 pci_bus_unlock(dev->subordinate);
5431 pci_dev_unlock(dev);
5432 }
5433 return 0;
5434}
5435
5436
5437
5438
5439
5440static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5441{
5442 struct pci_dev *dev;
5443
5444 list_for_each_entry(dev, &bus->devices, bus_list) {
5445 pci_dev_save_and_disable(dev);
5446 if (dev->subordinate)
5447 pci_bus_save_and_disable_locked(dev->subordinate);
5448 }
5449}
5450
5451
5452
5453
5454
5455
5456static void pci_bus_restore_locked(struct pci_bus *bus)
5457{
5458 struct pci_dev *dev;
5459
5460 list_for_each_entry(dev, &bus->devices, bus_list) {
5461 pci_dev_restore(dev);
5462 if (dev->subordinate)
5463 pci_bus_restore_locked(dev->subordinate);
5464 }
5465}
5466
5467
5468
5469
5470
5471static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5472{
5473 struct pci_dev *dev;
5474
5475 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5476 if (!dev->slot || dev->slot != slot)
5477 continue;
5478 pci_dev_save_and_disable(dev);
5479 if (dev->subordinate)
5480 pci_bus_save_and_disable_locked(dev->subordinate);
5481 }
5482}
5483
5484
5485
5486
5487
5488
5489static void pci_slot_restore_locked(struct pci_slot *slot)
5490{
5491 struct pci_dev *dev;
5492
5493 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5494 if (!dev->slot || dev->slot != slot)
5495 continue;
5496 pci_dev_restore(dev);
5497 if (dev->subordinate)
5498 pci_bus_restore_locked(dev->subordinate);
5499 }
5500}
5501
5502static int pci_slot_reset(struct pci_slot *slot, int probe)
5503{
5504 int rc;
5505
5506 if (!slot || !pci_slot_resetable(slot))
5507 return -ENOTTY;
5508
5509 if (!probe)
5510 pci_slot_lock(slot);
5511
5512 might_sleep();
5513
5514 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5515
5516 if (!probe)
5517 pci_slot_unlock(slot);
5518
5519 return rc;
5520}
5521
5522
5523
5524
5525
5526
5527
5528int pci_probe_reset_slot(struct pci_slot *slot)
5529{
5530 return pci_slot_reset(slot, 1);
5531}
5532EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5533
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549static int __pci_reset_slot(struct pci_slot *slot)
5550{
5551 int rc;
5552
5553 rc = pci_slot_reset(slot, 1);
5554 if (rc)
5555 return rc;
5556
5557 if (pci_slot_trylock(slot)) {
5558 pci_slot_save_and_disable_locked(slot);
5559 might_sleep();
5560 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5561 pci_slot_restore_locked(slot);
5562 pci_slot_unlock(slot);
5563 } else
5564 rc = -EAGAIN;
5565
5566 return rc;
5567}
5568
5569static int pci_bus_reset(struct pci_bus *bus, int probe)
5570{
5571 int ret;
5572
5573 if (!bus->self || !pci_bus_resetable(bus))
5574 return -ENOTTY;
5575
5576 if (probe)
5577 return 0;
5578
5579 pci_bus_lock(bus);
5580
5581 might_sleep();
5582
5583 ret = pci_bridge_secondary_bus_reset(bus->self);
5584
5585 pci_bus_unlock(bus);
5586
5587 return ret;
5588}
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598int pci_bus_error_reset(struct pci_dev *bridge)
5599{
5600 struct pci_bus *bus = bridge->subordinate;
5601 struct pci_slot *slot;
5602
5603 if (!bus)
5604 return -ENOTTY;
5605
5606 mutex_lock(&pci_slot_mutex);
5607 if (list_empty(&bus->slots))
5608 goto bus_reset;
5609
5610 list_for_each_entry(slot, &bus->slots, list)
5611 if (pci_probe_reset_slot(slot))
5612 goto bus_reset;
5613
5614 list_for_each_entry(slot, &bus->slots, list)
5615 if (pci_slot_reset(slot, 0))
5616 goto bus_reset;
5617
5618 mutex_unlock(&pci_slot_mutex);
5619 return 0;
5620bus_reset:
5621 mutex_unlock(&pci_slot_mutex);
5622 return pci_bus_reset(bridge->subordinate, 0);
5623}
5624
5625
5626
5627
5628
5629
5630
5631int pci_probe_reset_bus(struct pci_bus *bus)
5632{
5633 return pci_bus_reset(bus, 1);
5634}
5635EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5636
5637
5638
5639
5640
5641
5642
5643static int __pci_reset_bus(struct pci_bus *bus)
5644{
5645 int rc;
5646
5647 rc = pci_bus_reset(bus, 1);
5648 if (rc)
5649 return rc;
5650
5651 if (pci_bus_trylock(bus)) {
5652 pci_bus_save_and_disable_locked(bus);
5653 might_sleep();
5654 rc = pci_bridge_secondary_bus_reset(bus->self);
5655 pci_bus_restore_locked(bus);
5656 pci_bus_unlock(bus);
5657 } else
5658 rc = -EAGAIN;
5659
5660 return rc;
5661}
5662
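/**
 * pci_reset_bus - Try to reset a PCI bus
 * @pdev: top level PCI device to reset via slot/bus
 *
 * Attempt a slot reset if @pdev sits in a resettable slot, otherwise try
 * resetting its parent bus.  Returns -EAGAIN if the required locks cannot
 * be taken.
 */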
5669int pci_reset_bus(struct pci_dev *pdev)
5670{
5671 return (!pci_probe_reset_slot(pdev->slot)) ?
5672 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5673}
5674EXPORT_SYMBOL_GPL(pci_reset_bus);
5675
5676
5677
5678
5679
5680
5681
5682
5683int pcix_get_max_mmrbc(struct pci_dev *dev)
5684{
5685 int cap;
5686 u32 stat;
5687
5688 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5689 if (!cap)
5690 return -EINVAL;
5691
5692 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5693 return -EINVAL;
5694
5695 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5696}
5697EXPORT_SYMBOL(pcix_get_max_mmrbc);
5698
5699
5700
5701
5702
5703
5704
5705
5706int pcix_get_mmrbc(struct pci_dev *dev)
5707{
5708 int cap;
5709 u16 cmd;
5710
5711 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5712 if (!cap)
5713 return -EINVAL;
5714
5715 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5716 return -EINVAL;
5717
5718 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5719}
5720EXPORT_SYMBOL(pcix_get_mmrbc);
5721
5722
5723
5724
5725
5726
5727
5728
5729
5730
5731int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5732{
5733 int cap;
5734 u32 stat, v, o;
5735 u16 cmd;
5736
5737 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5738 return -EINVAL;
5739
5740 v = ffs(mmrbc) - 10;
5741
5742 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5743 if (!cap)
5744 return -EINVAL;
5745
5746 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5747 return -EINVAL;
5748
5749 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5750 return -E2BIG;
5751
5752 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5753 return -EINVAL;
5754
5755 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5756 if (o != v) {
5757 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5758 return -EIO;
5759
5760 cmd &= ~PCI_X_CMD_MAX_READ;
5761 cmd |= v << 2;
5762 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5763 return -EIO;
5764 }
5765 return 0;
5766}
5767EXPORT_SYMBOL(pcix_set_mmrbc);
5768
5769
5770
5771
5772
5773
5774
5775int pcie_get_readrq(struct pci_dev *dev)
5776{
5777 u16 ctl;
5778
5779 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5780
5781 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5782}
5783EXPORT_SYMBOL(pcie_get_readrq);
5784
5785
5786
5787
5788
5789
5790
5791
5792
5793int pcie_set_readrq(struct pci_dev *dev, int rq)
5794{
5795 u16 v;
5796 int ret;
5797
5798 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5799 return -EINVAL;
5800
5801
5802
5803
5804
5805
5806 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5807 int mps = pcie_get_mps(dev);
5808
5809 if (mps < rq)
5810 rq = mps;
5811 }
5812
5813 v = (ffs(rq) - 8) << 12;
5814
5815 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5816 PCI_EXP_DEVCTL_READRQ, v);
5817
5818 return pcibios_err_to_errno(ret);
5819}
5820EXPORT_SYMBOL(pcie_set_readrq);
5821
5822
5823
5824
5825
5826
5827
5828int pcie_get_mps(struct pci_dev *dev)
5829{
5830 u16 ctl;
5831
5832 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5833
5834 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5835}
5836EXPORT_SYMBOL(pcie_get_mps);
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846int pcie_set_mps(struct pci_dev *dev, int mps)
5847{
5848 u16 v;
5849 int ret;
5850
5851 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5852 return -EINVAL;
5853
5854 v = ffs(mps) - 8;
5855 if (v > dev->pcie_mpss)
5856 return -EINVAL;
5857 v <<= 5;
5858
5859 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5860 PCI_EXP_DEVCTL_PAYLOAD, v);
5861
5862 return pcibios_err_to_errno(ret);
5863}
5864EXPORT_SYMBOL(pcie_set_mps);
5865
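/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., millions of bits
 * per second.
 */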
5880u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5881 enum pci_bus_speed *speed,
5882 enum pcie_link_width *width)
5883{
5884 u16 lnksta;
5885 enum pci_bus_speed next_speed;
5886 enum pcie_link_width next_width;
5887 u32 bw, next_bw;
5888
5889 if (speed)
5890 *speed = PCI_SPEED_UNKNOWN;
5891 if (width)
5892 *width = PCIE_LNK_WIDTH_UNKNOWN;
5893
5894 bw = 0;
5895
5896 while (dev) {
5897 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5898
5899 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5900 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5901 PCI_EXP_LNKSTA_NLW_SHIFT;
5902
5903 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5904
5905
5906 if (!bw || next_bw <= bw) {
5907 bw = next_bw;
5908
5909 if (limiting_dev)
5910 *limiting_dev = dev;
5911 if (speed)
5912 *speed = next_speed;
5913 if (width)
5914 *width = next_width;
5915 }
5916
5917 dev = pci_upstream_bridge(dev);
5918 }
5919
5920 return bw;
5921}
5922EXPORT_SYMBOL(pcie_bandwidth_available);
5923
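/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.  Return the maximum link speed
 * supported by the device.
 */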
5931enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5932{
5933 u32 lnkcap2, lnkcap;
5934
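	/*
	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
	 * implementation note there recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * must use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */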
5944 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5945
5946
5947 if (lnkcap2)
5948 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5949
5950 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5951 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5952 return PCIE_SPEED_5_0GT;
5953 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5954 return PCIE_SPEED_2_5GT;
5955
5956 return PCI_SPEED_UNKNOWN;
5957}
5958EXPORT_SYMBOL(pcie_get_speed_cap);
5959
5960
5961
5962
5963
5964
5965
5966
5967enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5968{
5969 u32 lnkcap;
5970
5971 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5972 if (lnkcap)
5973 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5974
5975 return PCIE_LNK_WIDTH_UNKNOWN;
5976}
5977EXPORT_SYMBOL(pcie_get_width_cap);

/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead.  The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
			   enum pcie_link_width *width)
{
	*speed = pcie_get_speed_cap(dev);
	*width = pcie_get_width_cap(dev);

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return 0;

	return *width * PCIE_SPEED2MBS_ENC(*speed);
}
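
/*
 * Numeric sanity check (assumes the per-lane PCIE_SPEED2MBS_ENC() values
 * used elsewhere in the PCI core): an 8.0 GT/s lane carries
 * 8000 Mb/s * 128/130 ~= 7877 Mb/s after 128b/130b encoding, so a Gen3 x8
 * device is capable of roughly 8 * 7877 = 63016 Mb/s, which
 * __pcie_print_link_status() below would print as "63.016 Gb/s".
 */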

/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance.  If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 pci_speed_string(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);
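
/*
 * Example (illustrative): high-bandwidth device drivers typically call
 * this once near the end of probe so users can spot a slot or riser that
 * trains below the device's capability:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		...
 *		pcie_print_link_status(pdev);
 *		return 0;
 *	}
 */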

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);
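
/*
 * Example (a sketch): requesting only the MMIO BARs of a device, leaving
 * any I/O port BARs alone:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "foo_driver");
 */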

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
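
/*
 * Example (hypothetical, modeled on what a VGA-arbiter-style caller might
 * do): route legacy VGA I/O and memory decoding to @dev and to every
 * bridge above it:
 *
 *	rc = pci_set_vga_state(dev, true,
 *			       PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			       PCI_VGA_STATE_CHANGE_DECODES |
 *			       PCI_VGA_STATE_CHANGE_BRIDGE);
 */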

#ifdef CONFIG_ACPI
/*
 * pci_pr3_present - check whether the device's ACPI companion declares
 * power resources and a _PR3 method, i.e. the platform can power the
 * device down beyond D3hot.
 */
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * as DMA aliased devices are grouped together.  The aliases can be from
 * hidden devices, functions, or subordinate buses within the same bus
 * hierarchy, but the alias device must be on the same bus as @dev.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
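
/*
 * Example (illustrative): a fixup for a device that issues DMA with the
 * requester ID of function 0 of its own slot might look like this:
 *
 *	static void quirk_dma_func0_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 0)
 *			pci_add_dma_alias(dev,
 *					  PCI_DEVFN(PCI_SLOT(dev->devfn), 0),
 *					  1);
 *	}
 *
 * Real quirks of this shape live in drivers/pci/quirks.c.
 */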

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	/* Skip the config read if the device is already marked disconnected */
	if (pci_dev_is_disconnected(pdev))
		return false;

	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;
	/* Propagate the "ignore hotplug" setting to the parent bridge */
	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality for
 * devices needing to alias DMA to another PCI device on another PCI bus.
 * If the PCI device is on the same bus, it is recommended to use
 * pci_add_dma_alias().  This is the default implementation.  Architecture
 * implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			if (align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = PAGE_SHIFT;
			}
		} else {
			align_order = PAGE_SHIFT;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
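
/*
 * Example of the parameter format parsed above (illustrative):
 *
 *	pci=resource_alignment=12@pci:8086:9c22;0000:01:00.0
 *
 * requests 2^12 (4 KiB) alignment for every device matching vendor 8086,
 * device 9c22, and default page-order alignment (no explicit "order@")
 * for device 0000:01:00.0.  Entries are separated by ';' or ','; the
 * device specifier syntax is whatever pci_dev_str_match() accepts.
 */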

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * resources and we use the second.
	 */

	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There's no way to influence their alignment here.
	 */
	if (dev->is_virtfn)
		return;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	/*
	 * When set by the command line, resource_alignment_param will not
	 * have a trailing line feed, which is ugly. So conditionally add
	 * it here.
	 */
	if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
		buf[count - 1] = '\n';
		buf[count++] = 0;
	}

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param = kstrndup(buf, count, GFP_KERNEL);

	if (!param)
		return -ENOMEM;

	spin_lock(&resource_alignment_lock);
	kfree(resource_alignment_param);
	resource_alignment_param = param;
	spin_unlock(&resource_alignment_lock);
	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);
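
/*
 * Example (illustrative): the same alignment string can also be set at
 * runtime through the sysfs attribute created above, e.g.:
 *
 *	# echo "20@0000:01:00.0" > /sys/bus/pci/resource_alignment
 *
 * which requests 2^20 (1 MiB) alignment for device 0000:01:00.0; see
 * pci_specified_resource_alignment() above for the exact parse.
 */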

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
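
/*
 * Example command line (illustrative) combining several of the options
 * handled above:
 *
 *	pci=pcie_bus_safe,hpmemsize=128M,resource_alignment=20@0000:03:00.0
 *
 * This selects the "safe" MPS configuration, sizes both the MMIO and
 * prefetchable MMIO hotplug bridge windows to 128 MB, and requests
 * 2^20 (1 MiB) alignment for device 0000:03:00.0.
 */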

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete. We can't allocate memory
 * in pci_setup() because some architectures do not have any memory allocation
 * service available during an early_param() call. So we allocate memory and
 * copy the variables here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);