// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
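
/*
 * Example (illustrative sketch, not part of this file): a typical driver
 * maps a memory BAR with pci_ioremap_bar() after enabling the device and
 * claiming its regions.  The names foo_probe and foo_regs are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		void __iomem *foo_regs;
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			goto err_disable;
 *
 *		foo_regs = pci_ioremap_bar(pdev, 0);	// map BAR 0
 *		if (!foo_regs) {
 *			rc = -ENOMEM;
 *			goto err_release;
 *		}
 *		...
 *	}
 */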

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused in kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
		     subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
		     subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
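
/*
 * Example (sketch, not part of this file): locating the Power Management
 * capability and reading its PMC register, much as pci_pm_init() below does.
 *
 *	u16 pmc;
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *
 *	if (pm)
 *		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 */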

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
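
/*
 * Example (sketch, not part of this file): finding the AER extended
 * capability and reading its uncorrectable error status register.
 *
 *	u32 status;
 *	int aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 *
 *	if (aer)
 *		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
 *				      &status);
 */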

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller can take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * if found. Otherwise return NULL.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * from the current state, going back to a shallower low-power
	 * state without passing through D0 is invalid, so reject it.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through - force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		msleep(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen will change power states of a
	 * device without informing the OS, so restoring the BARs when
	 * need_restore is set covers that case as well.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which is inaccessible in
 * D3cold.  The platform firmware is therefore queried first to detect
 * accessibility of the register.  In case the platform firmware reports an
 * incorrect state or the device isn't power manageable by the platform at
 * all, we try to detect D3cold by testing accessibility of the vendor ID in
 * config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and
 * invoke pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in
	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
	 * corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_wakeup_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware
 * and/or the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
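
/*
 * Example (sketch, not part of this file): a legacy (non-dev_pm_ops)
 * driver suspend/resume pair built on pci_set_power_state().  foo_suspend
 * and foo_resume are hypothetical callbacks.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */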

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REPORT_FROZEN also sits in the dark */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = ilog2(resource_size(res)) - 20;
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling the
	 * LTR feature in PCIe capability.
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);
	pci_restore_aer_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
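
/*
 * Example (sketch, not part of this file): the save/restore pair is also
 * used around resets, not just suspend.  With a reset mechanism that does
 * not preserve config space itself, a caller might do:
 *
 *	pci_save_state(pdev);
 *	pcie_flr(pdev);		// or another reset mechanism
 *	pci_restore_state(pdev);
 *
 * pci_restore_state() is a no-op unless a preceding pci_save_state() set
 * dev->state_saved, and it clears state_saved again when done.
 */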

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
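
/*
 * Example (sketch, not part of this file): the store/load pair lets a
 * caller capture config space once and reapply it later, e.g. around an
 * operation that clobbers the device (VFIO uses a pattern like this):
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	...				// device reset or reprogrammed here
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */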

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
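
/*
 * Example (sketch, not part of this file): classic unmanaged probe()
 * sequence pairing pci_enable_device() with pci_disable_device() in the
 * error path and in remove().  foo_probe is hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc = pci_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc) {
 *			pci_disable_device(pdev);
 *			return rc;
 *		}
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */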

/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
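
/*
 * Example (sketch, not part of this file): with the managed variant the
 * cleanup above happens automatically on driver detach, so error handling
 * in probe() collapses to plain returns.  foo_probe is hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *
 *		rc = pcim_iomap_regions(pdev, BIT(0), "foo");
 *		if (rc)
 *			return rc;	// devres undoes the enable for us
 *		return 0;
 *	}
 */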

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */
	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */
	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);
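
/*
 * Example (sketch, not part of this file): a network driver arming
 * Wake-on-LAN in its suspend path, following the user-visible wakeup
 * policy via device_may_wakeup().  foo_suspend is hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(dev));
 *		pci_set_power_state(pdev, PCI_D3hot);
 *		return 0;
 *	}
 */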

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* else, fall through */
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, it may have been powered down by non-standard means,
	 * so keep D3cold as the target state.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			       && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);
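
/*
 * Example (sketch, not part of this file): pci_prepare_to_sleep() bundles
 * the two steps from the previous example; it picks the target state via
 * pci_target_state(), arms wakeup accordingly, and enters that state.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 */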

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus, so check its wakeup capability */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended or it has to be
 * reconfigured due to wakeup settings difference between system and runtime
 * suspend, or the current power state of it is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
	return target_state != pci_dev->current_state &&
		target_state != PCI_D3cold &&
		pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime-suspended, not configured for system wakeup and
 * its config space is still accessible (its state is shallower than D3cold),
 * disable PME for it to prevent spurious wakeups during the upcoming
 * system-wide transition.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * part of the resume complete flow.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it.  However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
#endif
	{ }
};

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management Mode
		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Even the oldest 2010 Thunderbolt controller supports D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not validated
		 * by vendors for runtime D3 at least until 2018 because there
		 * was no OS support.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * consumers won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a consumer was added,
	 * it is enough to check whether that consumer prevents the bridge
	 * from going to D3.  In the removal case the device being removed
	 * obviously cannot block D3 anymore, so it need not be checked.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
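
/*
 * Example (sketch, not part of this file): a driver whose device loses
 * state it cannot recover when power is removed can veto D3cold while
 * still permitting runtime PM to D3hot:
 *
 *	pci_d3cold_disable(pdev);	// e.g. in probe(), or while the
 *					// problematic feature is in use
 *	...
 *	pci_d3cold_enable(pdev);	// when it is safe again
 */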

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				 dev->d1_support ? " D1" : "",
				 dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
}

static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}
2924
2925
2926static int pci_ea_read(struct pci_dev *dev, int offset)
2927{
2928 struct resource *res;
2929 int ent_size, ent_offset = offset;
2930 resource_size_t start, end;
2931 unsigned long flags;
2932 u32 dw0, bei, base, max_offset;
2933 u8 prop;
2934 bool support_64 = (sizeof(resource_size_t) >= 8);
2935
2936 pci_read_config_dword(dev, ent_offset, &dw0);
2937 ent_offset += 4;
2938
2939
2940 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2941
2942 if (!(dw0 & PCI_EA_ENABLE))
2943 goto out;
2944
2945 bei = (dw0 & PCI_EA_BEI) >> 4;
2946 prop = (dw0 & PCI_EA_PP) >> 8;
2947
	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
2952 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2953 prop = (dw0 & PCI_EA_SP) >> 16;
2954 if (prop > PCI_EA_P_BRIDGE_IO)
2955 goto out;
2956
2957 res = pci_ea_get_resource(dev, bei, prop);
2958 if (!res) {
2959 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2960 goto out;
2961 }
2962
2963 flags = pci_ea_flags(dev, prop);
2964 if (!flags) {
2965 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2966 goto out;
2967 }
2968
2969
2970 pci_read_config_dword(dev, ent_offset, &base);
2971 start = (base & PCI_EA_FIELD_MASK);
2972 ent_offset += 4;
2973
2974
2975 pci_read_config_dword(dev, ent_offset, &max_offset);
2976 ent_offset += 4;
2977
2978
2979 if (base & PCI_EA_IS_64) {
2980 u32 base_upper;
2981
2982 pci_read_config_dword(dev, ent_offset, &base_upper);
2983 ent_offset += 4;
2984
2985 flags |= IORESOURCE_MEM_64;
2986
2987
2988 if (!support_64 && base_upper)
2989 goto out;
2990
2991 if (support_64)
2992 start |= ((u64)base_upper << 32);
2993 }
2994
2995 end = start + (max_offset | 0x03);
2996
2997
2998 if (max_offset & PCI_EA_IS_64) {
2999 u32 max_offset_upper;
3000
3001 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3002 ent_offset += 4;
3003
3004 flags |= IORESOURCE_MEM_64;
3005
3006
3007 if (!support_64 && max_offset_upper)
3008 goto out;
3009
3010 if (support_64)
3011 end += ((u64)max_offset_upper << 32);
3012 }
3013
3014 if (end < start) {
3015 pci_err(dev, "EA Entry crosses address boundary\n");
3016 goto out;
3017 }
3018
3019 if (ent_size != ent_offset - offset) {
3020 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3021 ent_size, ent_offset - offset);
3022 goto out;
3023 }
3024
3025 res->name = pci_name(dev);
3026 res->start = start;
3027 res->end = end;
3028 res->flags = flags;
3029
3030 if (bei <= PCI_EA_BEI_BAR5)
3031 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3032 bei, res, prop);
3033 else if (bei == PCI_EA_BEI_ROM)
3034 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3035 res, prop);
3036 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3037 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3038 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3039 else
3040 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3041 bei, res, prop);
3042
3043out:
3044 return offset + ent_size;
3045}
3046
/* Enhanced Allocation Initialization */
3048void pci_ea_init(struct pci_dev *dev)
3049{
3050 int ea;
3051 u8 num_ent;
3052 int offset;
3053 int i;
3054
3055
3056 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3057 if (!ea)
3058 return;
3059
3060
3061 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3062 &num_ent);
3063 num_ent &= PCI_EA_NUM_ENT_MASK;
3064
3065 offset = ea + PCI_EA_FIRST_ENT;
3066
3067
3068 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3069 offset += 4;
3070
3071
3072 for (i = 0; i < num_ent; ++i)
3073 offset = pci_ea_read(dev, offset);
3074}
3075
3076static void pci_add_saved_cap(struct pci_dev *pci_dev,
3077 struct pci_cap_saved_state *new_cap)
3078{
3079 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3080}
3081
/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *			      capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
3090static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3091 bool extended, unsigned int size)
3092{
3093 int pos;
3094 struct pci_cap_saved_state *save_state;
3095
3096 if (extended)
3097 pos = pci_find_ext_capability(dev, cap);
3098 else
3099 pos = pci_find_capability(dev, cap);
3100
3101 if (!pos)
3102 return 0;
3103
3104 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3105 if (!save_state)
3106 return -ENOMEM;
3107
3108 save_state->cap.cap_nr = cap;
3109 save_state->cap.cap_extended = extended;
3110 save_state->cap.size = size;
3111 pci_add_saved_cap(dev, save_state);
3112
3113 return 0;
3114}
3115
3116int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3117{
3118 return _pci_add_cap_save_buffer(dev, cap, false, size);
3119}
3120
3121int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3122{
3123 return _pci_add_cap_save_buffer(dev, cap, true, size);
3124}
3125
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving data
 *				   of all capabilities that need to be saved
 * @dev: the PCI device
 */
3130void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3131{
3132 int error;
3133
3134 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3135 PCI_EXP_SAVE_REGS * sizeof(u16));
3136 if (error)
3137 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3138
3139 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3140 if (error)
3141 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3142
3143 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3144 2 * sizeof(u16));
3145 if (error)
3146 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3147
3148 pci_allocate_vc_save_buffers(dev);
3149}
3150
3151void pci_free_cap_save_buffers(struct pci_dev *dev)
3152{
3153 struct pci_cap_saved_state *tmp;
3154 struct hlist_node *n;
3155
3156 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3157 kfree(tmp);
3158}
3159
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
3167void pci_configure_ari(struct pci_dev *dev)
3168{
3169 u32 cap;
3170 struct pci_dev *bridge;
3171
3172 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3173 return;
3174
3175 bridge = dev->bus->self;
3176 if (!bridge)
3177 return;
3178
3179 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3180 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3181 return;
3182
3183 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3184 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3185 PCI_EXP_DEVCTL2_ARI);
3186 bridge->ari_enabled = 1;
3187 } else {
3188 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3189 PCI_EXP_DEVCTL2_ARI);
3190 bridge->ari_enabled = 0;
3191 }
3192}
3193
3194static int pci_acs_enable;
3195
/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
3199void pci_request_acs(void)
3200{
3201 pci_acs_enable = 1;
3202}
3203
3204static const char *disable_acs_redir_param;
3205
/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
3212static void pci_disable_acs_redir(struct pci_dev *dev)
3213{
3214 int ret = 0;
3215 const char *p;
3216 int pos;
3217 u16 ctrl;
3218
3219 if (!disable_acs_redir_param)
3220 return;
3221
3222 p = disable_acs_redir_param;
3223 while (*p) {
3224 ret = pci_dev_str_match(dev, p, &p);
3225 if (ret < 0) {
3226 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
3227 disable_acs_redir_param);
3228
3229 break;
3230 } else if (ret == 1) {
3231
3232 break;
3233 }
3234
3235 if (*p != ';' && *p != ',') {
3236
3237 break;
3238 }
3239 p++;
3240 }
3241
3242 if (ret != 1)
3243 return;
3244
3245 if (!pci_dev_specific_disable_acs_redir(dev))
3246 return;
3247
3248 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3249 if (!pos) {
3250 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
3251 return;
3252 }
3253
3254 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3255
3256
3257 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
3258
3259 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3260
3261 pci_info(dev, "disabled ACS redirect\n");
3262}
3263
/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
3268static void pci_std_enable_acs(struct pci_dev *dev)
3269{
3270 int pos;
3271 u16 cap;
3272 u16 ctrl;
3273
3274 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3275 if (!pos)
3276 return;
3277
3278 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
3279 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3280
	/* Source Validation */
3282 ctrl |= (cap & PCI_ACS_SV);
3283
	/* P2P Request Redirect */
3285 ctrl |= (cap & PCI_ACS_RR);
3286
	/* P2P Completion Redirect */
3288 ctrl |= (cap & PCI_ACS_CR);
3289
	/* Upstream Forwarding */
3291 ctrl |= (cap & PCI_ACS_UF);
3292
3293 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3294}
3295
/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
3300void pci_enable_acs(struct pci_dev *dev)
3301{
3302 if (!pci_acs_enable)
3303 goto disable_acs_redir;
3304
3305 if (!pci_dev_specific_enable_acs(dev))
3306 goto disable_acs_redir;
3307
3308 pci_std_enable_acs(dev);
3309
3310disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
3318 pci_disable_acs_redir(dev);
3319}
3320
3321static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3322{
3323 int pos;
3324 u16 cap, ctrl;
3325
3326 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
3327 if (!pos)
3328 return false;
3329
	/*
	 * Except for egress control, capabilities are either required
	 * or only required if controllable.  Features missing from the
	 * capability field can therefore be assumed as hard-wired enabled.
	 */
3335 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3336 acs_flags &= (cap | PCI_ACS_EC);
3337
3338 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3339 return (ctrl & acs_flags) == acs_flags;
3340}
3341
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * quirks if the hardware provides the equivalent isolation without
 * implementing the ACS capability.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.
 */
3358bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3359{
3360 int ret;
3361
3362 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3363 if (ret >= 0)
3364 return ret > 0;
3365
	/*
	 * Conventional PCI and PCI-X devices never support ACS, either
	 * effectively or actually.  The shared bus topology implies that
	 * any device on the bus can receive or snoop DMA.
	 */
3371 if (!pci_is_pcie(pdev))
3372 return false;
3373
3374 switch (pci_pcie_type(pdev)) {
3375
3376
3377
3378
3379
3380 case PCI_EXP_TYPE_PCIE_BRIDGE:
3381
3382
3383
3384
3385
3386
3387 case PCI_EXP_TYPE_PCI_BRIDGE:
3388 case PCI_EXP_TYPE_RC_EC:
3389 return false;
3390
	/*
	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
	 * implement ACS in order to indicate their isolation capabilities.
	 */
3395 case PCI_EXP_TYPE_DOWNSTREAM:
3396 case PCI_EXP_TYPE_ROOT_PORT:
3397 return pci_acs_flags_enabled(pdev, acs_flags);
3398
3399
3400
3401
3402
3403
3404
3405 case PCI_EXP_TYPE_ENDPOINT:
3406 case PCI_EXP_TYPE_UPSTREAM:
3407 case PCI_EXP_TYPE_LEG_END:
3408 case PCI_EXP_TYPE_RC_END:
3409 if (!pdev->multifunction)
3410 break;
3411
3412 return pci_acs_flags_enabled(pdev, acs_flags);
3413 }
3414
	/*
	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
	 * to single function devices with the exception of downstream ports.
	 */
3419 return true;
3420}
3421
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root of the hierarchy
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
3431bool pci_acs_path_enabled(struct pci_dev *start,
3432 struct pci_dev *end, u16 acs_flags)
3433{
3434 struct pci_dev *pdev, *parent = start;
3435
3436 do {
3437 pdev = parent;
3438
3439 if (!pci_acs_enabled(pdev, acs_flags))
3440 return false;
3441
3442 if (pci_is_root_bus(pdev->bus))
3443 return (end == NULL);
3444
3445 parent = pdev->bus->self;
3446 } while (pdev != end);
3447
3448 return true;
3449}
3450
/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all.
 * Returns -ENOENT if no ctrl register for the BAR could be found.
 */
3460static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3461{
3462 unsigned int pos, nbars, i;
3463 u32 ctrl;
3464
3465 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3466 if (!pos)
3467 return -ENOTSUPP;
3468
3469 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3470 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3471 PCI_REBAR_CTRL_NBAR_SHIFT;
3472
3473 for (i = 0; i < nbars; i++, pos += 8) {
3474 int bar_idx;
3475
3476 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3477 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3478 if (bar_idx == bar)
3479 return pos;
3480 }
3481
3482 return -ENOENT;
3483}
3484
/**
 * pci_rebar_get_possible_sizes - get possible sizes for BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
 */
3493u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3494{
3495 int pos;
3496 u32 cap;
3497
3498 pos = pci_rebar_find_pos(pdev, bar);
3499 if (pos < 0)
3500 return 0;
3501
3502 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3503 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3504}
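
/*
 * Note: in the returned mask, bit n corresponds to a supported BAR size
 * of 2^(n + 20) bytes, i.e. bit 0 = 1 MB and bit 19 = 512 GB, matching
 * the size encoding consumed by pci_rebar_set_size() below.
 */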
3505
/**
 * pci_rebar_get_current_size - get the current size of a BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Read the size of a BAR from the resizable BAR config.
 * Returns size if found or negative error code.
 */
3514int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3515{
3516 int pos;
3517 u32 ctrl;
3518
3519 pos = pci_rebar_find_pos(pdev, bar);
3520 if (pos < 0)
3521 return pos;
3522
3523 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3524 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3525}
3526
/**
 * pci_rebar_set_size - set a new size for a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 * @size: new size as defined in the spec (0=1MB, 19=512GB)
 *
 * Set the new size of a BAR as defined in the spec.
 * Returns zero if resizing was successful, error code otherwise.
 */
3536int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3537{
3538 int pos;
3539 u32 ctrl;
3540
3541 pos = pci_rebar_find_pos(pdev, bar);
3542 if (pos < 0)
3543 return pos;
3544
3545 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3546 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3547 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3548 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3549 return 0;
3550}
3551
/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */
3565int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3566{
3567 struct pci_bus *bus = dev->bus;
3568 struct pci_dev *bridge;
3569 u32 cap, ctl2;
3570
3571 if (!pci_is_pcie(dev))
3572 return -EINVAL;
3573
	/*
	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
	 * AtomicOp requesters.  For now, we only support endpoints as
	 * requesters and root ports as completers.  No endpoints as
	 * completers, and no peer-to-peer.
	 */
3581 switch (pci_pcie_type(dev)) {
3582 case PCI_EXP_TYPE_ENDPOINT:
3583 case PCI_EXP_TYPE_LEG_END:
3584 case PCI_EXP_TYPE_RC_END:
3585 break;
3586 default:
3587 return -EINVAL;
3588 }
3589
3590 while (bus->parent) {
3591 bridge = bus->self;
3592
3593 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3594
3595 switch (pci_pcie_type(bridge)) {
		/* Ensure switch ports support AtomicOp routing */
3597 case PCI_EXP_TYPE_UPSTREAM:
3598 case PCI_EXP_TYPE_DOWNSTREAM:
3599 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3600 return -EINVAL;
3601 break;
3602
		/* Ensure root port supports all the sizes we need */
3604 case PCI_EXP_TYPE_ROOT_PORT:
3605 if ((cap & cap_mask) != cap_mask)
3606 return -EINVAL;
3607 break;
3608 }
3609
		/* Ensure upstream ports don't block AtomicOps on egress */
3611 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3612 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3613 &ctl2);
3614 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3615 return -EINVAL;
3616 }
3617
3618 bus = bus->parent;
3619 }
3620
3621 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3622 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3623 return 0;
3624}
3625EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3626
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0.
 */
3638u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3639{
3640 int slot;
3641
3642 if (pci_ari_enabled(dev->bus))
3643 slot = 0;
3644 else
3645 slot = PCI_SLOT(dev->devfn);
3646
3647 return (((pin - 1) + slot) % 4) + 1;
3648}
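
/*
 * Worked example (illustrative): a device in slot 3 asserting INTB
 * (pin = 2) swizzles to ((2 - 1) + 3) % 4 + 1 = 1, i.e. the parent
 * bridge sees the interrupt on its INTA input.
 */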
3649
3650int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3651{
3652 u8 pin;
3653
3654 pin = dev->pin;
3655 if (!pin)
3656 return -1;
3657
3658 while (!pci_is_root_bus(dev->bus)) {
3659 pin = pci_swizzle_interrupt_pin(dev, pin);
3660 dev = dev->bus->self;
3661 }
3662 *bridge = dev;
3663 return pin;
3664}
3665
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
3674u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3675{
3676 u8 pin = *pinp;
3677
3678 while (!pci_is_root_bus(dev->bus)) {
3679 pin = pci_swizzle_interrupt_pin(dev, pin);
3680 dev = dev->bus->self;
3681 }
3682 *pinp = pin;
3683 return PCI_SLOT(dev->devfn);
3684}
3685EXPORT_SYMBOL_GPL(pci_common_swizzle);
3686
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region().  Call this function only
 * after all use of the PCI regions has ceased.
 */
3697void pci_release_region(struct pci_dev *pdev, int bar)
3698{
3699 struct pci_devres *dr;
3700
3701 if (pci_resource_len(pdev, bar) == 0)
3702 return;
3703 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3704 release_region(pci_resource_start(pdev, bar),
3705 pci_resource_len(pdev, bar));
3706 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3707 release_mem_region(pci_resource_start(pdev, bar),
3708 pci_resource_len(pdev, bar));
3709
3710 dr = find_pci_dr(pdev);
3711 if (dr)
3712 dr->region_mask &= ~(1 << bar);
3713}
3714EXPORT_SYMBOL(pci_release_region);
3715
/**
 * __pci_request_region - Reserved PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3735static int __pci_request_region(struct pci_dev *pdev, int bar,
3736 const char *res_name, int exclusive)
3737{
3738 struct pci_devres *dr;
3739
3740 if (pci_resource_len(pdev, bar) == 0)
3741 return 0;
3742
3743 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3744 if (!request_region(pci_resource_start(pdev, bar),
3745 pci_resource_len(pdev, bar), res_name))
3746 goto err_out;
3747 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3748 if (!__request_mem_region(pci_resource_start(pdev, bar),
3749 pci_resource_len(pdev, bar), res_name,
3750 exclusive))
3751 goto err_out;
3752 }
3753
3754 dr = find_pci_dr(pdev);
3755 if (dr)
3756 dr->region_mask |= 1 << bar;
3757
3758 return 0;
3759
3760err_out:
3761 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3762 &pdev->resource[bar]);
3763 return -EBUSY;
3764}
3765
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3780int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3781{
3782 return __pci_request_region(pdev, bar, res_name, 0);
3783}
3784EXPORT_SYMBOL(pci_request_region);
3785
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
3794void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3795{
3796 int i;
3797
3798 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3799 if (bars & (1 << i))
3800 pci_release_region(pdev, i);
3801}
3802EXPORT_SYMBOL(pci_release_selected_regions);
3803
3804static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3805 const char *res_name, int excl)
3806{
3807 int i;
3808
3809 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3810 if (bars & (1 << i))
3811 if (__pci_request_region(pdev, i, res_name, excl))
3812 goto err_out;
3813 return 0;
3814
3815err_out:
3816 while (--i >= 0)
3817 if (bars & (1 << i))
3818 pci_release_region(pdev, i);
3819
3820 return -EBUSY;
3821}
3822
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
3830int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3831 const char *res_name)
3832{
3833 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3834}
3835EXPORT_SYMBOL(pci_request_selected_regions);
3836
3837int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3838 const char *res_name)
3839{
3840 return __pci_request_selected_regions(pdev, bars, res_name,
3841 IORESOURCE_EXCLUSIVE);
3842}
3843EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3844
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_regions()
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions().  Call this function only
 * after all use of the PCI regions has ceased.
 */
3855void pci_release_regions(struct pci_dev *pdev)
3856{
3857 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3858}
3859EXPORT_SYMBOL(pci_release_regions);
3860
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3874int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3875{
3876 return pci_request_selected_regions(pdev,
3877 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3878}
3879EXPORT_SYMBOL(pci_request_regions);
3880
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name.  Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that /dev/mem
 * and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */
3896int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3897{
3898 return pci_request_selected_regions_exclusive(pdev,
3899 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3900}
3901EXPORT_SYMBOL(pci_request_regions_exclusive);
3902
/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise
 */
3907int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3908 resource_size_t size)
3909{
3910 int ret = 0;
3911#ifdef PCI_IOBASE
3912 struct logic_pio_hwaddr *range;
3913
3914 if (!size || addr + size < addr)
3915 return -EINVAL;
3916
3917 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3918 if (!range)
3919 return -ENOMEM;
3920
3921 range->fwnode = fwnode;
3922 range->size = size;
3923 range->hw_start = addr;
3924 range->flags = LOGIC_PIO_CPU_MMIO;
3925
3926 ret = logic_pio_register_range(range);
3927 if (ret)
3928 kfree(range);
3929#endif
3930
3931 return ret;
3932}
3933
3934phys_addr_t pci_pio_to_address(unsigned long pio)
3935{
3936 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3937
3938#ifdef PCI_IOBASE
3939 if (pio >= MMIO_UPPER_LIMIT)
3940 return address;
3941
3942 address = logic_pio_to_hwaddr(pio);
3943#endif
3944
3945 return address;
3946}
3947
3948unsigned long __weak pci_address_to_pio(phys_addr_t address)
3949{
3950#ifdef PCI_IOBASE
3951 return logic_pio_trans_cpuaddr(address);
3952#else
3953 if (address > IO_SPACE_LIMIT)
3954 return (unsigned long)-1;
3955
3956 return (unsigned long) address;
3957#endif
3958}
3959
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
3970int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3971{
3972#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3973 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3974
3975 if (!(res->flags & IORESOURCE_IO))
3976 return -EINVAL;
3977
3978 if (res->end > IO_SPACE_LIMIT)
3979 return -EINVAL;
3980
3981 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3982 pgprot_device(PAGE_KERNEL));
3983#else
	/*
	 * This architecture does not have memory mapped I/O space,
	 * so this function should never be called
	 */
3988 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3989 return -ENODEV;
3990#endif
3991}
3992EXPORT_SYMBOL(pci_remap_iospace);
3993
/**
 * pci_unmap_iospace - Unmap the memory mapped I/O space
 * @res: resource to be unmapped
 *
 * Unmap the CPU virtual address @res from virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
4002void pci_unmap_iospace(struct resource *res)
4003{
4004#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4005 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4006
4007 unmap_kernel_range(vaddr, resource_size(res));
4008#endif
4009}
4010EXPORT_SYMBOL(pci_unmap_iospace);
4011
4012static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4013{
4014 struct resource **res = ptr;
4015
4016 pci_unmap_iospace(*res);
4017}
4018
/**
 * devm_pci_remap_iospace - Managed pci_remap_iospace()
 * @dev: Generic device to remap IO address for
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
 * detach.
 */
4028int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4029 phys_addr_t phys_addr)
4030{
4031 const struct resource **ptr;
4032 int error;
4033
4034 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4035 if (!ptr)
4036 return -ENOMEM;
4037
4038 error = pci_remap_iospace(res, phys_addr);
4039 if (error) {
4040 devres_free(ptr);
4041 } else {
4042 *ptr = res;
4043 devres_add(dev, ptr);
4044 }
4045
4046 return error;
4047}
4048EXPORT_SYMBOL(devm_pci_remap_iospace);
4049
/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
 * detach.
 */
4059void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4060 resource_size_t offset,
4061 resource_size_t size)
4062{
4063 void __iomem **ptr, *addr;
4064
4065 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4066 if (!ptr)
4067 return NULL;
4068
4069 addr = pci_remap_cfgspace(offset, size);
4070 if (addr) {
4071 *ptr = addr;
4072 devres_add(dev, ptr);
4073 } else
4074 devres_free(ptr);
4075
4076 return addr;
4077}
4078EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4079
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
 * code on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
4099void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4100 struct resource *res)
4101{
4102 resource_size_t size;
4103 const char *name;
4104 void __iomem *dest_ptr;
4105
4106 BUG_ON(!dev);
4107
4108 if (!res || resource_type(res) != IORESOURCE_MEM) {
4109 dev_err(dev, "invalid resource\n");
4110 return IOMEM_ERR_PTR(-EINVAL);
4111 }
4112
4113 size = resource_size(res);
4114 name = res->name ?: dev_name(dev);
4115
4116 if (!devm_request_mem_region(dev, res->start, size, name)) {
4117 dev_err(dev, "can't request region for resource %pR\n", res);
4118 return IOMEM_ERR_PTR(-EBUSY);
4119 }
4120
4121 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4122 if (!dest_ptr) {
4123 dev_err(dev, "ioremap failed for resource %pR\n", res);
4124 devm_release_mem_region(dev, res->start, size);
4125 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4126 }
4127
4128 return dest_ptr;
4129}
4130EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4131
4132static void __pci_set_master(struct pci_dev *dev, bool enable)
4133{
4134 u16 old_cmd, cmd;
4135
4136 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4137 if (enable)
4138 cmd = old_cmd | PCI_COMMAND_MASTER;
4139 else
4140 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4141 if (cmd != old_cmd) {
4142 pci_dbg(dev, "%s bus mastering\n",
4143 enable ? "enabling" : "disabling");
4144 pci_write_config_word(dev, PCI_COMMAND, cmd);
4145 }
4146 dev->is_busmaster = enable;
4147}
4148
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
4156char * __weak __init pcibios_setup(char *str)
4157{
4158 return str;
4159}
4160
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
4169void __weak pcibios_set_master(struct pci_dev *dev)
4170{
4171 u8 lat;
4172
	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4174 if (pci_is_pcie(dev))
4175 return;
4176
4177 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4178 if (lat < 16)
4179 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4180 else if (lat > pcibios_max_latency)
4181 lat = pcibios_max_latency;
4182 else
4183 return;
4184
4185 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4186}
4187
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
4195void pci_set_master(struct pci_dev *dev)
4196{
4197 __pci_set_master(dev, true);
4198 pcibios_set_master(dev);
4199}
4200EXPORT_SYMBOL(pci_set_master);
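
/*
 * Typical usage (sketch, not taken from a specific driver): bus mastering
 * is normally enabled in probe() after the device itself is enabled:
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pci_set_master(pdev);
 *
 * pci_clear_master() undoes this on the teardown path.
 */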
4201
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
4206void pci_clear_master(struct pci_dev *dev)
4207{
4208 __pci_set_master(dev, false);
4209}
4210EXPORT_SYMBOL(pci_clear_master);
4211
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4222int pci_set_cacheline_size(struct pci_dev *dev)
4223{
4224 u8 cacheline_size;
4225
4226 if (!pci_cache_line_size)
4227 return -EINVAL;
4228
	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
4231 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4232 if (cacheline_size >= pci_cache_line_size &&
4233 (cacheline_size % pci_cache_line_size) == 0)
4234 return 0;
4235
	/* Write the correct value. */
4237 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
4239 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4240 if (cacheline_size == pci_cache_line_size)
4241 return 0;
4242
4243 pci_info(dev, "cache line size of %d is not supported\n",
4244 pci_cache_line_size << 2);
4245
4246 return -EINVAL;
4247}
4248EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4249
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4258int pci_set_mwi(struct pci_dev *dev)
4259{
4260#ifdef PCI_DISABLE_MWI
4261 return 0;
4262#else
4263 int rc;
4264 u16 cmd;
4265
4266 rc = pci_set_cacheline_size(dev);
4267 if (rc)
4268 return rc;
4269
4270 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4271 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4272 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4273 cmd |= PCI_COMMAND_INVALIDATE;
4274 pci_write_config_word(dev, PCI_COMMAND, cmd);
4275 }
4276 return 0;
4277#endif
4278}
4279EXPORT_SYMBOL(pci_set_mwi);
4280
/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi().
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4289int pcim_set_mwi(struct pci_dev *dev)
4290{
4291 struct pci_devres *dr;
4292
4293 dr = find_pci_dr(dev);
4294 if (!dr)
4295 return -ENOMEM;
4296
4297 dr->mwi = 1;
4298 return pci_set_mwi(dev);
4299}
4300EXPORT_SYMBOL(pcim_set_mwi);
4301
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4311int pci_try_set_mwi(struct pci_dev *dev)
4312{
4313#ifdef PCI_DISABLE_MWI
4314 return 0;
4315#else
4316 return pci_set_mwi(dev);
4317#endif
4318}
4319EXPORT_SYMBOL(pci_try_set_mwi);
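
/*
 * Usage note: MWI is an optimization only, so callers of pci_try_set_mwi()
 * deliberately ignore the return value; a device or arch that doesn't
 * support Memory-Write-Invalidate simply keeps using plain memory writes.
 */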
4320
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
4327void pci_clear_mwi(struct pci_dev *dev)
4328{
4329#ifndef PCI_DISABLE_MWI
4330 u16 cmd;
4331
4332 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4333 if (cmd & PCI_COMMAND_INVALIDATE) {
4334 cmd &= ~PCI_COMMAND_INVALIDATE;
4335 pci_write_config_word(dev, PCI_COMMAND, cmd);
4336 }
4337#endif
4338}
4339EXPORT_SYMBOL(pci_clear_mwi);
4340
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */
4348void pci_intx(struct pci_dev *pdev, int enable)
4349{
4350 u16 pci_command, new;
4351
4352 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4353
4354 if (enable)
4355 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4356 else
4357 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4358
4359 if (new != pci_command) {
4360 struct pci_devres *dr;
4361
4362 pci_write_config_word(pdev, PCI_COMMAND, new);
4363
4364 dr = find_pci_dr(pdev);
4365 if (dr && !dr->restore_intx) {
4366 dr->restore_intx = 1;
4367 dr->orig_intx = !enable;
4368 }
4369 }
4370}
4371EXPORT_SYMBOL_GPL(pci_intx);
4372
4373static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4374{
4375 struct pci_bus *bus = dev->bus;
4376 bool mask_updated = true;
4377 u32 cmd_status_dword;
4378 u16 origcmd, newcmd;
4379 unsigned long flags;
4380 bool irq_pending;
4381
	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
4386 BUILD_BUG_ON(PCI_COMMAND % 4);
4387 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4388
4389 raw_spin_lock_irqsave(&pci_lock, flags);
4390
4391 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4392
4393 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4394
	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
4400 if (mask != irq_pending) {
4401 mask_updated = false;
4402 goto done;
4403 }
4404
4405 origcmd = cmd_status_dword;
4406 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4407 if (mask)
4408 newcmd |= PCI_COMMAND_INTX_DISABLE;
4409 if (newcmd != origcmd)
4410 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4411
4412done:
4413 raw_spin_unlock_irqrestore(&pci_lock, flags);
4414
4415 return mask_updated;
4416}
4417
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and return
 * true in that case. False is returned if no interrupt was pending.
 */
4425bool pci_check_and_mask_intx(struct pci_dev *dev)
4426{
4427 return pci_check_and_set_intx_mask(dev, true);
4428}
4429EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4430
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not and
 * return true. False is returned and the mask remains active if there was
 * still an interrupt pending.
 */
4439bool pci_check_and_unmask_intx(struct pci_dev *dev)
4440{
4441 return pci_check_and_set_intx_mask(dev, false);
4442}
4443EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
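
/*
 * Usage sketch (illustrative; my_dev and my_irq are hypothetical): these
 * two helpers implement the shared-INTx "mask on interrupt" pattern used
 * before MSI, e.g. in the hard-irq half of a threaded handler:
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *md = data;
 *
 *		if (!pci_check_and_mask_intx(md->pdev))
 *			return IRQ_NONE;	// not our interrupt
 *		return IRQ_WAKE_THREAD;		// unmask after handling
 *	}
 */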
4444
/**
 * pci_wait_for_pending_transaction - wait for pending transaction
 * @dev: the PCI device to operate on
 *
 * Return 0 if transaction is pending, 1 otherwise.
 */
4451int pci_wait_for_pending_transaction(struct pci_dev *dev)
4452{
4453 if (!pci_is_pcie(dev))
4454 return 1;
4455
4456 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4457 PCI_EXP_DEVSTA_TRPND);
4458}
4459EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4460
/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev: device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */
4468bool pcie_has_flr(struct pci_dev *dev)
4469{
4470 u32 cap;
4471
4472 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4473 return false;
4474
4475 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4476 return cap & PCI_EXP_DEVCAP_FLR;
4477}
4478EXPORT_SYMBOL_GPL(pcie_has_flr);
4479
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */
4488int pcie_flr(struct pci_dev *dev)
4489{
4490 if (!pci_wait_for_pending_transaction(dev))
4491 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4492
4493 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4494
4495 if (dev->imm_ready)
4496 return 0;
4497
	/*
	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */
4503 msleep(100);
4504
4505 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4506}
4507EXPORT_SYMBOL_GPL(pcie_flr);
4508
4509static int pci_af_flr(struct pci_dev *dev, int probe)
4510{
4511 int pos;
4512 u8 cap;
4513
4514 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4515 if (!pos)
4516 return -ENOTTY;
4517
4518 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4519 return -ENOTTY;
4520
4521 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4522 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4523 return -ENOTTY;
4524
4525 if (probe)
4526 return 0;
4527
	/*
	 * Wait for Transaction Pending bit to clear.  A word-aligned test
	 * is used, so we use the control offset rather than status and shift
	 * the test bit to match.
	 */
4533 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4534 PCI_AF_STATUS_TP << 8))
4535 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4536
4537 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4538
4539 if (dev->imm_ready)
4540 return 0;
4541
	/*
	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
	 * updated 27 July 2006; a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */
4548 msleep(100);
4549
4550 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4551}
4552
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: Whether to probe for reset capability
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
4568static int pci_pm_reset(struct pci_dev *dev, int probe)
4569{
4570 u16 csr;
4571
4572 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4573 return -ENOTTY;
4574
4575 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4576 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4577 return -ENOTTY;
4578
4579 if (probe)
4580 return 0;
4581
4582 if (dev->current_state != PCI_D0)
4583 return -EINVAL;
4584
4585 csr &= ~PCI_PM_CTRL_STATE_MASK;
4586 csr |= PCI_D3hot;
4587 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4588 pci_dev_d3_sleep(dev);
4589
4590 csr &= ~PCI_PM_CTRL_STATE_MASK;
4591 csr |= PCI_D0;
4592 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4593 pci_dev_d3_sleep(dev);
4594
4595 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4596}
4597
/**
 * pcie_wait_for_link_delay - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 * @delay: Delay to wait after link has become active (in ms)
 *
 * Use this to wait till link becomes active or inactive.
 */
4606static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4607 int delay)
4608{
4609 int timeout = 1000;
4610 bool ret;
4611 u16 lnk_status;
4612
	/*
	 * Some controllers might not implement link active reporting. In this
	 * case, we wait for 1000 ms + any delay requested by the caller.
	 */
4617 if (!pdev->link_active_reporting) {
4618 msleep(1100);
4619 return true;
4620 }
4621
	/*
	 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
	 * 20ms, after which we should expect a link active if the reset was
	 * successful. If so, software must wait a minimum 100ms before sending
	 * configuration requests to devices downstream this port.
	 *
	 * If the link fails to activate, either the device was physically
	 * removed or the link is permanently failed.
	 */
4631 if (active)
4632 msleep(20);
4633 for (;;) {
4634 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4635 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4636 if (ret == active)
4637 break;
4638 if (timeout <= 0)
4639 break;
4640 msleep(10);
4641 timeout -= 10;
4642 }
4643 if (active && ret)
4644 msleep(delay);
4645 else if (ret != active)
4646 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4647 active ? "set" : "cleared");
4648 return ret == active;
4649}
4650
/**
 * pcie_wait_for_link - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 *
 * Use this to wait till link becomes active or inactive.
 */
4658bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4659{
4660 return pcie_wait_for_link_delay(pdev, active, 100);
4661}
4662
/*
 * Find maximum D3cold delay required by all the devices on the bus.  The
 * spec says 100 ms, but firmware can lower it and we allow drivers to
 * increase it as well.
 *
 * Called with @pci_bus_sem locked for reading.
 */
4670static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4671{
4672 const struct pci_dev *pdev;
4673 int min_delay = 100;
4674 int max_delay = 0;
4675
4676 list_for_each_entry(pdev, &bus->devices, bus_list) {
4677 if (pdev->d3cold_delay < min_delay)
4678 min_delay = pdev->d3cold_delay;
4679 if (pdev->d3cold_delay > max_delay)
4680 max_delay = pdev->d3cold_delay;
4681 }
4682
4683 return max(min_delay, max_delay);
4684}
4685
/**
 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
 * @dev: PCI bridge
 *
 * Handle necessary delays before access to the devices on the secondary
 * side of the bridge are permitted after D3cold to D0 transition.
 *
 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
 * 4.3.2.
 */
4697void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4698{
4699 struct pci_dev *child;
4700 int delay;
4701
4702 if (pci_dev_is_disconnected(dev))
4703 return;
4704
4705 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4706 return;
4707
4708 down_read(&pci_bus_sem);
4709
	/*
	 * We only deal with devices that are present currently on the bus.
	 * For any hot-added devices the access delay is handled in pciehp
	 * board_added(). In case of ACPI hotplug the firmware is expected
	 * to configure the devices before OS is notified.
	 */
4716 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4717 up_read(&pci_bus_sem);
4718 return;
4719 }
4720
	/* Take d3cold_delay requirements into account */
4722 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4723 if (!delay) {
4724 up_read(&pci_bus_sem);
4725 return;
4726 }
4727
4728 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4729 bus_list);
4730 up_read(&pci_bus_sem);
4731
	/*
	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa
	 * before accessing the device after reset (that is 1000 ms + 100 ms).
	 * In practice this should not be needed because we don't do power
	 * management for them (see pci_bridge_d3_possible()).
	 */
4738 if (!pci_is_pcie(dev)) {
4739 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4740 msleep(1000 + delay);
4741 return;
4742 }
4743
	/*
	 * PCIe downstream and root ports that do not support speeds greater
	 * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
	 * (gen3) we need to wait first for the data link layer to become
	 * active.
	 *
	 * However, 100 ms is the minimum and the PCIe spec says the
	 * software must allow at least 1s before it can determine that the
	 * device that did not respond is a broken device. There is
	 * evidence that 100 ms is not always enough, for example certain
	 * Titan Ridge xHCI controller does not always respond to
	 * configuration requests if we only wait for 100 ms (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
	 *
	 * Therefore we wait for 100 ms and check for the device presence.
	 * If it is still not present give it an additional 100 ms.
	 */
4761 if (!pcie_downstream_port(dev))
4762 return;
4763
4764 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4765 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4766 msleep(delay);
4767 } else {
4768 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4769 delay);
4770 if (!pcie_wait_for_link_delay(dev, true, delay)) {
			/* Did not train, no need to wait any further */
4772 return;
4773 }
4774 }
4775
4776 if (!pci_device_is_present(child)) {
4777 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4778 msleep(delay);
4779 }
4780}
4781
4782void pci_reset_secondary_bus(struct pci_dev *dev)
4783{
4784 u16 ctrl;
4785
4786 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4787 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4788 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4789
	/*
	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
	 * this to 2ms to ensure that we meet the minimum requirement.
	 */
4794 msleep(2);
4795
4796 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4797 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4798
	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.
	 * Assuming a minimum 33MHz clock this results in a 1s
	 * delay before we can consider subordinate devices to
	 * be re-initialized.  PCIe has some ways to shorten this,
	 * but we don't make use of them yet.
	 */
4806 ssleep(1);
4807}
4808
4809void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4810{
4811 pci_reset_secondary_bus(dev);
4812}
4813
/**
 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
4821int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4822{
4823 pcibios_reset_secondary_bus(dev);
4824
4825 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4826}
4827EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4828
4829static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4830{
4831 struct pci_dev *pdev;
4832
4833 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4834 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4835 return -ENOTTY;
4836
4837 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4838 if (pdev != dev)
4839 return -ENOTTY;
4840
4841 if (probe)
4842 return 0;
4843
4844 return pci_bridge_secondary_bus_reset(dev->bus->self);
4845}
4846
4847static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4848{
4849 int rc = -ENOTTY;
4850
4851 if (!hotplug || !try_module_get(hotplug->owner))
4852 return rc;
4853
4854 if (hotplug->ops->reset_slot)
4855 rc = hotplug->ops->reset_slot(hotplug, probe);
4856
4857 module_put(hotplug->owner);
4858
4859 return rc;
4860}
4861
4862static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4863{
4864 struct pci_dev *pdev;
4865
4866 if (dev->subordinate || !dev->slot ||
4867 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4868 return -ENOTTY;
4869
4870 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4871 if (pdev != dev && pdev->slot == dev->slot)
4872 return -ENOTTY;
4873
4874 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4875}
4876
4877static void pci_dev_lock(struct pci_dev *dev)
4878{
4879 pci_cfg_access_lock(dev);
4880
4881 device_lock(&dev->dev);
4882}
4883
/* Return 1 on successful lock, 0 on contention */
4885static int pci_dev_trylock(struct pci_dev *dev)
4886{
4887 if (pci_cfg_access_trylock(dev)) {
4888 if (device_trylock(&dev->dev))
4889 return 1;
4890 pci_cfg_access_unlock(dev);
4891 }
4892
4893 return 0;
4894}
4895
4896static void pci_dev_unlock(struct pci_dev *dev)
4897{
4898 device_unlock(&dev->dev);
4899 pci_cfg_access_unlock(dev);
4900}
4901
4902static void pci_dev_save_and_disable(struct pci_dev *dev)
4903{
4904 const struct pci_error_handlers *err_handler =
4905 dev->driver ? dev->driver->err_handler : NULL;
4906
	/*
	 * dev->driver->err_handler->reset_prepare() is protected against
	 * races with ->remove() by the device lock, which must be held by
	 * the caller.
	 */
4912 if (err_handler && err_handler->reset_prepare)
4913 err_handler->reset_prepare(dev);
4914
	/*
	 * Wake-up device prior to save.  PM registers default to D0 after
	 * reset and a simple register restore doesn't reliably return
	 * to a non-D0 state anyway.
	 */
4920 pci_set_power_state(dev, PCI_D0);
4921
4922 pci_save_state(dev);
4923
	/*
	 * Disable the device by clearing the Command register, except for
	 * INTx-disable which is set.  This not only disables MMIO and I/O port
	 * BARs, but also prevents the device from being Bus Master, preventing
	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
	 * devices, INTx-disable prevents legacy interrupts.
	 */
4930 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4931}
4932
4933static void pci_dev_restore(struct pci_dev *dev)
4934{
4935 const struct pci_error_handlers *err_handler =
4936 dev->driver ? dev->driver->err_handler : NULL;
4937
4938 pci_restore_state(dev);
4939
	/*
	 * dev->driver->err_handler->reset_done() is protected against
	 * races with ->remove() by the device lock, which must be held by
	 * the caller.
	 */
4945 if (err_handler && err_handler->reset_done)
4946 err_handler->reset_done(dev);
4947}
4948
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4969int __pci_reset_function_locked(struct pci_dev *dev)
4970{
4971 int rc;
4972
4973 might_sleep();
4974
	/*
	 * A reset method returns -ENOTTY if it doesn't support this device
	 * and we should try the next method.
	 *
	 * If it returns 0 (success), we're finished.  If it returns any
	 * other error, we're also finished: this indicates that further
	 * reset mechanisms might be broken on the device.
	 */
4983 rc = pci_dev_specific_reset(dev, 0);
4984 if (rc != -ENOTTY)
4985 return rc;
4986 if (pcie_has_flr(dev)) {
4987 rc = pcie_flr(dev);
4988 if (rc != -ENOTTY)
4989 return rc;
4990 }
4991 rc = pci_af_flr(dev, 0);
4992 if (rc != -ENOTTY)
4993 return rc;
4994 rc = pci_pm_reset(dev, 0);
4995 if (rc != -ENOTTY)
4996 return rc;
4997 rc = pci_dev_reset_slot_function(dev, 0);
4998 if (rc != -ENOTTY)
4999 return rc;
5000 return pci_parent_bus_reset(dev, 0);
5001}
5002EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5003
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
5015int pci_probe_reset_function(struct pci_dev *dev)
5016{
5017 int rc;
5018
5019 might_sleep();
5020
5021 rc = pci_dev_specific_reset(dev, 1);
5022 if (rc != -ENOTTY)
5023 return rc;
5024 if (pcie_has_flr(dev))
5025 return 0;
5026 rc = pci_af_flr(dev, 1);
5027 if (rc != -ENOTTY)
5028 return rc;
5029 rc = pci_pm_reset(dev, 1);
5030 if (rc != -ENOTTY)
5031 return rc;
5032 rc = pci_dev_reset_slot_function(dev, 1);
5033 if (rc != -ENOTTY)
5034 return rc;
5035
5036 return pci_parent_bus_reset(dev, 1);
5037}
5038
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device state
 * over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
5055int pci_reset_function(struct pci_dev *dev)
5056{
5057 int rc;
5058
5059 if (!dev->reset_fn)
5060 return -ENOTTY;
5061
5062 pci_dev_lock(dev);
5063 pci_dev_save_and_disable(dev);
5064
5065 rc = __pci_reset_function_locked(dev);
5066
5067 pci_dev_restore(dev);
5068 pci_dev_unlock(dev);
5069
5070 return rc;
5071}
5072EXPORT_SYMBOL_GPL(pci_reset_function);
5073
/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device state
 * over the reset.  It also differs from pci_reset_function() in that it
 * requires the PCI device lock to be held.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
5091int pci_reset_function_locked(struct pci_dev *dev)
5092{
5093 int rc;
5094
5095 if (!dev->reset_fn)
5096 return -ENOTTY;
5097
5098 pci_dev_save_and_disable(dev);
5099
5100 rc = __pci_reset_function_locked(dev);
5101
5102 pci_dev_restore(dev);
5103
5104 return rc;
5105}
5106EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5107
/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
5114int pci_try_reset_function(struct pci_dev *dev)
5115{
5116 int rc;
5117
5118 if (!dev->reset_fn)
5119 return -ENOTTY;
5120
5121 if (!pci_dev_trylock(dev))
5122 return -EAGAIN;
5123
5124 pci_dev_save_and_disable(dev);
5125 rc = __pci_reset_function_locked(dev);
5126 pci_dev_restore(dev);
5127 pci_dev_unlock(dev);
5128
5129 return rc;
5130}
5131EXPORT_SYMBOL_GPL(pci_try_reset_function);
5132
/* Do any devices on or below this bus prevent a bus reset? */
5134static bool pci_bus_resetable(struct pci_bus *bus)
5135{
5136 struct pci_dev *dev;
5137
5138
5139 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5140 return false;
5141
5142 list_for_each_entry(dev, &bus->devices, bus_list) {
5143 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5144 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5145 return false;
5146 }
5147
5148 return true;
5149}
5150
/* Lock devices from the top of the tree down */
5152static void pci_bus_lock(struct pci_bus *bus)
5153{
5154 struct pci_dev *dev;
5155
5156 list_for_each_entry(dev, &bus->devices, bus_list) {
5157 pci_dev_lock(dev);
5158 if (dev->subordinate)
5159 pci_bus_lock(dev->subordinate);
5160 }
5161}
5162
/* Unlock devices from the bottom of the tree up */
5164static void pci_bus_unlock(struct pci_bus *bus)
5165{
5166 struct pci_dev *dev;
5167
5168 list_for_each_entry(dev, &bus->devices, bus_list) {
5169 if (dev->subordinate)
5170 pci_bus_unlock(dev->subordinate);
5171 pci_dev_unlock(dev);
5172 }
5173}
5174
/* Return 1 on successful lock, 0 on contention */
5176static int pci_bus_trylock(struct pci_bus *bus)
5177{
5178 struct pci_dev *dev;
5179
5180 list_for_each_entry(dev, &bus->devices, bus_list) {
5181 if (!pci_dev_trylock(dev))
5182 goto unlock;
5183 if (dev->subordinate) {
5184 if (!pci_bus_trylock(dev->subordinate)) {
5185 pci_dev_unlock(dev);
5186 goto unlock;
5187 }
5188 }
5189 }
5190 return 1;
5191
5192unlock:
5193 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5194 if (dev->subordinate)
5195 pci_bus_unlock(dev->subordinate);
5196 pci_dev_unlock(dev);
5197 }
5198 return 0;
5199}
5200
/* Do any devices on or below this slot prevent a bus reset? */
5202static bool pci_slot_resetable(struct pci_slot *slot)
5203{
5204 struct pci_dev *dev;
5205
5206 if (slot->bus->self &&
5207 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5208 return false;
5209
5210 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5211 if (!dev->slot || dev->slot != slot)
5212 continue;
5213 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5214 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5215 return false;
5216 }
5217
5218 return true;
5219}
5220
/* Lock devices from the top of the tree down */
5222static void pci_slot_lock(struct pci_slot *slot)
5223{
5224 struct pci_dev *dev;
5225
5226 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5227 if (!dev->slot || dev->slot != slot)
5228 continue;
5229 pci_dev_lock(dev);
5230 if (dev->subordinate)
5231 pci_bus_lock(dev->subordinate);
5232 }
5233}
5234
/* Unlock devices from the bottom of the tree up */
5236static void pci_slot_unlock(struct pci_slot *slot)
5237{
5238 struct pci_dev *dev;
5239
5240 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5241 if (!dev->slot || dev->slot != slot)
5242 continue;
5243 if (dev->subordinate)
5244 pci_bus_unlock(dev->subordinate);
5245 pci_dev_unlock(dev);
5246 }
5247}
5248
/* Return 1 on successful lock, 0 on contention */
5250static int pci_slot_trylock(struct pci_slot *slot)
5251{
5252 struct pci_dev *dev;
5253
5254 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5255 if (!dev->slot || dev->slot != slot)
5256 continue;
5257 if (!pci_dev_trylock(dev))
5258 goto unlock;
5259 if (dev->subordinate) {
5260 if (!pci_bus_trylock(dev->subordinate)) {
5261 pci_dev_unlock(dev);
5262 goto unlock;
5263 }
5264 }
5265 }
5266 return 1;
5267
5268unlock:
5269 list_for_each_entry_continue_reverse(dev,
5270 &slot->bus->devices, bus_list) {
5271 if (!dev->slot || dev->slot != slot)
5272 continue;
5273 if (dev->subordinate)
5274 pci_bus_unlock(dev->subordinate);
5275 pci_dev_unlock(dev);
5276 }
5277 return 0;
5278}
5279
5280
/*
 * Save and disable devices from the top of the tree down while holding
 * the @dev mutex lock for the entire tree.
 */
5284static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5285{
5286 struct pci_dev *dev;
5287
5288 list_for_each_entry(dev, &bus->devices, bus_list) {
5289 pci_dev_save_and_disable(dev);
5290 if (dev->subordinate)
5291 pci_bus_save_and_disable_locked(dev->subordinate);
5292 }
5293}
5294
5295
/*
 * Restore devices from top of the tree down while holding @dev mutex lock
 * for the entire tree.  Parent bridges need to be restored before we can
 * get to subordinate devices.
 */
5300static void pci_bus_restore_locked(struct pci_bus *bus)
5301{
5302 struct pci_dev *dev;
5303
5304 list_for_each_entry(dev, &bus->devices, bus_list) {
5305 pci_dev_restore(dev);
5306 if (dev->subordinate)
5307 pci_bus_restore_locked(dev->subordinate);
5308 }
5309}
5310
/*
 * Save and disable devices from the top of the tree down while holding
 * the @dev mutex lock for the entire tree.
 */
5315static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5316{
5317 struct pci_dev *dev;
5318
5319 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5320 if (!dev->slot || dev->slot != slot)
5321 continue;
5322 pci_dev_save_and_disable(dev);
5323 if (dev->subordinate)
5324 pci_bus_save_and_disable_locked(dev->subordinate);
5325 }
5326}
5327
/*
 * Restore devices from top of the tree down while holding @dev mutex lock
 * for the entire tree.  Parent bridges need to be restored before we can
 * get to subordinate devices.
 */
5333static void pci_slot_restore_locked(struct pci_slot *slot)
5334{
5335 struct pci_dev *dev;
5336
5337 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5338 if (!dev->slot || dev->slot != slot)
5339 continue;
5340 pci_dev_restore(dev);
5341 if (dev->subordinate)
5342 pci_bus_restore_locked(dev->subordinate);
5343 }
5344}
5345
5346static int pci_slot_reset(struct pci_slot *slot, int probe)
5347{
5348 int rc;
5349
5350 if (!slot || !pci_slot_resetable(slot))
5351 return -ENOTTY;
5352
5353 if (!probe)
5354 pci_slot_lock(slot);
5355
5356 might_sleep();
5357
5358 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5359
5360 if (!probe)
5361 pci_slot_unlock(slot);
5362
5363 return rc;
5364}
5365
/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
5372int pci_probe_reset_slot(struct pci_slot *slot)
5373{
5374 return pci_slot_reset(slot, 1);
5375}
5376EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5377
/**
 * __pci_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots.  For instance, some slots may support slot power
 * control.  In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * function of the slot and any subordinate buses behind the slot are reset
 * through this function.  PCI config space of all devices in the slot and
 * behind the slot is saved before and restored after reset.
 *
 * Same as above except return -EAGAIN if the slot cannot be locked
 */
5393static int __pci_reset_slot(struct pci_slot *slot)
5394{
5395 int rc;
5396
5397 rc = pci_slot_reset(slot, 1);
5398 if (rc)
5399 return rc;
5400
5401 if (pci_slot_trylock(slot)) {
5402 pci_slot_save_and_disable_locked(slot);
5403 might_sleep();
5404 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5405 pci_slot_restore_locked(slot);
5406 pci_slot_unlock(slot);
5407 } else
5408 rc = -EAGAIN;
5409
5410 return rc;
5411}
5412
5413static int pci_bus_reset(struct pci_bus *bus, int probe)
5414{
5415 int ret;
5416
5417 if (!bus->self || !pci_bus_resetable(bus))
5418 return -ENOTTY;
5419
5420 if (probe)
5421 return 0;
5422
5423 pci_bus_lock(bus);
5424
5425 might_sleep();
5426
5427 ret = pci_bridge_secondary_bus_reset(bus->self);
5428
5429 pci_bus_unlock(bus);
5430
5431 return ret;
5432}
5433
/**
 * pci_bus_error_reset - reset the bridge's subordinate bus
 * @bridge: The parent device that connects to the bus to reset
 *
 * This function will first try to reset the slots on this bus if the method is
 * available. If slot reset fails or is not available, this will fall back to a
 * secondary bus reset.
 */
5442int pci_bus_error_reset(struct pci_dev *bridge)
5443{
5444 struct pci_bus *bus = bridge->subordinate;
5445 struct pci_slot *slot;
5446
5447 if (!bus)
5448 return -ENOTTY;
5449
5450 mutex_lock(&pci_slot_mutex);
5451 if (list_empty(&bus->slots))
5452 goto bus_reset;
5453
5454 list_for_each_entry(slot, &bus->slots, list)
5455 if (pci_probe_reset_slot(slot))
5456 goto bus_reset;
5457
5458 list_for_each_entry(slot, &bus->slots, list)
5459 if (pci_slot_reset(slot, 0))
5460 goto bus_reset;
5461
5462 mutex_unlock(&pci_slot_mutex);
5463 return 0;
5464bus_reset:
5465 mutex_unlock(&pci_slot_mutex);
5466 return pci_bus_reset(bridge->subordinate, 0);
5467}
5468
/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
5475int pci_probe_reset_bus(struct pci_bus *bus)
5476{
5477 return pci_bus_reset(bus, 1);
5478}
5479EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5480
/**
 * __pci_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
5487static int __pci_reset_bus(struct pci_bus *bus)
5488{
5489 int rc;
5490
5491 rc = pci_bus_reset(bus, 1);
5492 if (rc)
5493 return rc;
5494
5495 if (pci_bus_trylock(bus)) {
5496 pci_bus_save_and_disable_locked(bus);
5497 might_sleep();
5498 rc = pci_bridge_secondary_bus_reset(bus->self);
5499 pci_bus_restore_locked(bus);
5500 pci_bus_unlock(bus);
5501 } else
5502 rc = -EAGAIN;
5503
5504 return rc;
5505}
5506
/**
 * pci_reset_bus - Try to reset a PCI bus
 * @pdev: top level PCI device to reset via slot/bus
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
5513int pci_reset_bus(struct pci_dev *pdev)
5514{
5515 return (!pci_probe_reset_slot(pdev->slot)) ?
5516 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5517}
5518EXPORT_SYMBOL_GPL(pci_reset_bus);
5519
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes or
 * appropriate error value.
 */
5527int pcix_get_max_mmrbc(struct pci_dev *dev)
5528{
5529 int cap;
5530 u32 stat;
5531
5532 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5533 if (!cap)
5534 return -EINVAL;
5535
5536 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5537 return -EINVAL;
5538
5539 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5540}
5541EXPORT_SYMBOL(pcix_get_max_mmrbc);
5542
/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes or appropriate error
 * value.
 */
5550int pcix_get_mmrbc(struct pci_dev *dev)
5551{
5552 int cap;
5553 u16 cmd;
5554
5555 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5556 if (!cap)
5557 return -EINVAL;
5558
5559 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5560 return -EINVAL;
5561
5562 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5563}
5564EXPORT_SYMBOL(pcix_get_mmrbc);
5565
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have
 * errata that prevent this.
 */
5575int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5576{
5577 int cap;
5578 u32 stat, v, o;
5579 u16 cmd;
5580
5581 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5582 return -EINVAL;
5583
5584 v = ffs(mmrbc) - 10;
5585
5586 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5587 if (!cap)
5588 return -EINVAL;
5589
5590 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5591 return -EINVAL;
5592
5593 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5594 return -E2BIG;
5595
5596 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5597 return -EINVAL;
5598
5599 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5600 if (o != v) {
5601 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5602 return -EIO;
5603
5604 cmd &= ~PCI_X_CMD_MAX_READ;
5605 cmd |= v << 2;
5606 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5607 return -EIO;
5608 }
5609 return 0;
5610}
5611EXPORT_SYMBOL(pcix_set_mmrbc);
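
/*
 * Encoding example (illustrative): the PCI-X command field holds
 * log2(mmrbc) - 9, so mmrbc = 2048 gives v = ffs(2048) - 10 = 2, and
 * pcix_get_mmrbc() decodes it back as 512 << 2 = 2048.
 */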
5612
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes or appropriate error value.
 */
5619int pcie_get_readrq(struct pci_dev *dev)
5620{
5621 u16 ctl;
5622
5623 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5624
5625 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5626}
5627EXPORT_SYMBOL(pcie_get_readrq);
5628
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
5637int pcie_set_readrq(struct pci_dev *dev, int rq)
5638{
5639 u16 v;
5640
5641 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5642 return -EINVAL;
5643
	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to keep the host bridge from
	 * generating requests larger than we can cope with.
	 */
5649 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5650 int mps = pcie_get_mps(dev);
5651
5652 if (mps < rq)
5653 rq = mps;
5654 }
5655
5656 v = (ffs(rq) - 8) << 12;
5657
5658 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5659 PCI_EXP_DEVCTL_READRQ, v);
5660}
5661EXPORT_SYMBOL(pcie_set_readrq);
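
/*
 * Encoding example (illustrative): the READRQ field in bits 14:12 of the
 * Device Control register holds log2(rq) - 7, so rq = 512 gives
 * (ffs(512) - 8) << 12 = 0x2000, and pcie_get_readrq() decodes it back
 * as 128 << 2 = 512.
 */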
5662
/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 */
5669int pcie_get_mps(struct pci_dev *dev)
5670{
5671 u16 ctl;
5672
5673 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5674
5675 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5676}
5677EXPORT_SYMBOL(pcie_get_mps);
5678
/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
5687int pcie_set_mps(struct pci_dev *dev, int mps)
5688{
5689 u16 v;
5690
5691 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5692 return -EINVAL;
5693
5694 v = ffs(mps) - 8;
5695 if (v > dev->pcie_mpss)
5696 return -EINVAL;
5697 v <<= 5;
5698
5699 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5700 PCI_EXP_DEVCTL_PAYLOAD, v);
5701}
5702EXPORT_SYMBOL(pcie_set_mps);
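
/*
 * Encoding example (illustrative): mps = 256 gives v = ffs(256) - 8 = 1,
 * which is valid only if dev->pcie_mpss >= 1, and lands in bits 7:5 of
 * the Device Control register as 1 << 5.  pcie_get_mps() decodes it back
 * as 128 << 1 = 256.
 */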
5703
/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
5718u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5719 enum pci_bus_speed *speed,
5720 enum pcie_link_width *width)
5721{
5722 u16 lnksta;
5723 enum pci_bus_speed next_speed;
5724 enum pcie_link_width next_width;
5725 u32 bw, next_bw;
5726
5727 if (speed)
5728 *speed = PCI_SPEED_UNKNOWN;
5729 if (width)
5730 *width = PCIE_LNK_WIDTH_UNKNOWN;
5731
5732 bw = 0;
5733
5734 while (dev) {
5735 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5736
5737 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5738 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5739 PCI_EXP_LNKSTA_NLW_SHIFT;
5740
5741 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5742
		/* Check if current device limits the total bandwidth */
5744 if (!bw || next_bw <= bw) {
5745 bw = next_bw;
5746
5747 if (limiting_dev)
5748 *limiting_dev = dev;
5749 if (speed)
5750 *speed = next_speed;
5751 if (width)
5752 *width = next_width;
5753 }
5754
5755 dev = pci_upstream_bridge(dev);
5756 }
5757
5758 return bw;
5759}
5760EXPORT_SYMBOL(pcie_bandwidth_available);
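
/*
 * Example (illustrative, assuming the PCIE_SPEED2MBS_ENC() table in
 * pci.h): an x8 link running at 8.0 GT/s contributes
 * 8 * PCIE_SPEED2MBS_ENC(PCIE_SPEED_8_0GT) Mb/s here; with 128b/130b
 * encoding overhead that is roughly 8 * 7877 = 63016 Mb/s of raw
 * bandwidth.
 */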
5761
/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.  Return the maximum link speed
 * supported by the device.
 */
5769enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5770{
5771 u32 lnkcap2, lnkcap;
5772
	/*
	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
	 * implementation note there recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * must use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
5782 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5783 if (lnkcap2) {
5784 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
5785 return PCIE_SPEED_32_0GT;
5786 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
5787 return PCIE_SPEED_16_0GT;
5788 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5789 return PCIE_SPEED_8_0GT;
5790 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5791 return PCIE_SPEED_5_0GT;
5792 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5793 return PCIE_SPEED_2_5GT;
5794 return PCI_SPEED_UNKNOWN;
5795 }
5796
5797 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5798 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5799 return PCIE_SPEED_5_0GT;
5800 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5801 return PCIE_SPEED_2_5GT;
5802
5803 return PCI_SPEED_UNKNOWN;
5804}
5805EXPORT_SYMBOL(pcie_get_speed_cap);
5806
/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability.  Return the maximum link width
 * supported by the device.
 */
5814enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5815{
5816 u32 lnkcap;
5817
5818 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5819 if (lnkcap)
5820 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5821
5822 return PCIE_LNK_WIDTH_UNKNOWN;
5823}
5824EXPORT_SYMBOL(pcie_get_width_cap);
5825
/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead.  The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
5836u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5837 enum pcie_link_width *width)
5838{
5839 *speed = pcie_get_speed_cap(dev);
5840 *width = pcie_get_width_cap(dev);
5841
5842 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5843 return 0;
5844
5845 return *width * PCIE_SPEED2MBS_ENC(*speed);
5846}
5847
/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance.  If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
5858void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5859{
5860 enum pcie_link_width width, width_cap;
5861 enum pci_bus_speed speed, speed_cap;
5862 struct pci_dev *limiting_dev = NULL;
5863 u32 bw_avail, bw_cap;
5864
5865 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5866 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5867
5868 if (bw_avail >= bw_cap && verbose)
5869 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5870 bw_cap / 1000, bw_cap % 1000,
5871 PCIE_SPEED2STR(speed_cap), width_cap);
5872 else if (bw_avail < bw_cap)
5873 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5874 bw_avail / 1000, bw_avail % 1000,
5875 PCIE_SPEED2STR(speed), width,
5876 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5877 bw_cap / 1000, bw_cap % 1000,
5878 PCIE_SPEED2STR(speed_cap), width_cap);
5879}
5880
/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
5887void pcie_print_link_status(struct pci_dev *dev)
5888{
5889 __pcie_print_link_status(dev, true);
5890}
5891EXPORT_SYMBOL(pcie_print_link_status);
5892
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
5900int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5901{
5902 int i, bars = 0;
5903 for (i = 0; i < PCI_NUM_RESOURCES; i++)
5904 if (pci_resource_flags(dev, i) & flags)
5905 bars |= (1 << i);
5906 return bars;
5907}
5908EXPORT_SYMBOL(pci_select_bars);
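
/*
 * Usage sketch (illustrative; DRV_NAME is a placeholder): request only
 * the memory BARs of a device:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	rc = pci_request_selected_regions(pdev, bars, DRV_NAME);
 */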
5909
/* Some architectures require additional programming to enable VGA */
5911static arch_set_vga_state_t arch_set_vga_state;
5912
5913void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5914{
5915 arch_set_vga_state = func;
5916}
5917
5918static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5919 unsigned int command_bits, u32 flags)
5920{
5921 if (arch_set_vga_state)
5922 return arch_set_vga_state(dev, decode, command_bits,
5923 flags);
5924 return 0;
5925}
5926
/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE
 */
5935int pci_set_vga_state(struct pci_dev *dev, bool decode,
5936 unsigned int command_bits, u32 flags)
5937{
5938 struct pci_bus *bus;
5939 struct pci_dev *bridge;
5940 u16 cmd;
5941 int rc;
5942
	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
5944
	/* ARCH specific VGA enables */
5946 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5947 if (rc)
5948 return rc;
5949
5950 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5951 pci_read_config_word(dev, PCI_COMMAND, &cmd);
5952 if (decode == true)
5953 cmd |= command_bits;
5954 else
5955 cmd &= ~command_bits;
5956 pci_write_config_word(dev, PCI_COMMAND, cmd);
5957 }
5958
5959 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
5960 return 0;
5961
5962 bus = dev->bus;
5963 while (bus) {
5964 bridge = bus->self;
5965 if (bridge) {
5966 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5967 &cmd);
5968 if (decode == true)
5969 cmd |= PCI_BRIDGE_CTL_VGA;
5970 else
5971 cmd &= ~PCI_BRIDGE_CTL_VGA;
5972 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5973 cmd);
5974 }
5975 bus = bus->parent;
5976 }
5977 return 0;
5978}
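/*
 * Usage sketch (illustrative): the VGA arbiter is the typical caller.  To
 * stop a device from claiming legacy VGA cycles along its whole upstream
 * path, one might do:
 *
 *	pci_set_vga_state(pdev, false,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */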

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn: alias slot and function
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
{
	if (!dev->dma_alias_mask)
		/* devfn is 8 bits wide, so the mask needs U8_MAX + 1 bits */
		dev->dma_alias_mask = bitmap_zalloc(U8_MAX + 1, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	set_bit(devfn, dev->dma_alias_mask);
	pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
		 PCI_SLOT(devfn), PCI_FUNC(devfn));
}
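/*
 * Example (hypothetical quirk sketch): a multifunction device whose
 * non-zero functions issue DMA with function 0's requester ID could be
 * handled with a header fixup along these lines; the vendor/device IDs
 * and quirk name are placeholders:
 *
 *	static void quirk_mydev_dma_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 0)
 *			pci_add_dma_alias(dev,
 *					  PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_mydev_dma_alias);
 */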

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask));
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);
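/*
 * Usage sketch (illustrative): hotplug-aware teardown paths can use this
 * to skip hardware accesses after a surprise removal.  mydrv_quiesce_hw()
 * is a hypothetical helper:
 *
 *	if (pci_device_is_present(pdev))
 *		mydrv_quiesce_hw(pdev);
 */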

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;

	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			if (align_order < 0 ||
			    align_order >= (int)(8 * sizeof(resource_size_t)))
				/* No order given, or a bogus one: use a page */
				align = PAGE_SIZE;
			else
				align = (resource_size_t)1 << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
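/*
 * Parameter format (illustrative): the string is a semicolon- or
 * comma-separated list of "[<order>@]<device>" entries, where <device> is
 * any specifier accepted by pci_dev_str_match(), e.g. a
 * "<domain>:<bus>:<dev>.<fn>" path or a "pci:<vendor>:<device>" ID.  For
 * example:
 *
 *	pci=resource_alignment=20@0000:00:02.0
 *
 * requests 2^20 = 1 MiB alignment for the device at 0000:00:02.0, while
 * omitting the order defaults to PAGE_SIZE alignment.
 */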

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * resources and we use the second.
	 */
	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There is no way for us to change their alignment.
	 */
	if (dev->is_virtfn)
		return;

	/* Check if the specified device is a target for reassignment */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable the bridge's resource window so the kernel
	 * can reassign a new resource window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	/*
	 * When set by the command line, resource_alignment_param will not
	 * have a trailing line feed, which is ugly.  So conditionally add
	 * it here, without clobbering the last character of the parameter.
	 */
	if (count && buf[count - 1] != '\n' && count < PAGE_SIZE - 1)
		buf[count++] = '\n';

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param = kstrndup(buf, count, GFP_KERNEL);

	if (!param)
		return -ENOMEM;

	spin_lock(&resource_alignment_lock);
	kfree(resource_alignment_param);
	resource_alignment_param = param;
	spin_unlock(&resource_alignment_lock);
	return count;
}
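/*
 * Usage sketch (illustrative): the same syntax as the boot parameter can
 * be written at runtime through the bus attribute created below, e.g.:
 *
 *	echo "20@0000:00:02.0" > /sys/bus/pci/resource_alignment
 *
 * The new value only affects devices (re)enumerated afterwards, e.g. after
 * a remove/rescan cycle.
 */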

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has inconsistent \"linux,pci-domain\" property in DT\n",
			       parent->of_node);
		else
			pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
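/*
 * Example boot line (illustrative): options are comma-separated and the
 * size arguments accept the usual memparse() suffixes, e.g.:
 *
 *	pci=nomsi,hpmemsize=8M,resource_alignment=20@0000:00:02.0
 *
 * disables MSI, reserves 8 MiB of MMIO (and MMIO_PREF) window space per
 * hotplug bridge, and requests 1 MiB alignment for device 0000:00:02.0.
 */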

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are essentially
 * set up in pci_setup(), above, to point to data in the __initdata section
 * which will be freed after the init sequence is complete.  We can't allocate
 * memory in pci_setup() because some architectures do not have any memory
 * allocation service available during an early_param() call.  So we allocate
 * memory and copy the variables here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);