// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'; using a path is
 * more robust against bus renumbering than a plain bus/device/function
 * address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is left
 * unspecified, it is taken to be 0.  In order to be robust against bus
 * renumbering issues, a path of device/function numbers may be appended
 * to address the specific device.
 *
 * The second format matches devices using IDs in the configuration space
 * which may match multiple devices in the system. A value of 0 in any
 * field will match all devices.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

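/*
 * Walk the capability list starting at @pos looking for capability ID
 * @cap.  The walk is bounded by *@ttl to guard against malformed lists
 * that loop forever; each visited entry decrements the budget.
 */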
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through - force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		msleep(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen will set up the device's state
	 * explicitly, so we can't rely on it; restore the BARs if a
	 * soft reset may have occurred.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * PCI config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in
	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
	 * corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_wakeup_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

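/*
 * Look up the save buffer previously allocated for capability @cap;
 * @extended selects between conventional and extended capability IDs.
 */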
static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

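/*
 * Save the writable PCIe capability registers (Device/Link/Slot/Root
 * Control plus the "2" variants) so pci_restore_pcie_state() can write
 * them back verbatim; the buffer holds PCI_EXP_SAVE_REGS 16-bit words.
 */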
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

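/*
 * Write @saved_val to config space @offset if it differs from the
 * current value (or unconditionally if @force), re-reading and retrying
 * up to @retry times with a 1 ms delay for devices that are slow to
 * accept the write.
 */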
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

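/*
 * Re-program the Resizable BAR control registers from the current BAR
 * sizes, since a reset may have reverted them to their defaults.
 */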
static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = ilog2(resource_size(res)) - 20;
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);
	pci_restore_aer_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

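/*
 * Recursively enable and set bus mastering on all bridges upstream of
 * @dev so that the device itself can then be enabled below them.
 */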
static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

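/*
 * Common worker for pci_enable_device_io/mem(): enables the BARs whose
 * resource flags intersect @flags, tracking nesting via enable_cnt.
 */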
static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);

/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

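/*
 * devres release callback: undo everything pcim_enable_device() and
 * friends set up (MSI/MSI-X, regions, MWI, INTx, device enable).
 */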
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

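/* Find the pci_devres for @pdev, allocating it on first use. */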
static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

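/*
 * Delayed-work handler that polls the PME status of devices on
 * pci_pme_list (those whose PME# signalling is unreliable) and
 * reschedules itself every PME_TIMEOUT ms while the list is non-empty.
 */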
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

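/*
 * Set or clear PME_En in PMCSR; PME_Status is written as 1 in either
 * case, which clears any pending PME.
 */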
static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */
	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device wake-up from sleep states
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wake-up events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */
	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for
 * it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* else, fall through */
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, it may have been powered down by non-standard means.
	 * Best to let it slumber.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device
 * supports PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended or it has to be
 * reconfigured due to wakeup settings difference between system and runtime
 * suspend, or the current power state of it is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
	return target_state != pci_dev->current_state &&
		target_state != PCI_D3cold &&
		pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to handle.
 *
 * Disable device's configuration space PME# generation upon the system-wide
 * transition to a sleep state if it is going to remain runtime-suspended and
 * device wakeup is disabled for it.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * part of completing resume from system sleep.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it.  However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
#endif
	{ }
};

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management
		 * Mode may not be put into D3 by the OS (Thunderbolt on
		 * non-Macs).
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Even the oldest 2010 Thunderbolt controller supports D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not validated
		 * by vendors for runtime D3 at least until 2018 because there
		 * was no OS support.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				 dev->d1_support ? " D1" : "",
				 dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
}

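/*
 * Translate an Enhanced Allocation entry property value into resource
 * flags; returns 0 for properties this code does not handle.
 */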
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

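/*
 * Map an EA BAR Equivalent Indicator (plus property) to the matching
 * struct resource: standard BARs, VF BARs (with CONFIG_PCI_IOV), or ROM.
 */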
static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}

2929
2930static int pci_ea_read(struct pci_dev *dev, int offset)
2931{
2932 struct resource *res;
2933 int ent_size, ent_offset = offset;
2934 resource_size_t start, end;
2935 unsigned long flags;
2936 u32 dw0, bei, base, max_offset;
2937 u8 prop;
2938 bool support_64 = (sizeof(resource_size_t) >= 8);
2939
2940 pci_read_config_dword(dev, ent_offset, &dw0);
2941 ent_offset += 4;
2942
2943
2944 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2945
2946 if (!(dw0 & PCI_EA_ENABLE))
2947 goto out;
2948
2949 bei = (dw0 & PCI_EA_BEI) >> 4;
2950 prop = (dw0 & PCI_EA_PP) >> 8;
2951
 /*
  * If the Property is in the reserved range, try the Secondary
  * Property instead.
  */
2956 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2957 prop = (dw0 & PCI_EA_SP) >> 16;
2958 if (prop > PCI_EA_P_BRIDGE_IO)
2959 goto out;
2960
2961 res = pci_ea_get_resource(dev, bei, prop);
2962 if (!res) {
2963 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2964 goto out;
2965 }
2966
2967 flags = pci_ea_flags(dev, prop);
2968 if (!flags) {
2969 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2970 goto out;
2971 }
2972
2973
2974 pci_read_config_dword(dev, ent_offset, &base);
2975 start = (base & PCI_EA_FIELD_MASK);
2976 ent_offset += 4;
2977
2978
2979 pci_read_config_dword(dev, ent_offset, &max_offset);
2980 ent_offset += 4;
2981
2982
2983 if (base & PCI_EA_IS_64) {
2984 u32 base_upper;
2985
2986 pci_read_config_dword(dev, ent_offset, &base_upper);
2987 ent_offset += 4;
2988
2989 flags |= IORESOURCE_MEM_64;
2990
2991
2992 if (!support_64 && base_upper)
2993 goto out;
2994
2995 if (support_64)
2996 start |= ((u64)base_upper << 32);
2997 }
2998
2999 end = start + (max_offset | 0x03);
3000
3001
3002 if (max_offset & PCI_EA_IS_64) {
3003 u32 max_offset_upper;
3004
3005 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3006 ent_offset += 4;
3007
3008 flags |= IORESOURCE_MEM_64;
3009
3010
3011 if (!support_64 && max_offset_upper)
3012 goto out;
3013
3014 if (support_64)
3015 end += ((u64)max_offset_upper << 32);
3016 }
3017
3018 if (end < start) {
3019 pci_err(dev, "EA Entry crosses address boundary\n");
3020 goto out;
3021 }
3022
3023 if (ent_size != ent_offset - offset) {
3024 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3025 ent_size, ent_offset - offset);
3026 goto out;
3027 }
3028
3029 res->name = pci_name(dev);
3030 res->start = start;
3031 res->end = end;
3032 res->flags = flags;
3033
3034 if (bei <= PCI_EA_BEI_BAR5)
3035 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3036 bei, res, prop);
3037 else if (bei == PCI_EA_BEI_ROM)
3038 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3039 res, prop);
3040 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3041 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3042 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3043 else
3044 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3045 bei, res, prop);
3046
3047out:
3048 return offset + ent_size;
3049}
3050
/* Enhanced Allocation Initialization */
3052void pci_ea_init(struct pci_dev *dev)
3053{
3054 int ea;
3055 u8 num_ent;
3056 int offset;
3057 int i;
3058
3059
3060 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3061 if (!ea)
3062 return;
3063
3064
3065 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3066 &num_ent);
3067 num_ent &= PCI_EA_NUM_ENT_MASK;
3068
3069 offset = ea + PCI_EA_FIRST_ENT;
3070
3071
3072 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3073 offset += 4;
3074
3075
3076 for (i = 0; i < num_ent; ++i)
3077 offset = pci_ea_read(dev, offset);
3078}
3079
3080static void pci_add_saved_cap(struct pci_dev *pci_dev,
3081 struct pci_cap_saved_state *new_cap)
3082{
3083 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3084}
3085
/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *                            capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
3094static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3095 bool extended, unsigned int size)
3096{
3097 int pos;
3098 struct pci_cap_saved_state *save_state;
3099
3100 if (extended)
3101 pos = pci_find_ext_capability(dev, cap);
3102 else
3103 pos = pci_find_capability(dev, cap);
3104
3105 if (!pos)
3106 return 0;
3107
3108 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3109 if (!save_state)
3110 return -ENOMEM;
3111
3112 save_state->cap.cap_nr = cap;
3113 save_state->cap.cap_extended = extended;
3114 save_state->cap.size = size;
3115 pci_add_saved_cap(dev, save_state);
3116
3117 return 0;
3118}
3119
3120int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3121{
3122 return _pci_add_cap_save_buffer(dev, cap, false, size);
3123}
3124
3125int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3126{
3127 return _pci_add_cap_save_buffer(dev, cap, true, size);
3128}
3129
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
3134void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3135{
3136 int error;
3137
3138 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3139 PCI_EXP_SAVE_REGS * sizeof(u16));
3140 if (error)
3141 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3142
3143 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3144 if (error)
3145 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3146
3147 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3148 2 * sizeof(u16));
3149 if (error)
3150 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3151
3152 pci_allocate_vc_save_buffers(dev);
3153}
3154
3155void pci_free_cap_save_buffers(struct pci_dev *dev)
3156{
3157 struct pci_cap_saved_state *tmp;
3158 struct hlist_node *n;
3159
3160 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3161 kfree(tmp);
3162}
3163
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge if it previously was
 * enabled.
 */
3171void pci_configure_ari(struct pci_dev *dev)
3172{
3173 u32 cap;
3174 struct pci_dev *bridge;
3175
3176 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3177 return;
3178
3179 bridge = dev->bus->self;
3180 if (!bridge)
3181 return;
3182
3183 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3184 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3185 return;
3186
3187 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3188 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3189 PCI_EXP_DEVCTL2_ARI);
3190 bridge->ari_enabled = 1;
3191 } else {
3192 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3193 PCI_EXP_DEVCTL2_ARI);
3194 bridge->ari_enabled = 0;
3195 }
3196}
3197
3198static int pci_acs_enable;
3199
/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
3203void pci_request_acs(void)
3204{
3205 pci_acs_enable = 1;
3206}
3207
3208static const char *disable_acs_redir_param;
3209
/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
3216static void pci_disable_acs_redir(struct pci_dev *dev)
3217{
3218 int ret = 0;
3219 const char *p;
3220 int pos;
3221 u16 ctrl;
3222
3223 if (!disable_acs_redir_param)
3224 return;
3225
3226 p = disable_acs_redir_param;
3227 while (*p) {
3228 ret = pci_dev_str_match(dev, p, &p);
3229 if (ret < 0) {
3230 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
3231 disable_acs_redir_param);
3232
3233 break;
3234 } else if (ret == 1) {
3235
3236 break;
3237 }
3238
3239 if (*p != ';' && *p != ',') {
3240
3241 break;
3242 }
3243 p++;
3244 }
3245
3246 if (ret != 1)
3247 return;
3248
3249 if (!pci_dev_specific_disable_acs_redir(dev))
3250 return;
3251
3252 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3253 if (!pos) {
3254 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
3255 return;
3256 }
3257
3258 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3259
 /* P2P Request & Completion Redirect */
3261 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
3262
3263 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3264
3265 pci_info(dev, "disabled ACS redirect\n");
3266}
3267
/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
3272static void pci_std_enable_acs(struct pci_dev *dev)
3273{
3274 int pos;
3275 u16 cap;
3276 u16 ctrl;
3277
3278 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3279 if (!pos)
3280 return;
3281
3282 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
3283 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
 /* Source Validation */
 ctrl |= (cap & PCI_ACS_SV);

 /* P2P Request Redirect */
 ctrl |= (cap & PCI_ACS_RR);

 /* P2P Completion Redirect */
 ctrl |= (cap & PCI_ACS_CR);

 /* Upstream Forwarding */
 ctrl |= (cap & PCI_ACS_UF);
3296
3297 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3298}
3299
/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
3304void pci_enable_acs(struct pci_dev *dev)
3305{
3306 if (!pci_acs_enable)
3307 goto disable_acs_redir;
3308
3309 if (!pci_dev_specific_enable_acs(dev))
3310 goto disable_acs_redir;
3311
3312 pci_std_enable_acs(dev);
3313
3314disable_acs_redir:
 /*
  * Note: pci_disable_acs_redir() must be called even if ACS was not
  * enabled above, because firmware or an earlier setting may have
  * enabled redirection and the parameter can only disable it here.
  */
3322 pci_disable_acs_redir(dev);
3323}
3324
3325static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3326{
3327 int pos;
3328 u16 cap, ctrl;
3329
3330 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
3331 if (!pos)
3332 return false;
3333
 /*
  * Except for egress control, capabilities are either required
  * or only required if controllable.  Features missing from the
  * capability field can therefore be assumed as hard-wired enabled.
  */
3339 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3340 acs_flags &= (cap | PCI_ACS_EC);
3341
3342 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3343 return (ctrl & acs_flags) == acs_flags;
3344}
3345
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */
3362bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3363{
3364 int ret;
3365
3366 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3367 if (ret >= 0)
3368 return ret > 0;
3369
 /*
  * Conventional PCI and PCI-X devices never support ACS, either
  * effectively or actually.  The shared bus topology implies that
  * any device on the bus can receive or snoop DMA.
  */
3375 if (!pci_is_pcie(pdev))
3376 return false;
3377
3378 switch (pci_pcie_type(pdev)) {
 /*
  * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
  * but since their primary interface is PCI/X, we conservatively
  * handle them as we would a non-PCIe device.
  */
3384 case PCI_EXP_TYPE_PCIE_BRIDGE:
 /*
  * PCIe 3.0, 6.12.1 excludes ACS on these device types: PCIe-to-PCI/X
  * bridges and Root Complex Event Collectors never implement an ACS
  * Extended Capability.
  */
3391 case PCI_EXP_TYPE_PCI_BRIDGE:
3392 case PCI_EXP_TYPE_RC_EC:
3393 return false;
 /*
  * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
  * implement ACS in order to indicate their peer-to-peer capabilities,
  * regardless of whether they are single- or multi-function devices.
  */
3399 case PCI_EXP_TYPE_DOWNSTREAM:
3400 case PCI_EXP_TYPE_ROOT_PORT:
3401 return pci_acs_flags_enabled(pdev, acs_flags);
 /*
  * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
  * implemented by the remaining endpoint types.  Only multifunction
  * devices can generate peer-to-peer DMA to sibling functions, so a
  * single-function device has nothing ACS needs to isolate.
  */
3409 case PCI_EXP_TYPE_ENDPOINT:
3410 case PCI_EXP_TYPE_UPSTREAM:
3411 case PCI_EXP_TYPE_LEG_END:
3412 case PCI_EXP_TYPE_RC_END:
3413 if (!pdev->multifunction)
3414 break;
3415
3416 return pci_acs_flags_enabled(pdev, acs_flags);
3417 }
 /*
  * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
  * to single function devices with the exception of downstream ports.
  */
3423 return true;
3424}
3425
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end and test ACS flags found along
 * the way.  Return true if all flags are supported on every device.
 */
3435bool pci_acs_path_enabled(struct pci_dev *start,
3436 struct pci_dev *end, u16 acs_flags)
3437{
3438 struct pci_dev *pdev, *parent = start;
3439
3440 do {
3441 pdev = parent;
3442
3443 if (!pci_acs_enabled(pdev, acs_flags))
3444 return false;
3445
3446 if (pci_is_root_bus(pdev->bus))
3447 return (end == NULL);
3448
3449 parent = pdev->bus->self;
3450 } while (pdev != end);
3451
3452 return true;
3453}
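
/*
 * Illustrative sketch (not compiled, not part of this file): how a caller
 * such as an IOMMU driver might use pci_acs_path_enabled() to decide
 * whether a device is isolated from peer-to-peer DMA all the way to the
 * root.  example_dev_is_isolated() is a hypothetical name.
 */
#if 0
static bool example_dev_is_isolated(struct pci_dev *pdev)
{
 u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

 /* NULL @end walks all the way up to the root bus */
 return pci_acs_path_enabled(pdev, NULL, flags);
}
#endif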
3454
/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all,
 * -ENOENT if no ctrl register for the BAR could be found.
 */
3464static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3465{
3466 unsigned int pos, nbars, i;
3467 u32 ctrl;
3468
3469 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3470 if (!pos)
3471 return -ENOTSUPP;
3472
3473 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3474 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3475 PCI_REBAR_CTRL_NBAR_SHIFT;
3476
3477 for (i = 0; i < nbars; i++, pos += 8) {
3478 int bar_idx;
3479
3480 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3481 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3482 if (bar_idx == bar)
3483 return pos;
3484 }
3485
3486 return -ENOENT;
3487}
3488
/**
 * pci_rebar_get_possible_sizes - get possible sizes for BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
 * (bit 0=1MB, bit 19=512GB).  Returns 0 if BAR isn't resizable.
 */
3497u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3498{
3499 int pos;
3500 u32 cap;
3501
3502 pos = pci_rebar_find_pos(pdev, bar);
3503 if (pos < 0)
3504 return 0;
3505
3506 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3507 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3508}
3509
/**
 * pci_rebar_get_current_size - get the current size of a BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Read the size of a BAR from the resizable BAR config.
 * Returns size if found or negative error code.
 */
3518int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3519{
3520 int pos;
3521 u32 ctrl;
3522
3523 pos = pci_rebar_find_pos(pdev, bar);
3524 if (pos < 0)
3525 return pos;
3526
3527 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3528 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3529}
3530
/**
 * pci_rebar_set_size - set a new size for a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 * @size: new size as defined in the spec (0=1MB, 19=512GB)
 *
 * Set the new size of a BAR as defined in the spec.
 * Returns zero if resizing was successful, error code otherwise.
 */
3540int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3541{
3542 int pos;
3543 u32 ctrl;
3544
3545 pos = pci_rebar_find_pos(pdev, bar);
3546 if (pos < 0)
3547 return pos;
3548
3549 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3550 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3551 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3552 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3553 return 0;
3554}
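
/*
 * Illustrative sketch (not compiled): how the three ReBAR helpers above
 * combine to grow a BAR to the largest advertised size.  Note that these
 * helpers are static here and that a real resize must also release and
 * reallocate the BAR's resource; that part is omitted.  example_grow_bar()
 * is a hypothetical name.
 */
#if 0
static int example_grow_bar(struct pci_dev *pdev, int bar)
{
 u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);

 if (!sizes)
  return -ENOTSUPP;

 /* highest set bit is the largest supported size encoding */
 return pci_rebar_set_size(pdev, bar, __fls(sizes));
}
#endif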
3555
/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */
3569int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3570{
3571 struct pci_bus *bus = dev->bus;
3572 struct pci_dev *bridge;
3573 u32 cap, ctl2;
3574
3575 if (!pci_is_pcie(dev))
3576 return -EINVAL;
3577
 /*
  * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
  * AtomicOp requesters.  For now, we only support endpoints as
  * requesters and root ports as completers.  No endpoints as
  * completers, and no peer-to-peer.
  */
3585 switch (pci_pcie_type(dev)) {
3586 case PCI_EXP_TYPE_ENDPOINT:
3587 case PCI_EXP_TYPE_LEG_END:
3588 case PCI_EXP_TYPE_RC_END:
3589 break;
3590 default:
3591 return -EINVAL;
3592 }
3593
3594 while (bus->parent) {
3595 bridge = bus->self;
3596
3597 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3598
3599 switch (pci_pcie_type(bridge)) {
3600
3601 case PCI_EXP_TYPE_UPSTREAM:
3602 case PCI_EXP_TYPE_DOWNSTREAM:
3603 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3604 return -EINVAL;
3605 break;
3606
3607
3608 case PCI_EXP_TYPE_ROOT_PORT:
3609 if ((cap & cap_mask) != cap_mask)
3610 return -EINVAL;
3611 break;
3612 }
3613
3614
3615 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3616 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3617 &ctl2);
3618 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3619 return -EINVAL;
3620 }
3621
3622 bus = bus->parent;
3623 }
3624
3625 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3626 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3627 return 0;
3628}
3629EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
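
/*
 * Illustrative sketch (not compiled): an endpoint driver asking for
 * 64-bit AtomicOp completion at the root port before issuing AtomicOps.
 */
#if 0
 if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64))
  pci_warn(pdev, "64-bit AtomicOps to root port unavailable\n");
#endif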
3630
/**
 * pci_swizzle_interrupt_pin - swizzle INTx pin for a bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind a bridge.  This is the
 * default implementation required by the PCI-to-PCI bridge specification.
 * With ARI enabled, the slot number is always zero, so only the pin is
 * rotated.
 */
3642u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3643{
3644 int slot;
3645
3646 if (pci_ari_enabled(dev->bus))
3647 slot = 0;
3648 else
3649 slot = PCI_SLOT(dev->devfn);
3650
3651 return (((pin - 1) + slot) % 4) + 1;
3652}
3653
3654int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3655{
3656 u8 pin;
3657
3658 pin = dev->pin;
3659 if (!pin)
3660 return -1;
3661
3662 while (!pci_is_root_bus(dev->bus)) {
3663 pin = pci_swizzle_interrupt_pin(dev, pin);
3664 dev = dev->bus->self;
3665 }
3666 *bridge = dev;
3667 return pin;
3668}
3669
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all
 * PCI-to-PCI bridges all the way up to a PCI root bus.
 */
3678u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3679{
3680 u8 pin = *pinp;
3681
3682 while (!pci_is_root_bus(dev->bus)) {
3683 pin = pci_swizzle_interrupt_pin(dev, pin);
3684 dev = dev->bus->self;
3685 }
3686 *pinp = pin;
3687 return PCI_SLOT(dev->devfn);
3688}
3689EXPORT_SYMBOL_GPL(pci_common_swizzle);
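
/*
 * Worked example of the swizzle above: a device in slot 3 asserting INTB
 * (pin 2) behind a single bridge maps to pin ((2 - 1) + 3) % 4 + 1 = 1,
 * i.e. the bridge sees INTA.
 */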
3690
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region().  Call this function only
 * after all use of the PCI regions has ceased.
 */
3701void pci_release_region(struct pci_dev *pdev, int bar)
3702{
3703 struct pci_devres *dr;
3704
3705 if (pci_resource_len(pdev, bar) == 0)
3706 return;
3707 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3708 release_region(pci_resource_start(pdev, bar),
3709 pci_resource_len(pdev, bar));
3710 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3711 release_mem_region(pci_resource_start(pdev, bar),
3712 pci_resource_len(pdev, bar));
3713
3714 dr = find_pci_dr(pdev);
3715 if (dr)
3716 dr->region_mask &= ~(1 << bar);
3717}
3718EXPORT_SYMBOL(pci_release_region);
3719
/**
 * __pci_request_region - Reserved PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3739static int __pci_request_region(struct pci_dev *pdev, int bar,
3740 const char *res_name, int exclusive)
3741{
3742 struct pci_devres *dr;
3743
3744 if (pci_resource_len(pdev, bar) == 0)
3745 return 0;
3746
3747 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3748 if (!request_region(pci_resource_start(pdev, bar),
3749 pci_resource_len(pdev, bar), res_name))
3750 goto err_out;
3751 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3752 if (!__request_mem_region(pci_resource_start(pdev, bar),
3753 pci_resource_len(pdev, bar), res_name,
3754 exclusive))
3755 goto err_out;
3756 }
3757
3758 dr = find_pci_dr(pdev);
3759 if (dr)
3760 dr->region_mask |= 1 << bar;
3761
3762 return 0;
3763
3764err_out:
3765 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3766 &pdev->resource[bar]);
3767 return -EBUSY;
3768}
3769
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3784int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3785{
3786 return __pci_request_region(pdev, bar, res_name, 0);
3787}
3788EXPORT_SYMBOL(pci_request_region);
3789
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
3798void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3799{
3800 int i;
3801
3802 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3803 if (bars & (1 << i))
3804 pci_release_region(pdev, i);
3805}
3806EXPORT_SYMBOL(pci_release_selected_regions);
3807
3808static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3809 const char *res_name, int excl)
3810{
3811 int i;
3812
3813 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3814 if (bars & (1 << i))
3815 if (__pci_request_region(pdev, i, res_name, excl))
3816 goto err_out;
3817 return 0;
3818
3819err_out:
3820 while (--i >= 0)
3821 if (bars & (1 << i))
3822 pci_release_region(pdev, i);
3823
3824 return -EBUSY;
3825}
3826
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
3834int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3835 const char *res_name)
3836{
3837 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3838}
3839EXPORT_SYMBOL(pci_request_selected_regions);
3840
3841int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3842 const char *res_name)
3843{
3844 return __pci_request_selected_regions(pdev, bars, res_name,
3845 IORESOURCE_EXCLUSIVE);
3846}
3847EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3848
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_regions()
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions().  Call this function only
 * after all use of the PCI regions has ceased.
 */
3859void pci_release_regions(struct pci_dev *pdev)
3860{
3861 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3862}
3863EXPORT_SYMBOL(pci_release_regions);
3864
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3878int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3879{
3880 return pci_request_selected_regions(pdev,
3881 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3882}
3883EXPORT_SYMBOL(pci_request_regions);
3884
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name.  Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that /dev/mem
 * and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */
3900int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3901{
3902 return pci_request_selected_regions_exclusive(pdev,
3903 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3904}
3905EXPORT_SYMBOL(pci_request_regions_exclusive);
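
/*
 * Illustrative sketch (not compiled): the canonical probe() sequence
 * using the region helpers above.  "example" and example_probe() are
 * hypothetical names.
 */
#if 0
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
 int rc;

 rc = pci_enable_device(pdev);
 if (rc)
  return rc;

 rc = pci_request_regions(pdev, "example");
 if (rc) {
  pci_disable_device(pdev);
  return rc;
 }

 pci_set_master(pdev);
 return 0;
}
#endif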
3906
/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise.
 */
3911int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3912 resource_size_t size)
3913{
3914 int ret = 0;
3915#ifdef PCI_IOBASE
3916 struct logic_pio_hwaddr *range;
3917
3918 if (!size || addr + size < addr)
3919 return -EINVAL;
3920
3921 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3922 if (!range)
3923 return -ENOMEM;
3924
3925 range->fwnode = fwnode;
3926 range->size = size;
3927 range->hw_start = addr;
3928 range->flags = LOGIC_PIO_CPU_MMIO;
3929
3930 ret = logic_pio_register_range(range);
3931 if (ret)
3932 kfree(range);
3933#endif
3934
3935 return ret;
3936}
3937
3938phys_addr_t pci_pio_to_address(unsigned long pio)
3939{
3940 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3941
3942#ifdef PCI_IOBASE
3943 if (pio >= MMIO_UPPER_LIMIT)
3944 return address;
3945
3946 address = logic_pio_to_hwaddr(pio);
3947#endif
3948
3949 return address;
3950}
3951
3952unsigned long __weak pci_address_to_pio(phys_addr_t address)
3953{
3954#ifdef PCI_IOBASE
3955 return logic_pio_trans_cpuaddr(address);
3956#else
3957 if (address > IO_SPACE_LIMIT)
3958 return (unsigned long)-1;
3959
3960 return (unsigned long) address;
3961#endif
3962}
3963
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
3974int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3975{
3976#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3977 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3978
3979 if (!(res->flags & IORESOURCE_IO))
3980 return -EINVAL;
3981
3982 if (res->end > IO_SPACE_LIMIT)
3983 return -EINVAL;
3984
3985 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3986 pgprot_device(PAGE_KERNEL));
3987#else
 /*
  * This architecture does not have memory mapped I/O space,
  * so this function should never be called.
  */
3992 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3993 return -ENODEV;
3994#endif
3995}
3996EXPORT_SYMBOL(pci_remap_iospace);
3997
/**
 * pci_unmap_iospace - Unmap the memory mapped I/O space
 * @res: resource to be unmapped
 *
 * Unmap the CPU virtual address @res from virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
4006void pci_unmap_iospace(struct resource *res)
4007{
4008#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4009 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4010
4011 unmap_kernel_range(vaddr, resource_size(res));
4012#endif
4013}
4014EXPORT_SYMBOL(pci_unmap_iospace);
4015
4016static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4017{
4018 struct resource **res = ptr;
4019
4020 pci_unmap_iospace(*res);
4021}
4022
/**
 * devm_pci_remap_iospace - Managed pci_remap_iospace()
 * @dev: Generic device to remap IO address for
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
 * detach.
 */
4032int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4033 phys_addr_t phys_addr)
4034{
4035 const struct resource **ptr;
4036 int error;
4037
4038 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4039 if (!ptr)
4040 return -ENOMEM;
4041
4042 error = pci_remap_iospace(res, phys_addr);
4043 if (error) {
4044 devres_free(ptr);
4045 } else {
4046 *ptr = res;
4047 devres_add(dev, ptr);
4048 }
4049
4050 return error;
4051}
4052EXPORT_SYMBOL(devm_pci_remap_iospace);
4053
/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
 * detach.
 */
4063void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4064 resource_size_t offset,
4065 resource_size_t size)
4066{
4067 void __iomem **ptr, *addr;
4068
4069 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4070 if (!ptr)
4071 return NULL;
4072
4073 addr = pci_remap_cfgspace(offset, size);
4074 if (addr) {
4075 *ptr = addr;
4076 devres_add(dev, ptr);
4077 } else
4078 devres_free(ptr);
4079
4080 return addr;
4081}
4082EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4083
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
 * code on failure.  A usage sketch follows this function.
 */
4103void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4104 struct resource *res)
4105{
4106 resource_size_t size;
4107 const char *name;
4108 void __iomem *dest_ptr;
4109
4110 BUG_ON(!dev);
4111
4112 if (!res || resource_type(res) != IORESOURCE_MEM) {
4113 dev_err(dev, "invalid resource\n");
4114 return IOMEM_ERR_PTR(-EINVAL);
4115 }
4116
4117 size = resource_size(res);
4118 name = res->name ?: dev_name(dev);
4119
4120 if (!devm_request_mem_region(dev, res->start, size, name)) {
4121 dev_err(dev, "can't request region for resource %pR\n", res);
4122 return IOMEM_ERR_PTR(-EBUSY);
4123 }
4124
4125 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4126 if (!dest_ptr) {
4127 dev_err(dev, "ioremap failed for resource %pR\n", res);
4128 devm_release_mem_region(dev, res->start, size);
4129 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4130 }
4131
4132 return dest_ptr;
4133}
4134EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
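
/*
 * Illustrative sketch (not compiled): typical use from a host controller
 * driver's probe(), taking the config window from a platform resource.
 */
#if 0
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 if (IS_ERR(base))
  return PTR_ERR(base);
#endif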
4135
4136static void __pci_set_master(struct pci_dev *dev, bool enable)
4137{
4138 u16 old_cmd, cmd;
4139
4140 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4141 if (enable)
4142 cmd = old_cmd | PCI_COMMAND_MASTER;
4143 else
4144 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4145 if (cmd != old_cmd) {
4146 pci_dbg(dev, "%s bus mastering\n",
4147 enable ? "enabling" : "disabling");
4148 pci_write_config_word(dev, PCI_COMMAND, cmd);
4149 }
4150 dev->is_busmaster = enable;
4151}
4152
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
4160char * __weak __init pcibios_setup(char *str)
4161{
4162 return str;
4163}
4164
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
4173void __weak pcibios_set_master(struct pci_dev *dev)
4174{
4175 u8 lat;
4176
 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4178 if (pci_is_pcie(dev))
4179 return;
4180
4181 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4182 if (lat < 16)
4183 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4184 else if (lat > pcibios_max_latency)
4185 lat = pcibios_max_latency;
4186 else
4187 return;
4188
4189 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4190}
4191
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
4199void pci_set_master(struct pci_dev *dev)
4200{
4201 __pci_set_master(dev, true);
4202 pcibios_set_master(dev);
4203}
4204EXPORT_SYMBOL(pci_set_master);
4205
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
4210void pci_clear_master(struct pci_dev *dev)
4211{
4212 __pci_set_master(dev, false);
4213}
4214EXPORT_SYMBOL(pci_clear_master);
4215
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function to update the PCI cache line size field, and to verify
 * by reading it back that the device accepted the value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4226int pci_set_cacheline_size(struct pci_dev *dev)
4227{
4228 u8 cacheline_size;
4229
4230 if (!pci_cache_line_size)
4231 return -EINVAL;
4232
 /*
  * Validate current setting: the PCI_CACHE_LINE_SIZE must be
  * equal to or a multiple of the right value.
  */
4235 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4236 if (cacheline_size >= pci_cache_line_size &&
4237 (cacheline_size % pci_cache_line_size) == 0)
4238 return 0;
4239
4240
4241 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4242
4243 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4244 if (cacheline_size == pci_cache_line_size)
4245 return 0;
4246
4247 pci_info(dev, "cache line size of %d is not supported\n",
4248 pci_cache_line_size << 2);
4249
4250 return -EINVAL;
4251}
4252EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4253
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4262int pci_set_mwi(struct pci_dev *dev)
4263{
4264#ifdef PCI_DISABLE_MWI
4265 return 0;
4266#else
4267 int rc;
4268 u16 cmd;
4269
4270 rc = pci_set_cacheline_size(dev);
4271 if (rc)
4272 return rc;
4273
4274 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4275 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4276 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4277 cmd |= PCI_COMMAND_INVALIDATE;
4278 pci_write_config_word(dev, PCI_COMMAND, cmd);
4279 }
4280 return 0;
4281#endif
4282}
4283EXPORT_SYMBOL(pci_set_mwi);
4284
/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi().
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4293int pcim_set_mwi(struct pci_dev *dev)
4294{
4295 struct pci_devres *dr;
4296
4297 dr = find_pci_dr(dev);
4298 if (!dr)
4299 return -ENOMEM;
4300
4301 dr->mwi = 1;
4302 return pci_set_mwi(dev);
4303}
4304EXPORT_SYMBOL(pcim_set_mwi);
4305
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4315int pci_try_set_mwi(struct pci_dev *dev)
4316{
4317#ifdef PCI_DISABLE_MWI
4318 return 0;
4319#else
4320 return pci_set_mwi(dev);
4321#endif
4322}
4323EXPORT_SYMBOL(pci_try_set_mwi);
4324
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device.
 */
4331void pci_clear_mwi(struct pci_dev *dev)
4332{
4333#ifndef PCI_DISABLE_MWI
4334 u16 cmd;
4335
4336 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4337 if (cmd & PCI_COMMAND_INVALIDATE) {
4338 cmd &= ~PCI_COMMAND_INVALIDATE;
4339 pci_write_config_word(dev, PCI_COMMAND, cmd);
4340 }
4341#endif
4342}
4343EXPORT_SYMBOL(pci_clear_mwi);
4344
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev.
 */
4352void pci_intx(struct pci_dev *pdev, int enable)
4353{
4354 u16 pci_command, new;
4355
4356 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4357
4358 if (enable)
4359 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4360 else
4361 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4362
4363 if (new != pci_command) {
4364 struct pci_devres *dr;
4365
4366 pci_write_config_word(pdev, PCI_COMMAND, new);
4367
4368 dr = find_pci_dr(pdev);
4369 if (dr && !dr->restore_intx) {
4370 dr->restore_intx = 1;
4371 dr->orig_intx = !enable;
4372 }
4373 }
4374}
4375EXPORT_SYMBOL_GPL(pci_intx);
4376
4377static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4378{
4379 struct pci_bus *bus = dev->bus;
4380 bool mask_updated = true;
4381 u32 cmd_status_dword;
4382 u16 origcmd, newcmd;
4383 unsigned long flags;
4384 bool irq_pending;
4385
 /*
  * We do a single dword read to retrieve both command and status.
  * Document assumptions that make this possible.
  */
4390 BUILD_BUG_ON(PCI_COMMAND % 4);
4391 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4392
4393 raw_spin_lock_irqsave(&pci_lock, flags);
4394
4395 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4396
4397 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4398
 /*
  * Check interrupt status register to see whether our device
  * triggered the interrupt (when masking) or the next IRQ is
  * already pending (when unmasking).
  */
4404 if (mask != irq_pending) {
4405 mask_updated = false;
4406 goto done;
4407 }
4408
4409 origcmd = cmd_status_dword;
4410 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4411 if (mask)
4412 newcmd |= PCI_COMMAND_INTX_DISABLE;
4413 if (newcmd != origcmd)
4414 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4415
4416done:
4417 raw_spin_unlock_irqrestore(&pci_lock, flags);
4418
4419 return mask_updated;
4420}
4421
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and return
 * true in that case.  False is returned if no interrupt was pending.
 */
4429bool pci_check_and_mask_intx(struct pci_dev *dev)
4430{
4431 return pci_check_and_set_intx_mask(dev, true);
4432}
4433EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4434
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not and
 * return true.  False is returned and the mask remains active if there was
 * still an interrupt pending.
 */
4443bool pci_check_and_unmask_intx(struct pci_dev *dev)
4444{
4445 return pci_check_and_set_intx_mask(dev, false);
4446}
4447EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
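
/*
 * Illustrative sketch (not compiled): using the pair above in a handler
 * for a shared INTx line, claiming the IRQ only when this device raised
 * it.  example_irq() is a hypothetical name.
 */
#if 0
static irqreturn_t example_irq(int irq, void *data)
{
 struct pci_dev *pdev = data;

 if (!pci_check_and_mask_intx(pdev))
  return IRQ_NONE; /* not ours on this shared line */

 /* ... acknowledge the device, then re-enable its INTx ... */
 pci_check_and_unmask_intx(pdev);
 return IRQ_HANDLED;
}
#endif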
4448
/**
 * pci_wait_for_pending_transaction - wait for pending transaction
 * @dev: the PCI device to operate on
 *
 * Return 0 if a transaction is still pending, 1 otherwise.
 */
4455int pci_wait_for_pending_transaction(struct pci_dev *dev)
4456{
4457 if (!pci_is_pcie(dev))
4458 return 1;
4459
4460 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4461 PCI_EXP_DEVSTA_TRPND);
4462}
4463EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4464
/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev: device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */
4472bool pcie_has_flr(struct pci_dev *dev)
4473{
4474 u32 cap;
4475
4476 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4477 return false;
4478
4479 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4480 return cap & PCI_EXP_DEVCAP_FLR;
4481}
4482EXPORT_SYMBOL_GPL(pcie_has_flr);
4483
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device does not have any drivers attached and any MMIO or config space
 * accesses are quiesced.
 */
4492int pcie_flr(struct pci_dev *dev)
4493{
4494 if (!pci_wait_for_pending_transaction(dev))
4495 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4496
4497 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4498
4499 if (dev->imm_ready)
4500 return 0;
4501
 /*
  * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
  * 100ms, but may silently discard requests while the FLR is in
  * progress.  Wait 100ms before trying to access the device.
  */
4507 msleep(100);
4508
4509 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4510}
4511EXPORT_SYMBOL_GPL(pcie_flr);
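
/*
 * Illustrative sketch (not compiled): callers are expected to probe for
 * FLR support before issuing the reset on a quiesced function.
 */
#if 0
 if (pcie_has_flr(pdev))
  rc = pcie_flr(pdev);
#endif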
4512
4513static int pci_af_flr(struct pci_dev *dev, int probe)
4514{
4515 int pos;
4516 u8 cap;
4517
4518 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4519 if (!pos)
4520 return -ENOTTY;
4521
4522 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4523 return -ENOTTY;
4524
4525 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4526 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4527 return -ENOTTY;
4528
4529 if (probe)
4530 return 0;
4531
 /*
  * Wait for Transaction Pending bit to clear.  A word-aligned test
  * is used, so we use the control offset rather than status and shift
  * the test bit to match.
  */
4537 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4538 PCI_AF_STATUS_TP << 8))
4539 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4540
4541 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4542
4543 if (dev->imm_ready)
4544 return 0;
4545
 /*
  * Per the Advanced Capabilities for Conventional PCI ECN, a device
  * must complete an FLR within 100ms, but may silently discard
  * requests while the FLR is in progress.  Wait 100ms before trying
  * to access the device.
  */
4552 msleep(100);
4553
4554 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4555}
4556
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: Return 0 if the device can be reset this way.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field is different from that).
 */
4572static int pci_pm_reset(struct pci_dev *dev, int probe)
4573{
4574 u16 csr;
4575
4576 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4577 return -ENOTTY;
4578
4579 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4580 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4581 return -ENOTTY;
4582
4583 if (probe)
4584 return 0;
4585
4586 if (dev->current_state != PCI_D0)
4587 return -EINVAL;
4588
4589 csr &= ~PCI_PM_CTRL_STATE_MASK;
4590 csr |= PCI_D3hot;
4591 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4592 pci_dev_d3_sleep(dev);
4593
4594 csr &= ~PCI_PM_CTRL_STATE_MASK;
4595 csr |= PCI_D0;
4596 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4597 pci_dev_d3_sleep(dev);
4598
4599 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4600}
4601
/**
 * pcie_wait_for_link_delay - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 * @delay: Delay to wait after link has become active (in ms)
 *
 * Use this to wait till link becomes active or inactive.
 */
4610static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4611 int delay)
4612{
4613 int timeout = 1000;
4614 bool ret;
4615 u16 lnk_status;
4616
 /*
  * Some controllers might not implement link active reporting.  In
  * this case, we wait for 1000 ms + 100 ms.
  */
4621 if (!pdev->link_active_reporting) {
4622 msleep(1100);
4623 return true;
4624 }
4625
 /*
  * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
  * 20ms, after which we should expect the link to be active if the
  * reset was successful.  If so, software must wait a minimum 100 ms
  * before sending configuration requests to devices downstream this
  * port.
  *
  * If the link fails to activate, either the device was physically
  * removed or the link is permanently failed.
  */
4635 if (active)
4636 msleep(20);
4637 for (;;) {
4638 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4639 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4640 if (ret == active)
4641 break;
4642 if (timeout <= 0)
4643 break;
4644 msleep(10);
4645 timeout -= 10;
4646 }
4647 if (active && ret)
4648 msleep(delay);
4649 else if (ret != active)
4650 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4651 active ? "set" : "cleared");
4652 return ret == active;
4653}
4654
/**
 * pcie_wait_for_link - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 *
 * Use this to wait till link becomes active or inactive.
 */
4662bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4663{
4664 return pcie_wait_for_link_delay(pdev, active, 100);
4665}
4666
/*
 * Find maximum D3cold delay required by all the devices on the bus.  The
 * spec says 100 ms, but firmware can lower it and we allow drivers to
 * increase it as well.
 *
 * Called with @pci_bus_sem locked for reading.
 */
4674static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4675{
4676 const struct pci_dev *pdev;
4677 int min_delay = 100;
4678 int max_delay = 0;
4679
4680 list_for_each_entry(pdev, &bus->devices, bus_list) {
4681 if (pdev->d3cold_delay < min_delay)
4682 min_delay = pdev->d3cold_delay;
4683 if (pdev->d3cold_delay > max_delay)
4684 max_delay = pdev->d3cold_delay;
4685 }
4686
4687 return max(min_delay, max_delay);
4688}
4689
/**
 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
 * @dev: PCI bridge
 *
 * Handle necessary delays before access to the devices on the secondary
 * side of the bridge are permitted after D3cold to D0 transition.
 *
 * For PCIe this means the delays in PCIe 5.0 section 6.6.1.  For
 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
 * 4.3.2.
 */
4701void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4702{
4703 struct pci_dev *child;
4704 int delay;
4705
4706 if (pci_dev_is_disconnected(dev))
4707 return;
4708
4709 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4710 return;
4711
4712 down_read(&pci_bus_sem);
4713
 /*
  * We only deal with devices that are present currently on the bus.
  * For any hot-added devices the access delay is handled in pciehp
  * board_added().  In case of ACPI hotplug the firmware is expected
  * to configure the devices before OS is notified.
  */
4720 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4721 up_read(&pci_bus_sem);
4722 return;
4723 }
4724
 /* Take d3cold_delay requirements into account */
4726 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4727 if (!delay) {
4728 up_read(&pci_bus_sem);
4729 return;
4730 }
4731
4732 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4733 bus_list);
4734 up_read(&pci_bus_sem);
4735
 /*
  * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
  * accessing the device after reset (that is 1000 ms + 100 ms).  In
  * practice this should not be needed because we don't do power
  * management for them (see pci_bridge_d3_possible()).
  */
4742 if (!pci_is_pcie(dev)) {
4743 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4744 msleep(1000 + delay);
4745 return;
4746 }
4747
 /*
  * For PCIe downstream and root ports that do not support speeds
  * greater than 5 GT/s need to wait minimum 100 ms.  For higher
  * speeds (gen3) we need to wait first for the data link layer to
  * become active.
  *
  * However, 100 ms is the minimum and the PCIe spec says the
  * software must allow at least 1s before it can determine that the
  * device that did not respond is a broken device.  There is
  * evidence that 100 ms is not always enough, for example certain
  * Titan Ridge xHCI controller does not always respond to
  * configuration requests if we only wait for 100 ms (see
  * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
  *
  * Therefore we wait for 100 ms and check for the device presence.
  * If it is still not present give it an additional 100 ms.
  */
4765 if (!pcie_downstream_port(dev))
4766 return;
4767
4768 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4769 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4770 msleep(delay);
4771 } else {
4772 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4773 delay);
4774 if (!pcie_wait_for_link_delay(dev, true, delay)) {
  /* Did not train, no need to wait any further */
4776 return;
4777 }
4778 }
4779
4780 if (!pci_device_is_present(child)) {
4781 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4782 msleep(delay);
4783 }
4784}
4785
4786void pci_reset_secondary_bus(struct pci_dev *dev)
4787{
4788 u16 ctrl;
4789
4790 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4791 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4792 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
 /*
  * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
  * this to 2ms to ensure that we meet the minimum requirement.
  */
4798 msleep(2);
4799
4800 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4801 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4802
 /*
  * Trhfa for conventional PCI is 2^25 clock cycles.
  * Assuming a minimum 33MHz clock this results in a 1s
  * delay before we can consider subordinate devices to
  * be re-initialized.  PCIe has some ways to shorten this,
  * but we don't make use of them yet.
  */
4810 ssleep(1);
4811}
4812
4813void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4814{
4815 pci_reset_secondary_bus(dev);
4816}
4817
/**
 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
4825int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4826{
4827 pcibios_reset_secondary_bus(dev);
4828
4829 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4830}
4831EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4832
4833static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4834{
4835 struct pci_dev *pdev;
4836
4837 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4838 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4839 return -ENOTTY;
4840
4841 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4842 if (pdev != dev)
4843 return -ENOTTY;
4844
4845 if (probe)
4846 return 0;
4847
4848 return pci_bridge_secondary_bus_reset(dev->bus->self);
4849}
4850
4851static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4852{
4853 int rc = -ENOTTY;
4854
4855 if (!hotplug || !try_module_get(hotplug->owner))
4856 return rc;
4857
4858 if (hotplug->ops->reset_slot)
4859 rc = hotplug->ops->reset_slot(hotplug, probe);
4860
4861 module_put(hotplug->owner);
4862
4863 return rc;
4864}
4865
4866static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4867{
4868 struct pci_dev *pdev;
4869
4870 if (dev->subordinate || !dev->slot ||
4871 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4872 return -ENOTTY;
4873
4874 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4875 if (pdev != dev && pdev->slot == dev->slot)
4876 return -ENOTTY;
4877
4878 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4879}
4880
4881static void pci_dev_lock(struct pci_dev *dev)
4882{
4883 pci_cfg_access_lock(dev);
4884
4885 device_lock(&dev->dev);
4886}
4887
4888
4889static int pci_dev_trylock(struct pci_dev *dev)
4890{
4891 if (pci_cfg_access_trylock(dev)) {
4892 if (device_trylock(&dev->dev))
4893 return 1;
4894 pci_cfg_access_unlock(dev);
4895 }
4896
4897 return 0;
4898}
4899
4900static void pci_dev_unlock(struct pci_dev *dev)
4901{
4902 device_unlock(&dev->dev);
4903 pci_cfg_access_unlock(dev);
4904}
4905
4906static void pci_dev_save_and_disable(struct pci_dev *dev)
4907{
4908 const struct pci_error_handlers *err_handler =
4909 dev->driver ? dev->driver->err_handler : NULL;
4910
 /*
  * dev->driver->err_handler->reset_prepare() is protected against
  * races with ->remove() by the device lock, which must be held by
  * the caller.
  */
4916 if (err_handler && err_handler->reset_prepare)
4917 err_handler->reset_prepare(dev);
4918
 /*
  * Wake-up the device prior to save.  PM registers default to D0
  * after reset and a simple register restore doesn't reliably return
  * it to a non-D0 state anyway.
  */
4924 pci_set_power_state(dev, PCI_D0);
4925
4926 pci_save_state(dev);
4927
 /*
  * Disable the device by clearing the Command register, except for
  * INTx-disable which is set.  This not only disables MMIO and I/O
  * port BARs, but also prevents the device from being Bus Master,
  * preventing DMA from the device including MSI/MSI-X interrupts.
  */
4934 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4935}
4936
4937static void pci_dev_restore(struct pci_dev *dev)
4938{
4939 const struct pci_error_handlers *err_handler =
4940 dev->driver ? dev->driver->err_handler : NULL;
4941
4942 pci_restore_state(dev);
4943
 /*
  * dev->driver->err_handler->reset_done() is protected against
  * races with ->remove() by the device lock, which must be held by
  * the caller.
  */
4949 if (err_handler && err_handler->reset_done)
4950 err_handler->reset_done(dev);
4951}
4952
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4973int __pci_reset_function_locked(struct pci_dev *dev)
4974{
4975 int rc;
4976
4977 might_sleep();
4978
 /*
  * A reset method returns -ENOTTY if it doesn't support this device
  * and we should try the next method.
  *
  * If it returns 0 (success), we're done.  If it returns any other
  * error, we're also finished: this indicates that further reset
  * mechanisms might be broken on the device.
  */
4987 rc = pci_dev_specific_reset(dev, 0);
4988 if (rc != -ENOTTY)
4989 return rc;
4990 if (pcie_has_flr(dev)) {
4991 rc = pcie_flr(dev);
4992 if (rc != -ENOTTY)
4993 return rc;
4994 }
4995 rc = pci_af_flr(dev, 0);
4996 if (rc != -ENOTTY)
4997 return rc;
4998 rc = pci_pm_reset(dev, 0);
4999 if (rc != -ENOTTY)
5000 return rc;
5001 rc = pci_dev_reset_slot_function(dev, 0);
5002 if (rc != -ENOTTY)
5003 return rc;
5004 return pci_parent_bus_reset(dev, 0);
5005}
5006EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5007
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
5019int pci_probe_reset_function(struct pci_dev *dev)
5020{
5021 int rc;
5022
5023 might_sleep();
5024
5025 rc = pci_dev_specific_reset(dev, 1);
5026 if (rc != -ENOTTY)
5027 return rc;
5028 if (pcie_has_flr(dev))
5029 return 0;
5030 rc = pci_af_flr(dev, 1);
5031 if (rc != -ENOTTY)
5032 return rc;
5033 rc = pci_pm_reset(dev, 1);
5034 if (rc != -ENOTTY)
5035 return rc;
5036 rc = pci_dev_reset_slot_function(dev, 1);
5037 if (rc != -ENOTTY)
5038 return rc;
5039
5040 return pci_parent_bus_reset(dev, 1);
5041}
5042
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  It saves and restores
 * the device's config space around the reset, so it may be used to reset
 * the device when the driver believes the device is in an unusable state.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
5059int pci_reset_function(struct pci_dev *dev)
5060{
5061 int rc;
5062
5063 if (!dev->reset_fn)
5064 return -ENOTTY;
5065
5066 pci_dev_lock(dev);
5067 pci_dev_save_and_disable(dev);
5068
5069 rc = __pci_reset_function_locked(dev);
5070
5071 pci_dev_restore(dev);
5072 pci_dev_unlock(dev);
5073
5074 return rc;
5075}
5076EXPORT_SYMBOL_GPL(pci_reset_function);
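
/*
 * Illustrative sketch (not compiled): a driver recovering a device it
 * believes to be wedged.  Config state is saved and restored around the
 * reset by pci_reset_function() itself.
 */
#if 0
 rc = pci_reset_function(pdev);
 if (rc)
  pci_warn(pdev, "function reset failed: %d\n", rc);
#endif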
5077
/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
5095int pci_reset_function_locked(struct pci_dev *dev)
5096{
5097 int rc;
5098
5099 if (!dev->reset_fn)
5100 return -ENOTTY;
5101
5102 pci_dev_save_and_disable(dev);
5103
5104 rc = __pci_reset_function_locked(dev);
5105
5106 pci_dev_restore(dev);
5107
5108 return rc;
5109}
5110EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5111
/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
5118int pci_try_reset_function(struct pci_dev *dev)
5119{
5120 int rc;
5121
5122 if (!dev->reset_fn)
5123 return -ENOTTY;
5124
5125 if (!pci_dev_trylock(dev))
5126 return -EAGAIN;
5127
5128 pci_dev_save_and_disable(dev);
5129 rc = __pci_reset_function_locked(dev);
5130 pci_dev_restore(dev);
5131 pci_dev_unlock(dev);
5132
5133 return rc;
5134}
5135EXPORT_SYMBOL_GPL(pci_try_reset_function);
5136
/* Do any devices on or below this bus prevent a bus reset? */
5138static bool pci_bus_resetable(struct pci_bus *bus)
5139{
5140 struct pci_dev *dev;
5141
5142
5143 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5144 return false;
5145
5146 list_for_each_entry(dev, &bus->devices, bus_list) {
5147 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5148 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5149 return false;
5150 }
5151
5152 return true;
5153}
5154
/* Lock devices from the top of the tree down */
5156static void pci_bus_lock(struct pci_bus *bus)
5157{
5158 struct pci_dev *dev;
5159
5160 list_for_each_entry(dev, &bus->devices, bus_list) {
5161 pci_dev_lock(dev);
5162 if (dev->subordinate)
5163 pci_bus_lock(dev->subordinate);
5164 }
5165}
5166
/* Unlock devices from the bottom of the tree up */
5168static void pci_bus_unlock(struct pci_bus *bus)
5169{
5170 struct pci_dev *dev;
5171
5172 list_for_each_entry(dev, &bus->devices, bus_list) {
5173 if (dev->subordinate)
5174 pci_bus_unlock(dev->subordinate);
5175 pci_dev_unlock(dev);
5176 }
5177}
5178
/* Return 1 on successful lock, 0 on contention */
5180static int pci_bus_trylock(struct pci_bus *bus)
5181{
5182 struct pci_dev *dev;
5183
5184 list_for_each_entry(dev, &bus->devices, bus_list) {
5185 if (!pci_dev_trylock(dev))
5186 goto unlock;
5187 if (dev->subordinate) {
5188 if (!pci_bus_trylock(dev->subordinate)) {
5189 pci_dev_unlock(dev);
5190 goto unlock;
5191 }
5192 }
5193 }
5194 return 1;
5195
5196unlock:
5197 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5198 if (dev->subordinate)
5199 pci_bus_unlock(dev->subordinate);
5200 pci_dev_unlock(dev);
5201 }
5202 return 0;
5203}
5204
/* Do any devices on or below this slot prevent a bus reset? */
5206static bool pci_slot_resetable(struct pci_slot *slot)
5207{
5208 struct pci_dev *dev;
5209
5210 if (slot->bus->self &&
5211 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5212 return false;
5213
5214 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5215 if (!dev->slot || dev->slot != slot)
5216 continue;
5217 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5218 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5219 return false;
5220 }
5221
5222 return true;
5223}
5224
/* Lock devices from the top of the tree down */
5226static void pci_slot_lock(struct pci_slot *slot)
5227{
5228 struct pci_dev *dev;
5229
5230 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5231 if (!dev->slot || dev->slot != slot)
5232 continue;
5233 pci_dev_lock(dev);
5234 if (dev->subordinate)
5235 pci_bus_lock(dev->subordinate);
5236 }
5237}
5238
/* Unlock devices from the bottom of the tree up */
5240static void pci_slot_unlock(struct pci_slot *slot)
5241{
5242 struct pci_dev *dev;
5243
5244 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5245 if (!dev->slot || dev->slot != slot)
5246 continue;
5247 if (dev->subordinate)
5248 pci_bus_unlock(dev->subordinate);
5249 pci_dev_unlock(dev);
5250 }
5251}
5252
/* Return 1 on successful lock, 0 on contention */
5254static int pci_slot_trylock(struct pci_slot *slot)
5255{
5256 struct pci_dev *dev;
5257
5258 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5259 if (!dev->slot || dev->slot != slot)
5260 continue;
5261 if (!pci_dev_trylock(dev))
5262 goto unlock;
5263 if (dev->subordinate) {
5264 if (!pci_bus_trylock(dev->subordinate)) {
5265 pci_dev_unlock(dev);
5266 goto unlock;
5267 }
5268 }
5269 }
5270 return 1;
5271
5272unlock:
5273 list_for_each_entry_continue_reverse(dev,
5274 &slot->bus->devices, bus_list) {
5275 if (!dev->slot || dev->slot != slot)
5276 continue;
5277 if (dev->subordinate)
5278 pci_bus_unlock(dev->subordinate);
5279 pci_dev_unlock(dev);
5280 }
5281 return 0;
5282}
5283
/*
 * Save and disable devices from the top of the tree down while holding
 * the @dev mutex lock for the entire tree.
 */
5288static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5289{
5290 struct pci_dev *dev;
5291
5292 list_for_each_entry(dev, &bus->devices, bus_list) {
5293 pci_dev_save_and_disable(dev);
5294 if (dev->subordinate)
5295 pci_bus_save_and_disable_locked(dev->subordinate);
5296 }
5297}
5298
/*
 * Restore devices from top of the tree down while holding @dev mutex lock
 * for the entire tree.  Parent bridges need to be restored before we can
 * get to subordinate devices.
 */
5304static void pci_bus_restore_locked(struct pci_bus *bus)
5305{
5306 struct pci_dev *dev;
5307
5308 list_for_each_entry(dev, &bus->devices, bus_list) {
5309 pci_dev_restore(dev);
5310 if (dev->subordinate)
5311 pci_bus_restore_locked(dev->subordinate);
5312 }
5313}
5314
/*
 * Save and disable devices from the top of the tree down while holding
 * the @dev mutex lock for the entire tree.
 */
5319static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5320{
5321 struct pci_dev *dev;
5322
5323 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5324 if (!dev->slot || dev->slot != slot)
5325 continue;
5326 pci_dev_save_and_disable(dev);
5327 if (dev->subordinate)
5328 pci_bus_save_and_disable_locked(dev->subordinate);
5329 }
5330}
5331
/*
 * Restore devices from top of the tree down while holding @dev mutex lock
 * for the entire tree.  Parent bridges need to be restored before we can
 * get to subordinate devices.
 */
5337static void pci_slot_restore_locked(struct pci_slot *slot)
5338{
5339 struct pci_dev *dev;
5340
5341 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5342 if (!dev->slot || dev->slot != slot)
5343 continue;
5344 pci_dev_restore(dev);
5345 if (dev->subordinate)
5346 pci_bus_restore_locked(dev->subordinate);
5347 }
5348}
5349
5350static int pci_slot_reset(struct pci_slot *slot, int probe)
5351{
5352 int rc;
5353
5354 if (!slot || !pci_slot_resetable(slot))
5355 return -ENOTTY;
5356
5357 if (!probe)
5358 pci_slot_lock(slot);
5359
5360 might_sleep();
5361
5362 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5363
5364 if (!probe)
5365 pci_slot_unlock(slot);
5366
5367 return rc;
5368}
5369
/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
5376int pci_probe_reset_slot(struct pci_slot *slot)
5377{
5378 return pci_slot_reset(slot, 1);
5379}
5380EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5381
/**
 * __pci_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots.  For instance, some slots may support slot power
 * control.  In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * function of the slot and any subordinate buses behind the slot are reset
 * through this function.  PCI config space of all devices in the slot and
 * behind the slot is saved before and restored after reset.
 *
 * Same as above except return -EAGAIN if the slot cannot be locked.
 */
5397static int __pci_reset_slot(struct pci_slot *slot)
5398{
5399 int rc;
5400
5401 rc = pci_slot_reset(slot, 1);
5402 if (rc)
5403 return rc;
5404
5405 if (pci_slot_trylock(slot)) {
5406 pci_slot_save_and_disable_locked(slot);
5407 might_sleep();
5408 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5409 pci_slot_restore_locked(slot);
5410 pci_slot_unlock(slot);
5411 } else
5412 rc = -EAGAIN;
5413
5414 return rc;
5415}
5416
5417static int pci_bus_reset(struct pci_bus *bus, int probe)
5418{
5419 int ret;
5420
5421 if (!bus->self || !pci_bus_resetable(bus))
5422 return -ENOTTY;
5423
5424 if (probe)
5425 return 0;
5426
5427 pci_bus_lock(bus);
5428
5429 might_sleep();
5430
5431 ret = pci_bridge_secondary_bus_reset(bus->self);
5432
5433 pci_bus_unlock(bus);
5434
5435 return ret;
5436}
5437
/**
 * pci_bus_error_reset - reset the bridge's subordinate bus
 * @bridge: The parent device that connects to the bus to reset
 *
 * This function will first try to reset the slots on this bus if the method
 * is available.  If slot reset fails or is not available, this will fall
 * back to a secondary bus reset.
 */
5446int pci_bus_error_reset(struct pci_dev *bridge)
5447{
5448 struct pci_bus *bus = bridge->subordinate;
5449 struct pci_slot *slot;
5450
5451 if (!bus)
5452 return -ENOTTY;
5453
5454 mutex_lock(&pci_slot_mutex);
5455 if (list_empty(&bus->slots))
5456 goto bus_reset;
5457
5458 list_for_each_entry(slot, &bus->slots, list)
5459 if (pci_probe_reset_slot(slot))
5460 goto bus_reset;
5461
5462 list_for_each_entry(slot, &bus->slots, list)
5463 if (pci_slot_reset(slot, 0))
5464 goto bus_reset;
5465
5466 mutex_unlock(&pci_slot_mutex);
5467 return 0;
5468bus_reset:
5469 mutex_unlock(&pci_slot_mutex);
5470 return pci_bus_reset(bridge->subordinate, 0);
5471}
5472
/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
5479int pci_probe_reset_bus(struct pci_bus *bus)
5480{
5481 return pci_bus_reset(bus, 1);
5482}
5483EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5484
/**
 * __pci_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked.
 */
5491static int __pci_reset_bus(struct pci_bus *bus)
5492{
5493 int rc;
5494
5495 rc = pci_bus_reset(bus, 1);
5496 if (rc)
5497 return rc;
5498
5499 if (pci_bus_trylock(bus)) {
5500 pci_bus_save_and_disable_locked(bus);
5501 might_sleep();
5502 rc = pci_bridge_secondary_bus_reset(bus->self);
5503 pci_bus_restore_locked(bus);
5504 pci_bus_unlock(bus);
5505 } else
5506 rc = -EAGAIN;
5507
5508 return rc;
5509}
5510
/**
 * pci_reset_bus - Try to reset a PCI bus
 * @pdev: top level PCI device to reset via slot/bus
 *
 * Same as above except return -EAGAIN if the bus cannot be locked.
 */
5517int pci_reset_bus(struct pci_dev *pdev)
5518{
5519 return (!pci_probe_reset_slot(pdev->slot)) ?
5520 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5521}
5522EXPORT_SYMBOL_GPL(pci_reset_bus);
5523
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes or
 * appropriate error value.
 */
5531int pcix_get_max_mmrbc(struct pci_dev *dev)
5532{
5533 int cap;
5534 u32 stat;
5535
5536 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5537 if (!cap)
5538 return -EINVAL;
5539
5540 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5541 return -EINVAL;
5542
5543 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5544}
5545EXPORT_SYMBOL(pcix_get_max_mmrbc);
5546
/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes or appropriate error
 * value.
 */
5554int pcix_get_mmrbc(struct pci_dev *dev)
5555{
5556 int cap;
5557 u16 cmd;
5558
5559 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5560 if (!cap)
5561 return -EINVAL;
5562
5563 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5564 return -EINVAL;
5565
5566 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5567}
5568EXPORT_SYMBOL(pcix_get_mmrbc);
5569
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *	valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have
 * errata that prevent this.
 */
5579int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5580{
5581 int cap;
5582 u32 stat, v, o;
5583 u16 cmd;
5584
5585 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5586 return -EINVAL;
5587
5588 v = ffs(mmrbc) - 10;
5589
5590 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5591 if (!cap)
5592 return -EINVAL;
5593
5594 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5595 return -EINVAL;
5596
5597 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5598 return -E2BIG;
5599
5600 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5601 return -EINVAL;
5602
5603 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5604 if (o != v) {
5605 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5606 return -EIO;
5607
5608 cmd &= ~PCI_X_CMD_MAX_READ;
5609 cmd |= v << 2;
5610 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5611 return -EIO;
5612 }
5613 return 0;
5614}
5615EXPORT_SYMBOL(pcix_set_mmrbc);
5616
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes or appropriate error value.
 */
5623int pcie_get_readrq(struct pci_dev *dev)
5624{
5625 u16 ctl;
5626
5627 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5628
5629 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5630}
5631EXPORT_SYMBOL(pcie_get_readrq);
5632
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes.
 */
5641int pcie_set_readrq(struct pci_dev *dev, int rq)
5642{
5643 u16 v;
5644
5645 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5646 return -EINVAL;
5647
 /*
  * If using the "performance" PCIe config, we clamp the read rq
  * size to the max packet size to keep the host bridge from
  * generating requests larger than we can cope with.
  */
5653 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5654 int mps = pcie_get_mps(dev);
5655
5656 if (mps < rq)
5657 rq = mps;
5658 }
5659
5660 v = (ffs(rq) - 8) << 12;
5661
5662 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5663 PCI_EXP_DEVCTL_READRQ, v);
5664}
5665EXPORT_SYMBOL(pcie_set_readrq);
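
/*
 * Illustrative sketch (not compiled): clamping a device's Max Read
 * Request Size to 256 bytes.  Per the encoding above, 256 maps to
 * (ffs(256) - 8) << 12 = 1 << 12 in the Device Control register.
 */
#if 0
 rc = pcie_set_readrq(pdev, 256);
#endif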
5666
/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes.
 */
5673int pcie_get_mps(struct pci_dev *dev)
5674{
5675 u16 ctl;
5676
5677 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5678
5679 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5680}
5681EXPORT_SYMBOL(pcie_get_mps);
5682
/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size.
 */
5691int pcie_set_mps(struct pci_dev *dev, int mps)
5692{
5693 u16 v;
5694
5695 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5696 return -EINVAL;
5697
5698 v = ffs(mps) - 8;
5699 if (v > dev->pcie_mpss)
5700 return -EINVAL;
5701 v <<= 5;
5702
5703 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5704 PCI_EXP_DEVCTL_PAYLOAD, v);
5705}
5706EXPORT_SYMBOL(pcie_set_mps);
5707
/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
5722u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5723 enum pci_bus_speed *speed,
5724 enum pcie_link_width *width)
5725{
5726 u16 lnksta;
5727 enum pci_bus_speed next_speed;
5728 enum pcie_link_width next_width;
5729 u32 bw, next_bw;
5730
5731 if (speed)
5732 *speed = PCI_SPEED_UNKNOWN;
5733 if (width)
5734 *width = PCIE_LNK_WIDTH_UNKNOWN;
5735
5736 bw = 0;
5737
5738 while (dev) {
5739 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5740
5741 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5742 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5743 PCI_EXP_LNKSTA_NLW_SHIFT;
5744
5745 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5746
5747
5748 if (!bw || next_bw <= bw) {
5749 bw = next_bw;
5750
5751 if (limiting_dev)
5752 *limiting_dev = dev;
5753 if (speed)
5754 *speed = next_speed;
5755 if (width)
5756 *width = next_width;
5757 }
5758
5759 dev = pci_upstream_bridge(dev);
5760 }
5761
5762 return bw;
5763}
5764EXPORT_SYMBOL(pcie_bandwidth_available);
5765
/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.  Return the maximum link speed
 * supported by the device.
 */
5773enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5774{
5775 u32 lnkcap2, lnkcap;
5776
 /*
  * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
  * Speeds Vector in Link Capabilities 2 when supported, falling
  * back to Max Link Speed in Link Capabilities otherwise.
  */
5786 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5787 if (lnkcap2) {
5788 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
5789 return PCIE_SPEED_32_0GT;
5790 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
5791 return PCIE_SPEED_16_0GT;
5792 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5793 return PCIE_SPEED_8_0GT;
5794 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5795 return PCIE_SPEED_5_0GT;
5796 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5797 return PCIE_SPEED_2_5GT;
5798 return PCI_SPEED_UNKNOWN;
5799 }
5800
5801 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5802 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5803 return PCIE_SPEED_5_0GT;
5804 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5805 return PCIE_SPEED_2_5GT;
5806
5807 return PCI_SPEED_UNKNOWN;
5808}
5809EXPORT_SYMBOL(pcie_get_speed_cap);
5810
/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability.  Return the maximum link width
 * supported by the device.
 */
5818enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5819{
5820 u32 lnkcap;
5821
5822 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5823 if (lnkcap)
5824 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5825
5826 return PCIE_LNK_WIDTH_UNKNOWN;
5827}
5828EXPORT_SYMBOL(pcie_get_width_cap);
5829
/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead.  The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
5840u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5841 enum pcie_link_width *width)
5842{
5843 *speed = pcie_get_speed_cap(dev);
5844 *width = pcie_get_width_cap(dev);
5845
5846 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5847 return 0;
5848
5849 return *width * PCIE_SPEED2MBS_ENC(*speed);
5850}
5851
/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance.  If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
5862void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5863{
5864 enum pcie_link_width width, width_cap;
5865 enum pci_bus_speed speed, speed_cap;
5866 struct pci_dev *limiting_dev = NULL;
5867 u32 bw_avail, bw_cap;
5868
5869 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5870 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5871
5872 if (bw_avail >= bw_cap && verbose)
5873 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5874 bw_cap / 1000, bw_cap % 1000,
5875 PCIE_SPEED2STR(speed_cap), width_cap);
5876 else if (bw_avail < bw_cap)
5877 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5878 bw_avail / 1000, bw_avail % 1000,
5879 PCIE_SPEED2STR(speed), width,
5880 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5881 bw_cap / 1000, bw_cap % 1000,
5882 PCIE_SPEED2STR(speed_cap), width_cap);
5883}
5884
/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
5891void pcie_print_link_status(struct pci_dev *dev)
5892{
5893 __pcie_print_link_status(dev, true);
5894}
5895EXPORT_SYMBOL(pcie_print_link_status);
5896
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
5904int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5905{
5906 int i, bars = 0;
5907 for (i = 0; i < PCI_NUM_RESOURCES; i++)
5908 if (pci_resource_flags(dev, i) & flags)
5909 bars |= (1 << i);
5910 return bars;
5911}
5912EXPORT_SYMBOL(pci_select_bars);
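
/*
 * Illustrative sketch (not compiled): reserving only the MMIO BARs of a
 * device by combining pci_select_bars() with the selected-region helper.
 * "example" is a hypothetical owner name.
 */
#if 0
 int bars = pci_select_bars(pdev, IORESOURCE_MEM);

 rc = pci_request_selected_regions(pdev, bars, "example");
#endif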
5913
/* Some architectures require additional programming to enable VGA */
5915static arch_set_vga_state_t arch_set_vga_state;
5916
5917void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5918{
5919 arch_set_vga_state = func;
5920}
5921
5922static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5923 unsigned int command_bits, u32 flags)
5924{
5925 if (arch_set_vga_state)
5926 return arch_set_vga_state(dev, decode, command_bits,
5927 flags);
5928 return 0;
5929}
5930
/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 *	   CHANGE_BRIDGE_ONLY : change bridges only, don't change the VGA device
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
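
/*
 * Usage sketch: the VGA arbiter disables legacy VGA decoding on a device
 * and on every bridge above it with a call of this shape ("pdev" stands in
 * for the device in question; illustrative only):
 *
 *	pci_set_vga_state(pdev, false,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */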

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
	       acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
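
/*
 * Usage sketch: DMA aliases are normally established from an early header
 * fixup so the alias is in place before IOMMU groups are built.  The
 * vendor/device IDs below are placeholders, not a real quirk:
 *
 *	static void quirk_alias_fn0(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0xabcd, quirk_alias_fn0);
 */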

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;
	/* Propagate the "ignore hotplug" setting to the parent bridge. */
	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
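
/*
 * Usage sketch: GPU drivers that power their device off at runtime call
 * pci_ignore_hotplug() first so the ensuing link-down is not mistaken for
 * a hot-unplug event.  A runtime-suspend path might, roughly, do:
 *
 *	pci_save_state(pdev);
 *	pci_ignore_hotplug(pdev);
 *	pci_set_power_state(pdev, PCI_D3cold);
 */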

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another PCI device on another PCI bus.
 * If the PCI device is on the same bus, it is recommended to use
 * pci_add_dma_alias().  This is the default implementation.  Architecture
 * implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			/* Reject orders that would overflow the shift below */
			if (align_order < 0 || align_order > 63)
				align_order = -1;
		} else {
			align_order = -1;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
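
/*
 * Example: "pci=resource_alignment=12@pci:8086:9c22" requests 2^12 =
 * 4096-byte alignment for every device with vendor 8086, device 9c22,
 * while "pci=resource_alignment=0000:00:1f.0" (no order given) requests
 * PAGE_SIZE alignment for that one device.  The IDs and address here are
 * illustrative.
 */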

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * devices and use the second.
	 */

	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There is no hardware that can be changed and the VF resources
	 * will never be assigned with an address.
	 */
	if (dev->is_virtfn)
		return;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	/*
	 * When set by the command line, resource_alignment_param will not
	 * have a trailing line feed, which is ugly.  So conditionally add
	 * it here.
	 */
	if (count && buf[count - 1] != '\n' && count < PAGE_SIZE - 1) {
		buf[count++] = '\n';
		buf[count] = 0;
	}

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param = kstrndup(buf, count, GFP_KERNEL);

	if (!param)
		return -ENOMEM;

	spin_lock(&resource_alignment_lock);
	kfree(resource_alignment_param);
	resource_alignment_param = param;
	spin_unlock(&resource_alignment_lock);
	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);
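
/*
 * Example: the attribute created above lives at
 * /sys/bus/pci/resource_alignment and takes the same syntax as the boot
 * parameter.  Since alignment is applied at device enumeration, the device
 * must be removed and rescanned afterwards (paths and IDs illustrative):
 *
 *	# echo 16@pci:8086:9c22 > /sys/bus/pci/resource_alignment
 *	# echo 1 > /sys/bus/pci/devices/0000:00:02.0/remove
 *	# echo 1 > /sys/bus/pci/rescan
 */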

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 *	pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}
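
/*
 * Example: a host bridge node can pin its domain number with the
 * "linux,pci-domain" property read by of_get_pci_domain_nr() above (node
 * name and unit address are illustrative):
 *
 *	pcie@40000000 {
 *		...
 *		linux,pci-domain = <2>;
 *	};
 */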

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
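
/*
 * Example command line combining several of the options parsed above
 * (values are illustrative):
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=128M,resource_alignment=12@pci:8086:9c22
 *
 * Options are comma-separated; sizes such as hpmemsize go through
 * memparse(), so K/M/G suffixes are accepted.
 */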

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete.  We can't allocate memory
 * in pci_setup() because some architectures do not have any memory allocation
 * service available during an early_param() call.  So we allocate memory and
 * copy the variable here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);