// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
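
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * error handler could latch and act on the returned bits like this;
 * my_handle_parity() is a hypothetical driver helper:
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *
 *	if (status > 0 && (status & PCI_STATUS_DETECTED_PARITY))
 *		my_handle_parity(pdev);
 */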

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
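
/*
 * Usage sketch (illustrative only): a driver's probe() would typically
 * map a register BAR like this; BAR 0 and the error path are assumptions
 * for the example, not requirements of the API:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */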

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused in kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
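
/*
 * Usage sketch (illustrative only): locating the Power Management
 * capability and reading its PMC register; error handling is elided:
 *
 *	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pos)
 *		pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
 */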

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The first dword is the lower half, and the
	 * second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
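
/*
 * Usage sketch (illustrative only): a driver can use the DSN as a stable
 * identifier, e.g. to tell two otherwise identical devices apart:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		pci_info(pdev, "serial %016llx\n", (unsigned long long)dsn);
 */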

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * if found. Otherwise, NULL.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate transition: we can enter D0 from any state, but if we're
	 * already in a low-power state, we can only go deeper to sleep, not
	 * shallower (e.g. D1 to D3 is fine, D3 to D1 is not).
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through - force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		msleep(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which is inaccessible in
 * D3cold.  The platform firmware is therefore queried first to detect
 * accessibility of the register.  In case the platform firmware reports an
 * incorrect state or the device isn't power manageable by the platform at
 * all, we try to detect D3cold by testing accessibility of the PCI config
 * space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in
	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
	 * corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_wakeup_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
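
/*
 * Usage sketch (illustrative only): a driver's suspend path typically
 * drops the device to D3hot and the resume path brings it back to D0:
 *
 *	pci_set_power_state(pdev, PCI_D3hot);
 *	...
 *	pci_set_power_state(pdev, PCI_D0);
 */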

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = ilog2(resource_size(res)) - 20;
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);

	pci_aer_clear_status(dev);
	pci_restore_aer_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
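
/*
 * Usage sketch (illustrative only): pci_save_state() and
 * pci_restore_state() are normally used as a pair around a power
 * transition or a reset:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *	...
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */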

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);

/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
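
/*
 * Usage sketch (illustrative only): with the managed variant, probe()
 * needs no matching disable in the error or remove paths, because devres
 * tears the device down automatically on driver detach.  my_probe() is a
 * hypothetical driver entry point, not part of this file:
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		...
 *	}
 */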

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  From the ACPI spec 3.0a (7.7.3.2) same rules apply.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);
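
/*
 * Usage sketch (illustrative only): a network driver supporting
 * Wake-on-LAN might arm wakeup in its suspend path and disarm it on
 * resume; the wol_enabled flag is a hypothetical driver state bit:
 *
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	...
 *	pci_wake_from_d3(pdev, false);
 */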

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* else, fall through */
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, force it into D3cold.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device
 * supports PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended or it has to be
 * reconfigured due to wakeup settings difference between system and runtime
 * suspend, or the current power state of it is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
	return target_state != pci_dev->current_state &&
		target_state != PCI_D3cold &&
		pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime-suspended, not configured for system wakeup and
 * its config space is accessible (its power state is shallower than D3cold),
 * disable PME for it to prevent spurious wakeups.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime-suspended, capable of run-time wakeup and its
 * config space is accessible, re-enable PME for it so that it can still
 * generate run-time wake-up events.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it.  However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
#endif
	{ }
};

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management Mode
		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Even the oldest 2010 Thunderbolt controller supports D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not validated
		 * by vendors for runtime D3 at least until 2018 because there
		 * was no OS support.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				 dev->d1_support ? " D1" : "",
				 dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
}

static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}
2986
2987static int pci_ea_read(struct pci_dev *dev, int offset)
2988{
2989 struct resource *res;
2990 int ent_size, ent_offset = offset;
2991 resource_size_t start, end;
2992 unsigned long flags;
2993 u32 dw0, bei, base, max_offset;
2994 u8 prop;
2995 bool support_64 = (sizeof(resource_size_t) >= 8);
2996
2997 pci_read_config_dword(dev, ent_offset, &dw0);
2998 ent_offset += 4;
2999
	/* Entry size field indicates DWORDs after 1st */
3001 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3002
3003 if (!(dw0 & PCI_EA_ENABLE))
3004 goto out;
3005
3006 bei = (dw0 & PCI_EA_BEI) >> 4;
3007 prop = (dw0 & PCI_EA_PP) >> 8;
3008
	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
3013 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3014 prop = (dw0 & PCI_EA_SP) >> 16;
3015 if (prop > PCI_EA_P_BRIDGE_IO)
3016 goto out;
3017
3018 res = pci_ea_get_resource(dev, bei, prop);
3019 if (!res) {
3020 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3021 goto out;
3022 }
3023
3024 flags = pci_ea_flags(dev, prop);
3025 if (!flags) {
3026 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3027 goto out;
3028 }
3029
3030
3031 pci_read_config_dword(dev, ent_offset, &base);
3032 start = (base & PCI_EA_FIELD_MASK);
3033 ent_offset += 4;
3034
3035
3036 pci_read_config_dword(dev, ent_offset, &max_offset);
3037 ent_offset += 4;
3038
3039
3040 if (base & PCI_EA_IS_64) {
3041 u32 base_upper;
3042
3043 pci_read_config_dword(dev, ent_offset, &base_upper);
3044 ent_offset += 4;
3045
3046 flags |= IORESOURCE_MEM_64;
3047
		/* entry starts above 32-bit boundary, can't use */
3049 if (!support_64 && base_upper)
3050 goto out;
3051
3052 if (support_64)
3053 start |= ((u64)base_upper << 32);
3054 }
3055
3056 end = start + (max_offset | 0x03);
3057
3058
3059 if (max_offset & PCI_EA_IS_64) {
3060 u32 max_offset_upper;
3061
3062 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3063 ent_offset += 4;
3064
3065 flags |= IORESOURCE_MEM_64;
3066
		/* entry too big, can't use */
3068 if (!support_64 && max_offset_upper)
3069 goto out;
3070
3071 if (support_64)
3072 end += ((u64)max_offset_upper << 32);
3073 }
3074
3075 if (end < start) {
3076 pci_err(dev, "EA Entry crosses address boundary\n");
3077 goto out;
3078 }
3079
3080 if (ent_size != ent_offset - offset) {
3081 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3082 ent_size, ent_offset - offset);
3083 goto out;
3084 }
3085
3086 res->name = pci_name(dev);
3087 res->start = start;
3088 res->end = end;
3089 res->flags = flags;
3090
3091 if (bei <= PCI_EA_BEI_BAR5)
3092 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3093 bei, res, prop);
3094 else if (bei == PCI_EA_BEI_ROM)
3095 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3096 res, prop);
3097 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3098 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3099 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3100 else
3101 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3102 bei, res, prop);
3103
3104out:
3105 return offset + ent_size;
3106}
3107
3108
3109void pci_ea_init(struct pci_dev *dev)
3110{
3111 int ea;
3112 u8 num_ent;
3113 int offset;
3114 int i;
3115
3116
3117 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3118 if (!ea)
3119 return;
3120
3121
3122 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3123 &num_ent);
3124 num_ent &= PCI_EA_NUM_ENT_MASK;
3125
3126 offset = ea + PCI_EA_FIRST_ENT;
3127
3128
3129 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3130 offset += 4;
3131
3132
3133 for (i = 0; i < num_ent; ++i)
3134 offset = pci_ea_read(dev, offset);
3135}
3136
3137static void pci_add_saved_cap(struct pci_dev *pci_dev,
3138 struct pci_cap_saved_state *new_cap)
3139{
3140 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3141}
3142
/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *			      capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
3151static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3152 bool extended, unsigned int size)
3153{
3154 int pos;
3155 struct pci_cap_saved_state *save_state;
3156
3157 if (extended)
3158 pos = pci_find_ext_capability(dev, cap);
3159 else
3160 pos = pci_find_capability(dev, cap);
3161
3162 if (!pos)
3163 return 0;
3164
3165 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3166 if (!save_state)
3167 return -ENOMEM;
3168
3169 save_state->cap.cap_nr = cap;
3170 save_state->cap.cap_extended = extended;
3171 save_state->cap.size = size;
3172 pci_add_saved_cap(dev, save_state);
3173
3174 return 0;
3175}
3176
3177int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3178{
3179 return _pci_add_cap_save_buffer(dev, cap, false, size);
3180}
3181
3182int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3183{
3184 return _pci_add_cap_save_buffer(dev, cap, true, size);
3185}
3186
3187
3188
3189
3190
3191void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3192{
3193 int error;
3194
3195 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3196 PCI_EXP_SAVE_REGS * sizeof(u16));
3197 if (error)
3198 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3199
3200 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3201 if (error)
3202 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3203
3204 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3205 2 * sizeof(u16));
3206 if (error)
3207 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3208
3209 pci_allocate_vc_save_buffers(dev);
3210}
3211
3212void pci_free_cap_save_buffers(struct pci_dev *dev)
3213{
3214 struct pci_cap_saved_state *tmp;
3215 struct hlist_node *n;
3216
3217 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3218 kfree(tmp);
3219}
3220
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
3228void pci_configure_ari(struct pci_dev *dev)
3229{
3230 u32 cap;
3231 struct pci_dev *bridge;
3232
3233 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3234 return;
3235
3236 bridge = dev->bus->self;
3237 if (!bridge)
3238 return;
3239
3240 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3241 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3242 return;
3243
3244 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3245 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3246 PCI_EXP_DEVCTL2_ARI);
3247 bridge->ari_enabled = 1;
3248 } else {
3249 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3250 PCI_EXP_DEVCTL2_ARI);
3251 bridge->ari_enabled = 0;
3252 }
3253}
3254
3255static int pci_acs_enable;
3256
/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
3260void pci_request_acs(void)
3261{
3262 pci_acs_enable = 1;
3263}
3264
3265static const char *disable_acs_redir_param;
3266
3267
3268
3269
3270
3271
3272
3273static void pci_disable_acs_redir(struct pci_dev *dev)
3274{
3275 int ret = 0;
3276 const char *p;
3277 int pos;
3278 u16 ctrl;
3279
3280 if (!disable_acs_redir_param)
3281 return;
3282
3283 p = disable_acs_redir_param;
3284 while (*p) {
3285 ret = pci_dev_str_match(dev, p, &p);
3286 if (ret < 0) {
3287 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
3288 disable_acs_redir_param);
3289
3290 break;
3291 } else if (ret == 1) {
3292
3293 break;
3294 }
3295
3296 if (*p != ';' && *p != ',') {
3297
3298 break;
3299 }
3300 p++;
3301 }
3302
3303 if (ret != 1)
3304 return;
3305
3306 if (!pci_dev_specific_disable_acs_redir(dev))
3307 return;
3308
3309 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3310 if (!pos) {
3311 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
3312 return;
3313 }
3314
3315 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3316
	/* P2P Request & Completion Redirect */
3318 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
3319
3320 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3321
3322 pci_info(dev, "disabled ACS redirect\n");
3323}
3324
/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
3329static void pci_std_enable_acs(struct pci_dev *dev)
3330{
3331 int pos;
3332 u16 cap;
3333 u16 ctrl;
3334
3335 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3336 if (!pos)
3337 return;
3338
3339 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
3340 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3341
	/* Source Validation */
3343 ctrl |= (cap & PCI_ACS_SV);
3344
	/* P2P Request Redirect */
3346 ctrl |= (cap & PCI_ACS_RR);
3347
	/* P2P Completion Redirect */
3349 ctrl |= (cap & PCI_ACS_CR);
3350
	/* Upstream Forwarding */
3352 ctrl |= (cap & PCI_ACS_UF);
3353
3354 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3355}
3356
/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
3361void pci_enable_acs(struct pci_dev *dev)
3362{
3363 if (!pci_acs_enable)
3364 goto disable_acs_redir;
3365
3366 if (!pci_dev_specific_enable_acs(dev))
3367 goto disable_acs_redir;
3368
3369 pci_std_enable_acs(dev);
3370
3371disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by the
	 * firmware.  It therefore runs unconditionally, so that a user
	 * request to disable ACS redirect always takes effect.
	 */
3379 pci_disable_acs_redir(dev);
3380}
3381
3382static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3383{
3384 int pos;
3385 u16 cap, ctrl;
3386
3387 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
3388 if (!pos)
3389 return false;
3390
	/*
	 * Except for egress control, capabilities are either required
	 * or only required if controllable.  Features missing from the
	 * capability field can therefore be assumed as hard-wired enabled.
	 */
3396 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3397 acs_flags &= (cap | PCI_ACS_EC);
3398
3399 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3400 return (ctrl & acs_flags) == acs_flags;
3401}
3402
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */
3419bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3420{
3421 int ret;
3422
3423 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3424 if (ret >= 0)
3425 return ret > 0;
3426
3427
3428
3429
3430
3431
3432 if (!pci_is_pcie(pdev))
3433 return false;
3434
3435 switch (pci_pcie_type(pdev)) {
3436
3437
3438
3439
3440
3441 case PCI_EXP_TYPE_PCIE_BRIDGE:
3442
3443
3444
3445
3446
3447
3448 case PCI_EXP_TYPE_PCI_BRIDGE:
3449 case PCI_EXP_TYPE_RC_EC:
3450 return false;
3451
3452
3453
3454
3455
3456 case PCI_EXP_TYPE_DOWNSTREAM:
3457 case PCI_EXP_TYPE_ROOT_PORT:
3458 return pci_acs_flags_enabled(pdev, acs_flags);
3459
3460
3461
3462
3463
3464
3465
3466 case PCI_EXP_TYPE_ENDPOINT:
3467 case PCI_EXP_TYPE_UPSTREAM:
3468 case PCI_EXP_TYPE_LEG_END:
3469 case PCI_EXP_TYPE_RC_END:
3470 if (!pdev->multifunction)
3471 break;
3472
3473 return pci_acs_flags_enabled(pdev, acs_flags);
3474 }
3475
3476
3477
3478
3479
3480 return true;
3481}
3482
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
3492bool pci_acs_path_enabled(struct pci_dev *start,
3493 struct pci_dev *end, u16 acs_flags)
3494{
3495 struct pci_dev *pdev, *parent = start;
3496
3497 do {
3498 pdev = parent;
3499
3500 if (!pci_acs_enabled(pdev, acs_flags))
3501 return false;
3502
3503 if (pci_is_root_bus(pdev->bus))
3504 return (end == NULL);
3505
3506 parent = pdev->bus->self;
3507 } while (pdev != end);
3508
3509 return true;
3510}
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3522{
3523 unsigned int pos, nbars, i;
3524 u32 ctrl;
3525
3526 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3527 if (!pos)
3528 return -ENOTSUPP;
3529
3530 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3531 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3532 PCI_REBAR_CTRL_NBAR_SHIFT;
3533
3534 for (i = 0; i < nbars; i++, pos += 8) {
3535 int bar_idx;
3536
3537 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3538 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3539 if (bar_idx == bar)
3540 return pos;
3541 }
3542
3543 return -ENOENT;
3544}
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3555{
3556 int pos;
3557 u32 cap;
3558
3559 pos = pci_rebar_find_pos(pdev, bar);
3560 if (pos < 0)
3561 return 0;
3562
3563 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3564 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3565}
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3576{
3577 int pos;
3578 u32 ctrl;
3579
3580 pos = pci_rebar_find_pos(pdev, bar);
3581 if (pos < 0)
3582 return pos;
3583
3584 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3585 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3586}
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3598{
3599 int pos;
3600 u32 ctrl;
3601
3602 pos = pci_rebar_find_pos(pdev, bar);
3603 if (pos < 0)
3604 return pos;
3605
3606 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3607 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3608 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3609 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3610 return 0;
3611}
3612
/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */
3626int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3627{
3628 struct pci_bus *bus = dev->bus;
3629 struct pci_dev *bridge;
3630 u32 cap, ctl2;
3631
3632 if (!pci_is_pcie(dev))
3633 return -EINVAL;
3634
3635
3636
3637
3638
3639
3640
3641
3642 switch (pci_pcie_type(dev)) {
3643 case PCI_EXP_TYPE_ENDPOINT:
3644 case PCI_EXP_TYPE_LEG_END:
3645 case PCI_EXP_TYPE_RC_END:
3646 break;
3647 default:
3648 return -EINVAL;
3649 }
3650
3651 while (bus->parent) {
3652 bridge = bus->self;
3653
3654 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3655
3656 switch (pci_pcie_type(bridge)) {
3657
3658 case PCI_EXP_TYPE_UPSTREAM:
3659 case PCI_EXP_TYPE_DOWNSTREAM:
3660 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3661 return -EINVAL;
3662 break;
3663
3664
3665 case PCI_EXP_TYPE_ROOT_PORT:
3666 if ((cap & cap_mask) != cap_mask)
3667 return -EINVAL;
3668 break;
3669 }
3670
3671
3672 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3673 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3674 &ctl2);
3675 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3676 return -EINVAL;
3677 }
3678
3679 bus = bus->parent;
3680 }
3681
3682 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3683 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3684 return 0;
3685}
3686EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
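
/*
 * Usage sketch (hypothetical driver code): an endpoint driver that wants
 * to issue 32-bit and 64-bit AtomicOps to host memory can probe for
 * routing support before relying on the feature.  This assumes the
 * PCI_EXP_DEVCAP2_ATOMIC_COMP* masks from <uapi/linux/pci_regs.h>:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "PCIe AtomicOps not available\n");
 */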
3687
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by the PCI-to-PCI bridge specification for devices behind
 * bridges on add-in cards.  For devices on buses where ARI is enabled, the
 * slot number is taken to be 0.
 */
3699u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3700{
3701 int slot;
3702
3703 if (pci_ari_enabled(dev->bus))
3704 slot = 0;
3705 else
3706 slot = PCI_SLOT(dev->devfn);
3707
3708 return (((pin - 1) + slot) % 4) + 1;
3709}
3710
3711int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3712{
3713 u8 pin;
3714
3715 pin = dev->pin;
3716 if (!pin)
3717 return -1;
3718
3719 while (!pci_is_root_bus(dev->bus)) {
3720 pin = pci_swizzle_interrupt_pin(dev, pin);
3721 dev = dev->bus->self;
3722 }
3723 *bridge = dev;
3724 return pin;
3725}
3726
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all
 * PCI-to-PCI bridges all the way up to a PCI root bus.
 */
3735u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3736{
3737 u8 pin = *pinp;
3738
3739 while (!pci_is_root_bus(dev->bus)) {
3740 pin = pci_swizzle_interrupt_pin(dev, pin);
3741 dev = dev->bus->self;
3742 }
3743 *pinp = pin;
3744 return PCI_SLOT(dev->devfn);
3745}
3746EXPORT_SYMBOL_GPL(pci_common_swizzle);
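
/*
 * Worked example of the swizzle above: a device in slot 2 behind one
 * bridge raising INTB (pin 2) appears at the bridge as
 * (((2 - 1) + 2) % 4) + 1 = pin 4, i.e. INTD.  pci_common_swizzle()
 * repeats this at each bridge level, yielding the pin seen at the root
 * bus plus the slot number there, which is what the final IRQ lookup
 * uses.
 */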
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758void pci_release_region(struct pci_dev *pdev, int bar)
3759{
3760 struct pci_devres *dr;
3761
3762 if (pci_resource_len(pdev, bar) == 0)
3763 return;
3764 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3765 release_region(pci_resource_start(pdev, bar),
3766 pci_resource_len(pdev, bar));
3767 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3768 release_mem_region(pci_resource_start(pdev, bar),
3769 pci_resource_len(pdev, bar));
3770
3771 dr = find_pci_dr(pdev);
3772 if (dr)
3773 dr->region_mask &= ~(1 << bar);
3774}
3775EXPORT_SYMBOL(pci_release_region);
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796static int __pci_request_region(struct pci_dev *pdev, int bar,
3797 const char *res_name, int exclusive)
3798{
3799 struct pci_devres *dr;
3800
3801 if (pci_resource_len(pdev, bar) == 0)
3802 return 0;
3803
3804 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3805 if (!request_region(pci_resource_start(pdev, bar),
3806 pci_resource_len(pdev, bar), res_name))
3807 goto err_out;
3808 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3809 if (!__request_mem_region(pci_resource_start(pdev, bar),
3810 pci_resource_len(pdev, bar), res_name,
3811 exclusive))
3812 goto err_out;
3813 }
3814
3815 dr = find_pci_dr(pdev);
3816 if (dr)
3817 dr->region_mask |= 1 << bar;
3818
3819 return 0;
3820
3821err_out:
3822 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3823 &pdev->resource[bar]);
3824 return -EBUSY;
3825}
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3842{
3843 return __pci_request_region(pdev, bar, res_name, 0);
3844}
3845EXPORT_SYMBOL(pci_request_region);
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3856{
3857 int i;
3858
3859 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3860 if (bars & (1 << i))
3861 pci_release_region(pdev, i);
3862}
3863EXPORT_SYMBOL(pci_release_selected_regions);
3864
3865static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3866 const char *res_name, int excl)
3867{
3868 int i;
3869
3870 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3871 if (bars & (1 << i))
3872 if (__pci_request_region(pdev, i, res_name, excl))
3873 goto err_out;
3874 return 0;
3875
3876err_out:
3877 while (--i >= 0)
3878 if (bars & (1 << i))
3879 pci_release_region(pdev, i);
3880
3881 return -EBUSY;
3882}
3883
3884
3885
3886
3887
3888
3889
3890
3891int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3892 const char *res_name)
3893{
3894 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3895}
3896EXPORT_SYMBOL(pci_request_selected_regions);
3897
3898int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3899 const char *res_name)
3900{
3901 return __pci_request_selected_regions(pdev, bars, res_name,
3902 IORESOURCE_EXCLUSIVE);
3903}
3904EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916void pci_release_regions(struct pci_dev *pdev)
3917{
3918 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3919}
3920EXPORT_SYMBOL(pci_release_regions);
3921
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3935int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3936{
3937 return pci_request_selected_regions(pdev,
3938 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3939}
3940EXPORT_SYMBOL(pci_request_regions);
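
/*
 * Usage sketch (hypothetical driver code; "mydrv" is an illustrative
 * name): the usual probe-time pairing with pci_release_regions():
 *
 *	err = pci_request_regions(pdev, "mydrv");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_regions(pdev);
 */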
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3958{
3959 return pci_request_selected_regions_exclusive(pdev,
3960 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3961}
3962EXPORT_SYMBOL(pci_request_regions_exclusive);
3963
3964
3965
3966
3967
3968int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3969 resource_size_t size)
3970{
3971 int ret = 0;
3972#ifdef PCI_IOBASE
3973 struct logic_pio_hwaddr *range;
3974
3975 if (!size || addr + size < addr)
3976 return -EINVAL;
3977
3978 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3979 if (!range)
3980 return -ENOMEM;
3981
3982 range->fwnode = fwnode;
3983 range->size = size;
3984 range->hw_start = addr;
3985 range->flags = LOGIC_PIO_CPU_MMIO;
3986
3987 ret = logic_pio_register_range(range);
3988 if (ret)
3989 kfree(range);
3990#endif
3991
3992 return ret;
3993}
3994
3995phys_addr_t pci_pio_to_address(unsigned long pio)
3996{
3997 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3998
3999#ifdef PCI_IOBASE
4000 if (pio >= MMIO_UPPER_LIMIT)
4001 return address;
4002
4003 address = logic_pio_to_hwaddr(pio);
4004#endif
4005
4006 return address;
4007}
4008
4009unsigned long __weak pci_address_to_pio(phys_addr_t address)
4010{
4011#ifdef PCI_IOBASE
4012 return logic_pio_trans_cpuaddr(address);
4013#else
4014 if (address > IO_SPACE_LIMIT)
4015 return (unsigned long)-1;
4016
4017 return (unsigned long) address;
4018#endif
4019}
4020
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
4031int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4032{
4033#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4034 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4035
4036 if (!(res->flags & IORESOURCE_IO))
4037 return -EINVAL;
4038
4039 if (res->end > IO_SPACE_LIMIT)
4040 return -EINVAL;
4041
4042 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4043 pgprot_device(PAGE_KERNEL));
4044#else
	/*
	 * This architecture does not have memory mapped I/O space,
	 * so this function should never be called.
	 */
4049 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4050 return -ENODEV;
4051#endif
4052}
4053EXPORT_SYMBOL(pci_remap_iospace);
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063void pci_unmap_iospace(struct resource *res)
4064{
4065#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4066 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4067
4068 unmap_kernel_range(vaddr, resource_size(res));
4069#endif
4070}
4071EXPORT_SYMBOL(pci_unmap_iospace);
4072
4073static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4074{
4075 struct resource **res = ptr;
4076
4077 pci_unmap_iospace(*res);
4078}
4079
/**
 * devm_pci_remap_iospace - Managed pci_remap_iospace()
 * @dev: Generic device to remap IO address for
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
 * detach.
 */
4089int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4090 phys_addr_t phys_addr)
4091{
4092 const struct resource **ptr;
4093 int error;
4094
4095 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4096 if (!ptr)
4097 return -ENOMEM;
4098
4099 error = pci_remap_iospace(res, phys_addr);
4100 if (error) {
4101 devres_free(ptr);
4102 } else {
4103 *ptr = res;
4104 devres_add(dev, ptr);
4105 }
4106
4107 return error;
4108}
4109EXPORT_SYMBOL(devm_pci_remap_iospace);
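
/*
 * Usage sketch (hypothetical host bridge driver code; @io_res and
 * @io_phys are illustrative names): map a legacy I/O port window
 * described by a resource at a known CPU physical address:
 *
 *	err = devm_pci_remap_iospace(dev, io_res, io_phys);
 *	if (err)
 *		return err;
 */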
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4121 resource_size_t offset,
4122 resource_size_t size)
4123{
4124 void __iomem **ptr, *addr;
4125
4126 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4127 if (!ptr)
4128 return NULL;
4129
4130 addr = pci_remap_cfgspace(offset, size);
4131 if (addr) {
4132 *ptr = addr;
4133 devres_add(dev, ptr);
4134 } else
4135 devres_free(ptr);
4136
4137 return addr;
4138}
4139EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4140
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
 * code on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
4160void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4161 struct resource *res)
4162{
4163 resource_size_t size;
4164 const char *name;
4165 void __iomem *dest_ptr;
4166
4167 BUG_ON(!dev);
4168
4169 if (!res || resource_type(res) != IORESOURCE_MEM) {
4170 dev_err(dev, "invalid resource\n");
4171 return IOMEM_ERR_PTR(-EINVAL);
4172 }
4173
4174 size = resource_size(res);
4175 name = res->name ?: dev_name(dev);
4176
4177 if (!devm_request_mem_region(dev, res->start, size, name)) {
4178 dev_err(dev, "can't request region for resource %pR\n", res);
4179 return IOMEM_ERR_PTR(-EBUSY);
4180 }
4181
4182 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4183 if (!dest_ptr) {
4184 dev_err(dev, "ioremap failed for resource %pR\n", res);
4185 devm_release_mem_region(dev, res->start, size);
4186 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4187 }
4188
4189 return dest_ptr;
4190}
4191EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4192
4193static void __pci_set_master(struct pci_dev *dev, bool enable)
4194{
4195 u16 old_cmd, cmd;
4196
4197 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4198 if (enable)
4199 cmd = old_cmd | PCI_COMMAND_MASTER;
4200 else
4201 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4202 if (cmd != old_cmd) {
4203 pci_dbg(dev, "%s bus mastering\n",
4204 enable ? "enabling" : "disabling");
4205 pci_write_config_word(dev, PCI_COMMAND, cmd);
4206 }
4207 dev->is_busmaster = enable;
4208}
4209
4210
4211
4212
4213
4214
4215
4216
4217char * __weak __init pcibios_setup(char *str)
4218{
4219 return str;
4220}
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230void __weak pcibios_set_master(struct pci_dev *dev)
4231{
4232 u8 lat;
4233
	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4235 if (pci_is_pcie(dev))
4236 return;
4237
4238 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4239 if (lat < 16)
4240 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4241 else if (lat > pcibios_max_latency)
4242 lat = pcibios_max_latency;
4243 else
4244 return;
4245
4246 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4247}
4248
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
4256void pci_set_master(struct pci_dev *dev)
4257{
4258 __pci_set_master(dev, true);
4259 pcibios_set_master(dev);
4260}
4261EXPORT_SYMBOL(pci_set_master);
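
/*
 * Usage sketch (hypothetical driver code): bus mastering is typically
 * enabled in probe after the device itself has been enabled:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */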
4262
4263
4264
4265
4266
4267void pci_clear_master(struct pci_dev *dev)
4268{
4269 __pci_set_master(dev, false);
4270}
4271EXPORT_SYMBOL(pci_clear_master);
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283int pci_set_cacheline_size(struct pci_dev *dev)
4284{
4285 u8 cacheline_size;
4286
4287 if (!pci_cache_line_size)
4288 return -EINVAL;
4289
4290
4291
4292 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4293 if (cacheline_size >= pci_cache_line_size &&
4294 (cacheline_size % pci_cache_line_size) == 0)
4295 return 0;
4296
4297
4298 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4299
4300 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4301 if (cacheline_size == pci_cache_line_size)
4302 return 0;
4303
4304 pci_info(dev, "cache line size of %d is not supported\n",
4305 pci_cache_line_size << 2);
4306
4307 return -EINVAL;
4308}
4309EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319int pci_set_mwi(struct pci_dev *dev)
4320{
4321#ifdef PCI_DISABLE_MWI
4322 return 0;
4323#else
4324 int rc;
4325 u16 cmd;
4326
4327 rc = pci_set_cacheline_size(dev);
4328 if (rc)
4329 return rc;
4330
4331 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4332 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4333 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4334 cmd |= PCI_COMMAND_INVALIDATE;
4335 pci_write_config_word(dev, PCI_COMMAND, cmd);
4336 }
4337 return 0;
4338#endif
4339}
4340EXPORT_SYMBOL(pci_set_mwi);
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350int pcim_set_mwi(struct pci_dev *dev)
4351{
4352 struct pci_devres *dr;
4353
4354 dr = find_pci_dr(dev);
4355 if (!dr)
4356 return -ENOMEM;
4357
4358 dr->mwi = 1;
4359 return pci_set_mwi(dev);
4360}
4361EXPORT_SYMBOL(pcim_set_mwi);
4362
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4372int pci_try_set_mwi(struct pci_dev *dev)
4373{
4374#ifdef PCI_DISABLE_MWI
4375 return 0;
4376#else
4377 return pci_set_mwi(dev);
4378#endif
4379}
4380EXPORT_SYMBOL(pci_try_set_mwi);
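
/*
 * Usage sketch (hypothetical driver code): since MWI is purely an
 * optimization, the result may be ignored:
 *
 *	pci_try_set_mwi(pdev);
 */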
4381
4382
4383
4384
4385
4386
4387
4388void pci_clear_mwi(struct pci_dev *dev)
4389{
4390#ifndef PCI_DISABLE_MWI
4391 u16 cmd;
4392
4393 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4394 if (cmd & PCI_COMMAND_INVALIDATE) {
4395 cmd &= ~PCI_COMMAND_INVALIDATE;
4396 pci_write_config_word(dev, PCI_COMMAND, cmd);
4397 }
4398#endif
4399}
4400EXPORT_SYMBOL(pci_clear_mwi);
4401
4402
4403
4404
4405
4406
4407
4408
4409void pci_intx(struct pci_dev *pdev, int enable)
4410{
4411 u16 pci_command, new;
4412
4413 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4414
4415 if (enable)
4416 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4417 else
4418 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4419
4420 if (new != pci_command) {
4421 struct pci_devres *dr;
4422
4423 pci_write_config_word(pdev, PCI_COMMAND, new);
4424
4425 dr = find_pci_dr(pdev);
4426 if (dr && !dr->restore_intx) {
4427 dr->restore_intx = 1;
4428 dr->orig_intx = !enable;
4429 }
4430 }
4431}
4432EXPORT_SYMBOL_GPL(pci_intx);
4433
4434static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4435{
4436 struct pci_bus *bus = dev->bus;
4437 bool mask_updated = true;
4438 u32 cmd_status_dword;
4439 u16 origcmd, newcmd;
4440 unsigned long flags;
4441 bool irq_pending;
4442
	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
4447 BUILD_BUG_ON(PCI_COMMAND % 4);
4448 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4449
4450 raw_spin_lock_irqsave(&pci_lock, flags);
4451
4452 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4453
4454 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4455
	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
4461 if (mask != irq_pending) {
4462 mask_updated = false;
4463 goto done;
4464 }
4465
4466 origcmd = cmd_status_dword;
4467 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4468 if (mask)
4469 newcmd |= PCI_COMMAND_INTX_DISABLE;
4470 if (newcmd != origcmd)
4471 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4472
4473done:
4474 raw_spin_unlock_irqrestore(&pci_lock, flags);
4475
4476 return mask_updated;
4477}
4478
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and return
 * true in that case. False is returned if no interrupt was pending.
 */
4486bool pci_check_and_mask_intx(struct pci_dev *dev)
4487{
4488 return pci_check_and_set_intx_mask(dev, true);
4489}
4490EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4491
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not and
 * return true. False is returned and the mask remains active if there was
 * still an interrupt pending.
 */
4500bool pci_check_and_unmask_intx(struct pci_dev *dev)
4501{
4502 return pci_check_and_set_intx_mask(dev, false);
4503}
4504EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
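
/*
 * Usage sketch (hypothetical driver code; the "mydrv" names are
 * illustrative): a shared INTx handler that masks the interrupt at the
 * device and defers the real work:
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;
 *
 *		schedule_work(&priv->work);
 *		return IRQ_HANDLED;
 *	}
 *
 * The work handler would re-enable the line with
 * pci_check_and_unmask_intx() once the device has been serviced.
 */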
4505
4506
4507
4508
4509
4510
4511
4512int pci_wait_for_pending_transaction(struct pci_dev *dev)
4513{
4514 if (!pci_is_pcie(dev))
4515 return 1;
4516
4517 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4518 PCI_EXP_DEVSTA_TRPND);
4519}
4520EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4521
/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev: device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */
4529bool pcie_has_flr(struct pci_dev *dev)
4530{
4531 u32 cap;
4532
4533 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4534 return false;
4535
4536 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4537 return cap & PCI_EXP_DEVCAP_FLR;
4538}
4539EXPORT_SYMBOL_GPL(pcie_has_flr);
4540
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by checking
 * pcie_has_flr().
 */
4549int pcie_flr(struct pci_dev *dev)
4550{
4551 if (!pci_wait_for_pending_transaction(dev))
4552 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4553
4554 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4555
4556 if (dev->imm_ready)
4557 return 0;
4558
	/*
	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */
4564 msleep(100);
4565
4566 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4567}
4568EXPORT_SYMBOL_GPL(pcie_flr);
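
/*
 * Usage sketch (hypothetical driver code): FLR clears configuration
 * space, so callers save state beforehand and restore it afterwards:
 *
 *	if (pcie_has_flr(pdev)) {
 *		pci_save_state(pdev);
 *		pcie_flr(pdev);
 *		pci_restore_state(pdev);
 *	}
 */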
4569
4570static int pci_af_flr(struct pci_dev *dev, int probe)
4571{
4572 int pos;
4573 u8 cap;
4574
4575 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4576 if (!pos)
4577 return -ENOTTY;
4578
4579 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4580 return -ENOTTY;
4581
4582 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4583 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4584 return -ENOTTY;
4585
4586 if (probe)
4587 return 0;
4588
	/*
	 * Wait for Transaction Pending bit to clear.  A word-aligned test
	 * is used, so we use the control offset rather than status and shift
	 * the test bit to match.
	 */
4594 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4595 PCI_AF_STATUS_TP << 8))
4596 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4597
4598 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4599
4600 if (dev->imm_ready)
4601 return 0;
4602
4603
4604
4605
4606
4607
4608
4609 msleep(100);
4610
4611 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4612}
4613
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If the device supports PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in the D3cold state,
 * it can be reset this way.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
4629static int pci_pm_reset(struct pci_dev *dev, int probe)
4630{
4631 u16 csr;
4632
4633 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4634 return -ENOTTY;
4635
4636 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4637 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4638 return -ENOTTY;
4639
4640 if (probe)
4641 return 0;
4642
4643 if (dev->current_state != PCI_D0)
4644 return -EINVAL;
4645
4646 csr &= ~PCI_PM_CTRL_STATE_MASK;
4647 csr |= PCI_D3hot;
4648 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4649 pci_dev_d3_sleep(dev);
4650
4651 csr &= ~PCI_PM_CTRL_STATE_MASK;
4652 csr |= PCI_D0;
4653 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4654 pci_dev_d3_sleep(dev);
4655
4656 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4657}
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4668 int delay)
4669{
4670 int timeout = 1000;
4671 bool ret;
4672 u16 lnk_status;
4673
	/*
	 * Some controllers might not implement link active reporting. In this
	 * case, we wait for 1000 ms + any delay requested by the caller.
	 */
4678 if (!pdev->link_active_reporting) {
4679 msleep(1100);
4680 return true;
4681 }
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692 if (active)
4693 msleep(20);
4694 for (;;) {
4695 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4696 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4697 if (ret == active)
4698 break;
4699 if (timeout <= 0)
4700 break;
4701 msleep(10);
4702 timeout -= 10;
4703 }
4704 if (active && ret)
4705 msleep(delay);
4706 else if (ret != active)
4707 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4708 active ? "set" : "cleared");
4709 return ret == active;
4710}
4711
4712
4713
4714
4715
4716
4717
4718
4719bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4720{
4721 return pcie_wait_for_link_delay(pdev, active, 100);
4722}
4723
4724
4725
4726
4727
4728
4729
4730
4731static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4732{
4733 const struct pci_dev *pdev;
4734 int min_delay = 100;
4735 int max_delay = 0;
4736
4737 list_for_each_entry(pdev, &bus->devices, bus_list) {
4738 if (pdev->d3cold_delay < min_delay)
4739 min_delay = pdev->d3cold_delay;
4740 if (pdev->d3cold_delay > max_delay)
4741 max_delay = pdev->d3cold_delay;
4742 }
4743
4744 return max(min_delay, max_delay);
4745}
4746
/**
 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
 * @dev: PCI bridge
 *
 * Handle necessary delays before access to the devices on the secondary
 * side of the bridge are permitted after D3cold to D0 transition.
 *
 * For PCIe this means the delays in PCIe 5.0 section 6.6.1.  For
 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
 * 4.3.2.
 */
4758void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4759{
4760 struct pci_dev *child;
4761 int delay;
4762
4763 if (pci_dev_is_disconnected(dev))
4764 return;
4765
4766 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4767 return;
4768
4769 down_read(&pci_bus_sem);
4770
4771
4772
4773
4774
4775
4776
4777 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4778 up_read(&pci_bus_sem);
4779 return;
4780 }
4781
4782
4783 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4784 if (!delay) {
4785 up_read(&pci_bus_sem);
4786 return;
4787 }
4788
4789 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4790 bus_list);
4791 up_read(&pci_bus_sem);
4792
4793
4794
4795
4796
4797
4798
4799 if (!pci_is_pcie(dev)) {
4800 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4801 msleep(1000 + delay);
4802 return;
4803 }
4804
	/*
	 * For PCIe downstream and root ports that do not support speeds
	 * greater than 5 GT/s we need to wait a minimum of 100 ms.  For
	 * higher speeds (gen3 and up) the data link layer must report the
	 * link as active before we start counting that delay.
	 *
	 * However, 100 ms is only the minimum: the spec allows software
	 * up to 1 s before it may conclude that a device which did not
	 * respond is broken, and some devices are known to need more than
	 * 100 ms before they accept configuration requests.
	 *
	 * Therefore wait for the delay, then check for device presence;
	 * if the device is still not present, give it the additional
	 * delay below before the first access.
	 */
4822 if (!pcie_downstream_port(dev))
4823 return;
4824
4825 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4826 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4827 msleep(delay);
4828 } else {
4829 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4830 delay);
4831 if (!pcie_wait_for_link_delay(dev, true, delay)) {
		/* Did not train, no need to wait any further */
4833 return;
4834 }
4835 }
4836
4837 if (!pci_device_is_present(child)) {
4838 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4839 msleep(delay);
4840 }
4841}
4842
4843void pci_reset_secondary_bus(struct pci_dev *dev)
4844{
4845 u16 ctrl;
4846
4847 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4848 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4849 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4850
	/*
	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
	 * this to 2ms to ensure that we meet the minimum requirement.
	 */
4855 msleep(2);
4856
4857 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4858 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4859
	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.  Assuming a
	 * minimum 33MHz clock this results in a 1 s delay before we can
	 * consider subordinate devices to be re-initialized.  PCIe
	 * devices need some minimum delay here as well, so a full second
	 * is used for both bus types.
	 */
4867 ssleep(1);
4868}
4869
4870void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4871{
4872 pci_reset_secondary_bus(dev);
4873}
4874
/**
 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
4882int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4883{
4884 pcibios_reset_secondary_bus(dev);
4885
4886 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4887}
4888EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4889
4890static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4891{
4892 struct pci_dev *pdev;
4893
4894 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4895 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4896 return -ENOTTY;
4897
4898 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4899 if (pdev != dev)
4900 return -ENOTTY;
4901
4902 if (probe)
4903 return 0;
4904
4905 return pci_bridge_secondary_bus_reset(dev->bus->self);
4906}
4907
4908static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4909{
4910 int rc = -ENOTTY;
4911
4912 if (!hotplug || !try_module_get(hotplug->owner))
4913 return rc;
4914
4915 if (hotplug->ops->reset_slot)
4916 rc = hotplug->ops->reset_slot(hotplug, probe);
4917
4918 module_put(hotplug->owner);
4919
4920 return rc;
4921}
4922
4923static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4924{
4925 struct pci_dev *pdev;
4926
4927 if (dev->subordinate || !dev->slot ||
4928 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4929 return -ENOTTY;
4930
4931 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4932 if (pdev != dev && pdev->slot == dev->slot)
4933 return -ENOTTY;
4934
4935 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4936}
4937
4938static void pci_dev_lock(struct pci_dev *dev)
4939{
4940 pci_cfg_access_lock(dev);
4941
4942 device_lock(&dev->dev);
4943}
4944
/* Return 1 on successful lock, 0 on contention */
4946static int pci_dev_trylock(struct pci_dev *dev)
4947{
4948 if (pci_cfg_access_trylock(dev)) {
4949 if (device_trylock(&dev->dev))
4950 return 1;
4951 pci_cfg_access_unlock(dev);
4952 }
4953
4954 return 0;
4955}
4956
4957static void pci_dev_unlock(struct pci_dev *dev)
4958{
4959 device_unlock(&dev->dev);
4960 pci_cfg_access_unlock(dev);
4961}
4962
4963static void pci_dev_save_and_disable(struct pci_dev *dev)
4964{
4965 const struct pci_error_handlers *err_handler =
4966 dev->driver ? dev->driver->err_handler : NULL;
4967
4968
4969
4970
4971
4972
4973 if (err_handler && err_handler->reset_prepare)
4974 err_handler->reset_prepare(dev);
4975
4976
4977
4978
4979
4980
4981 pci_set_power_state(dev, PCI_D0);
4982
4983 pci_save_state(dev);
4984
	/*
	 * Disable the device by clearing the Command register, except for
	 * INTx-disable which is set.  This not only disables MMIO and I/O
	 * port BARs, but also prevents the device from being Bus Master,
	 * preventing DMA from the device including MSI/MSI-X interrupts.
	 * For PCI 2.3 devices, INTx-disable prevents legacy interrupts.
	 */
4991 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4992}
4993
4994static void pci_dev_restore(struct pci_dev *dev)
4995{
4996 const struct pci_error_handlers *err_handler =
4997 dev->driver ? dev->driver->err_handler : NULL;
4998
4999 pci_restore_state(dev);
5000
5001
5002
5003
5004
5005
5006 if (err_handler && err_handler->reset_done)
5007 err_handler->reset_done(dev);
5008}
5009
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory
 * spaces, etc.
 *
 * Returns 0 if the device function was successfully reset or negative if
 * the device doesn't support resetting a single function.
 */
5030int __pci_reset_function_locked(struct pci_dev *dev)
5031{
5032 int rc;
5033
5034 might_sleep();
5035
	/*
	 * A reset method returns -ENOTTY if it doesn't support this device
	 * and we should try the next method.
	 *
	 * If it returns 0 (success), we're finished.  If it returns any
	 * other error, we're also finished: this indicates that further
	 * reset mechanisms might be broken on the device.
	 */
5044 rc = pci_dev_specific_reset(dev, 0);
5045 if (rc != -ENOTTY)
5046 return rc;
5047 if (pcie_has_flr(dev)) {
5048 rc = pcie_flr(dev);
5049 if (rc != -ENOTTY)
5050 return rc;
5051 }
5052 rc = pci_af_flr(dev, 0);
5053 if (rc != -ENOTTY)
5054 return rc;
5055 rc = pci_pm_reset(dev, 0);
5056 if (rc != -ENOTTY)
5057 return rc;
5058 rc = pci_dev_reset_slot_function(dev, 0);
5059 if (rc != -ENOTTY)
5060 return rc;
5061 return pci_parent_bus_reset(dev, 0);
5062}
5063EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076int pci_probe_reset_function(struct pci_dev *dev)
5077{
5078 int rc;
5079
5080 might_sleep();
5081
5082 rc = pci_dev_specific_reset(dev, 1);
5083 if (rc != -ENOTTY)
5084 return rc;
5085 if (pcie_has_flr(dev))
5086 return 0;
5087 rc = pci_af_flr(dev, 1);
5088 if (rc != -ENOTTY)
5089 return rc;
5090 rc = pci_pm_reset(dev, 1);
5091 if (rc != -ENOTTY)
5092 return rc;
5093 rc = pci_dev_reset_slot_function(dev, 1);
5094 if (rc != -ENOTTY)
5095 return rc;
5096
5097 return pci_parent_bus_reset(dev, 1);
5098}
5099
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  It differs from
 * __pci_reset_function_locked() in that it saves and restores device
 * state over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if
 * the device doesn't support resetting a single function.
 */
5116int pci_reset_function(struct pci_dev *dev)
5117{
5118 int rc;
5119
5120 if (!dev->reset_fn)
5121 return -ENOTTY;
5122
5123 pci_dev_lock(dev);
5124 pci_dev_save_and_disable(dev);
5125
5126 rc = __pci_reset_function_locked(dev);
5127
5128 pci_dev_restore(dev);
5129 pci_dev_unlock(dev);
5130
5131 return rc;
5132}
5133EXPORT_SYMBOL_GPL(pci_reset_function);
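
/*
 * Usage sketch (hypothetical driver code): reset a misbehaving function
 * and let the save/restore machinery above do the heavy lifting:
 *
 *	err = pci_reset_function(pdev);
 *	if (err)
 *		dev_warn(&pdev->dev, "reset failed: %d\n", err);
 */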
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152int pci_reset_function_locked(struct pci_dev *dev)
5153{
5154 int rc;
5155
5156 if (!dev->reset_fn)
5157 return -ENOTTY;
5158
5159 pci_dev_save_and_disable(dev);
5160
5161 rc = __pci_reset_function_locked(dev);
5162
5163 pci_dev_restore(dev);
5164
5165 return rc;
5166}
5167EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5168
/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
5175int pci_try_reset_function(struct pci_dev *dev)
5176{
5177 int rc;
5178
5179 if (!dev->reset_fn)
5180 return -ENOTTY;
5181
5182 if (!pci_dev_trylock(dev))
5183 return -EAGAIN;
5184
5185 pci_dev_save_and_disable(dev);
5186 rc = __pci_reset_function_locked(dev);
5187 pci_dev_restore(dev);
5188 pci_dev_unlock(dev);
5189
5190 return rc;
5191}
5192EXPORT_SYMBOL_GPL(pci_try_reset_function);
5193
/* Do any devices on or below this bus prevent a bus reset? */
5195static bool pci_bus_resetable(struct pci_bus *bus)
5196{
5197 struct pci_dev *dev;
5198
5199
5200 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5201 return false;
5202
5203 list_for_each_entry(dev, &bus->devices, bus_list) {
5204 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5205 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5206 return false;
5207 }
5208
5209 return true;
5210}
5211
5212
5213static void pci_bus_lock(struct pci_bus *bus)
5214{
5215 struct pci_dev *dev;
5216
5217 list_for_each_entry(dev, &bus->devices, bus_list) {
5218 pci_dev_lock(dev);
5219 if (dev->subordinate)
5220 pci_bus_lock(dev->subordinate);
5221 }
5222}
5223
5224
5225static void pci_bus_unlock(struct pci_bus *bus)
5226{
5227 struct pci_dev *dev;
5228
5229 list_for_each_entry(dev, &bus->devices, bus_list) {
5230 if (dev->subordinate)
5231 pci_bus_unlock(dev->subordinate);
5232 pci_dev_unlock(dev);
5233 }
5234}
5235
/* Return 1 on successful lock, 0 on contention */
5237static int pci_bus_trylock(struct pci_bus *bus)
5238{
5239 struct pci_dev *dev;
5240
5241 list_for_each_entry(dev, &bus->devices, bus_list) {
5242 if (!pci_dev_trylock(dev))
5243 goto unlock;
5244 if (dev->subordinate) {
5245 if (!pci_bus_trylock(dev->subordinate)) {
5246 pci_dev_unlock(dev);
5247 goto unlock;
5248 }
5249 }
5250 }
5251 return 1;
5252
5253unlock:
5254 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5255 if (dev->subordinate)
5256 pci_bus_unlock(dev->subordinate);
5257 pci_dev_unlock(dev);
5258 }
5259 return 0;
5260}
5261
5262
5263static bool pci_slot_resetable(struct pci_slot *slot)
5264{
5265 struct pci_dev *dev;
5266
5267 if (slot->bus->self &&
5268 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5269 return false;
5270
5271 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5272 if (!dev->slot || dev->slot != slot)
5273 continue;
5274 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5275 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5276 return false;
5277 }
5278
5279 return true;
5280}
5281
5282
5283static void pci_slot_lock(struct pci_slot *slot)
5284{
5285 struct pci_dev *dev;
5286
5287 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5288 if (!dev->slot || dev->slot != slot)
5289 continue;
5290 pci_dev_lock(dev);
5291 if (dev->subordinate)
5292 pci_bus_lock(dev->subordinate);
5293 }
5294}
5295
5296
5297static void pci_slot_unlock(struct pci_slot *slot)
5298{
5299 struct pci_dev *dev;
5300
5301 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5302 if (!dev->slot || dev->slot != slot)
5303 continue;
5304 if (dev->subordinate)
5305 pci_bus_unlock(dev->subordinate);
5306 pci_dev_unlock(dev);
5307 }
5308}
5309
5310
5311static int pci_slot_trylock(struct pci_slot *slot)
5312{
5313 struct pci_dev *dev;
5314
5315 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5316 if (!dev->slot || dev->slot != slot)
5317 continue;
5318 if (!pci_dev_trylock(dev))
5319 goto unlock;
5320 if (dev->subordinate) {
5321 if (!pci_bus_trylock(dev->subordinate)) {
5322 pci_dev_unlock(dev);
5323 goto unlock;
5324 }
5325 }
5326 }
5327 return 1;
5328
5329unlock:
5330 list_for_each_entry_continue_reverse(dev,
5331 &slot->bus->devices, bus_list) {
5332 if (!dev->slot || dev->slot != slot)
5333 continue;
5334 if (dev->subordinate)
5335 pci_bus_unlock(dev->subordinate);
5336 pci_dev_unlock(dev);
5337 }
5338 return 0;
5339}
5340
5341
5342
5343
5344
5345static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5346{
5347 struct pci_dev *dev;
5348
5349 list_for_each_entry(dev, &bus->devices, bus_list) {
5350 pci_dev_save_and_disable(dev);
5351 if (dev->subordinate)
5352 pci_bus_save_and_disable_locked(dev->subordinate);
5353 }
5354}
5355
5356
5357
5358
5359
5360
5361static void pci_bus_restore_locked(struct pci_bus *bus)
5362{
5363 struct pci_dev *dev;
5364
5365 list_for_each_entry(dev, &bus->devices, bus_list) {
5366 pci_dev_restore(dev);
5367 if (dev->subordinate)
5368 pci_bus_restore_locked(dev->subordinate);
5369 }
5370}
5371
5372
5373
5374
5375
5376static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5377{
5378 struct pci_dev *dev;
5379
5380 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5381 if (!dev->slot || dev->slot != slot)
5382 continue;
5383 pci_dev_save_and_disable(dev);
5384 if (dev->subordinate)
5385 pci_bus_save_and_disable_locked(dev->subordinate);
5386 }
5387}
5388
5389
5390
5391
5392
5393
5394static void pci_slot_restore_locked(struct pci_slot *slot)
5395{
5396 struct pci_dev *dev;
5397
5398 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5399 if (!dev->slot || dev->slot != slot)
5400 continue;
5401 pci_dev_restore(dev);
5402 if (dev->subordinate)
5403 pci_bus_restore_locked(dev->subordinate);
5404 }
5405}
5406
5407static int pci_slot_reset(struct pci_slot *slot, int probe)
5408{
5409 int rc;
5410
5411 if (!slot || !pci_slot_resetable(slot))
5412 return -ENOTTY;
5413
5414 if (!probe)
5415 pci_slot_lock(slot);
5416
5417 might_sleep();
5418
5419 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5420
5421 if (!probe)
5422 pci_slot_unlock(slot);
5423
5424 return rc;
5425}
5426
5427
5428
5429
5430
5431
5432
5433int pci_probe_reset_slot(struct pci_slot *slot)
5434{
5435 return pci_slot_reset(slot, 1);
5436}
5437EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454static int __pci_reset_slot(struct pci_slot *slot)
5455{
5456 int rc;
5457
5458 rc = pci_slot_reset(slot, 1);
5459 if (rc)
5460 return rc;
5461
5462 if (pci_slot_trylock(slot)) {
5463 pci_slot_save_and_disable_locked(slot);
5464 might_sleep();
5465 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5466 pci_slot_restore_locked(slot);
5467 pci_slot_unlock(slot);
5468 } else
5469 rc = -EAGAIN;
5470
5471 return rc;
5472}
5473
5474static int pci_bus_reset(struct pci_bus *bus, int probe)
5475{
5476 int ret;
5477
5478 if (!bus->self || !pci_bus_resetable(bus))
5479 return -ENOTTY;
5480
5481 if (probe)
5482 return 0;
5483
5484 pci_bus_lock(bus);
5485
5486 might_sleep();
5487
5488 ret = pci_bridge_secondary_bus_reset(bus->self);
5489
5490 pci_bus_unlock(bus);
5491
5492 return ret;
5493}
5494
/**
 * pci_bus_error_reset - reset the bridge's subordinate bus
 * @bridge: The parent device that connects to the bus to reset
 *
 * This function will first try to reset the slots on this bus if the
 * method is available.  If slot reset fails or is not available, this
 * will fall back to a secondary bus reset.
 */
5503int pci_bus_error_reset(struct pci_dev *bridge)
5504{
5505 struct pci_bus *bus = bridge->subordinate;
5506 struct pci_slot *slot;
5507
5508 if (!bus)
5509 return -ENOTTY;
5510
5511 mutex_lock(&pci_slot_mutex);
5512 if (list_empty(&bus->slots))
5513 goto bus_reset;
5514
5515 list_for_each_entry(slot, &bus->slots, list)
5516 if (pci_probe_reset_slot(slot))
5517 goto bus_reset;
5518
5519 list_for_each_entry(slot, &bus->slots, list)
5520 if (pci_slot_reset(slot, 0))
5521 goto bus_reset;
5522
5523 mutex_unlock(&pci_slot_mutex);
5524 return 0;
5525bus_reset:
5526 mutex_unlock(&pci_slot_mutex);
5527 return pci_bus_reset(bridge->subordinate, 0);
5528}
5529
5530
5531
5532
5533
5534
5535
5536int pci_probe_reset_bus(struct pci_bus *bus)
5537{
5538 return pci_bus_reset(bus, 1);
5539}
5540EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5541
5542
5543
5544
5545
5546
5547
5548static int __pci_reset_bus(struct pci_bus *bus)
5549{
5550 int rc;
5551
5552 rc = pci_bus_reset(bus, 1);
5553 if (rc)
5554 return rc;
5555
5556 if (pci_bus_trylock(bus)) {
5557 pci_bus_save_and_disable_locked(bus);
5558 might_sleep();
5559 rc = pci_bridge_secondary_bus_reset(bus->self);
5560 pci_bus_restore_locked(bus);
5561 pci_bus_unlock(bus);
5562 } else
5563 rc = -EAGAIN;
5564
5565 return rc;
5566}
5567
/**
 * pci_reset_bus - Try to reset a PCI bus
 * @pdev: top level PCI device to reset via slot/bus
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
5574int pci_reset_bus(struct pci_dev *pdev)
5575{
5576 return (!pci_probe_reset_slot(pdev->slot)) ?
5577 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5578}
5579EXPORT_SYMBOL_GPL(pci_reset_bus);
5580
5581
5582
5583
5584
5585
5586
5587
5588int pcix_get_max_mmrbc(struct pci_dev *dev)
5589{
5590 int cap;
5591 u32 stat;
5592
5593 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5594 if (!cap)
5595 return -EINVAL;
5596
5597 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5598 return -EINVAL;
5599
5600 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5601}
5602EXPORT_SYMBOL(pcix_get_max_mmrbc);
5603
5604
5605
5606
5607
5608
5609
5610
5611int pcix_get_mmrbc(struct pci_dev *dev)
5612{
5613 int cap;
5614 u16 cmd;
5615
5616 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5617 if (!cap)
5618 return -EINVAL;
5619
5620 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5621 return -EINVAL;
5622
5623 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5624}
5625EXPORT_SYMBOL(pcix_get_mmrbc);
5626
5627
5628
5629
5630
5631
5632
5633
5634
5635
5636int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5637{
5638 int cap;
5639 u32 stat, v, o;
5640 u16 cmd;
5641
5642 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5643 return -EINVAL;
5644
5645 v = ffs(mmrbc) - 10;
5646
5647 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5648 if (!cap)
5649 return -EINVAL;
5650
5651 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5652 return -EINVAL;
5653
5654 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5655 return -E2BIG;
5656
5657 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5658 return -EINVAL;
5659
5660 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5661 if (o != v) {
5662 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5663 return -EIO;
5664
5665 cmd &= ~PCI_X_CMD_MAX_READ;
5666 cmd |= v << 2;
5667 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5668 return -EIO;
5669 }
5670 return 0;
5671}
5672EXPORT_SYMBOL(pcix_set_mmrbc);
5673
5674
5675
5676
5677
5678
5679
5680int pcie_get_readrq(struct pci_dev *dev)
5681{
5682 u16 ctl;
5683
5684 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5685
5686 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5687}
5688EXPORT_SYMBOL(pcie_get_readrq);
5689
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
5698int pcie_set_readrq(struct pci_dev *dev, int rq)
5699{
5700 u16 v;
5701
5702 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5703 return -EINVAL;
5704
	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to keep the host bridge from
	 * generating requests larger than we can cope with.
	 */
5710 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5711 int mps = pcie_get_mps(dev);
5712
5713 if (mps < rq)
5714 rq = mps;
5715 }
5716
5717 v = (ffs(rq) - 8) << 12;
5718
5719 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5720 PCI_EXP_DEVCTL_READRQ, v);
5721}
5722EXPORT_SYMBOL(pcie_set_readrq);
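
/*
 * Usage sketch (hypothetical driver code): request the largest read
 * request size; values must be a power of two between 128 and 4096:
 *
 *	err = pcie_set_readrq(pdev, 4096);
 *	if (err)
 *		return err;
 */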
5723
5724
5725
5726
5727
5728
5729
5730int pcie_get_mps(struct pci_dev *dev)
5731{
5732 u16 ctl;
5733
5734 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5735
5736 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5737}
5738EXPORT_SYMBOL(pcie_get_mps);
5739
5740
5741
5742
5743
5744
5745
5746
5747
5748int pcie_set_mps(struct pci_dev *dev, int mps)
5749{
5750 u16 v;
5751
5752 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5753 return -EINVAL;
5754
5755 v = ffs(mps) - 8;
5756 if (v > dev->pcie_mpss)
5757 return -EINVAL;
5758 v <<= 5;
5759
5760 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5761 PCI_EXP_DEVCTL_PAYLOAD, v);
5762}
5763EXPORT_SYMBOL(pcie_set_mps);
5764
/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
5779u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5780 enum pci_bus_speed *speed,
5781 enum pcie_link_width *width)
5782{
5783 u16 lnksta;
5784 enum pci_bus_speed next_speed;
5785 enum pcie_link_width next_width;
5786 u32 bw, next_bw;
5787
5788 if (speed)
5789 *speed = PCI_SPEED_UNKNOWN;
5790 if (width)
5791 *width = PCIE_LNK_WIDTH_UNKNOWN;
5792
5793 bw = 0;
5794
5795 while (dev) {
5796 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5797
5798 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5799 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5800 PCI_EXP_LNKSTA_NLW_SHIFT;
5801
5802 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5803
5804
5805 if (!bw || next_bw <= bw) {
5806 bw = next_bw;
5807
5808 if (limiting_dev)
5809 *limiting_dev = dev;
5810 if (speed)
5811 *speed = next_speed;
5812 if (width)
5813 *width = next_width;
5814 }
5815
5816 dev = pci_upstream_bridge(dev);
5817 }
5818
5819 return bw;
5820}
5821EXPORT_SYMBOL(pcie_bandwidth_available);
5822
5823
5824
5825
5826
5827
5828
5829
5830enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5831{
5832 u32 lnkcap2, lnkcap;
5833
	/*
	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
	 * implementation note there recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * must use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
5843 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5844
	/* PCIe r3.0-compliant */
5846 if (lnkcap2)
5847 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5848
5849 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5850 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5851 return PCIE_SPEED_5_0GT;
5852 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5853 return PCIE_SPEED_2_5GT;
5854
5855 return PCI_SPEED_UNKNOWN;
5856}
5857EXPORT_SYMBOL(pcie_get_speed_cap);
5858
5859
5860
5861
5862
5863
5864
5865
5866enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5867{
5868 u32 lnkcap;
5869
5870 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5871 if (lnkcap)
5872 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5873
5874 return PCIE_LNK_WIDTH_UNKNOWN;
5875}
5876EXPORT_SYMBOL(pcie_get_width_cap);
5877
5878
5879
5880
5881
5882
5883
5884
5885
5886
5887
5888u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5889 enum pcie_link_width *width)
5890{
5891 *speed = pcie_get_speed_cap(dev);
5892 *width = pcie_get_width_cap(dev);
5893
5894 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5895 return 0;
5896
5897 return *width * PCIE_SPEED2MBS_ENC(*speed);
5898}
5899
5900
5901
5902
5903
5904
5905
5906
5907
5908
5909
5910void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5911{
5912 enum pcie_link_width width, width_cap;
5913 enum pci_bus_speed speed, speed_cap;
5914 struct pci_dev *limiting_dev = NULL;
5915 u32 bw_avail, bw_cap;
5916
5917 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5918 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5919
5920 if (bw_avail >= bw_cap && verbose)
5921 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5922 bw_cap / 1000, bw_cap % 1000,
5923 pci_speed_string(speed_cap), width_cap);
5924 else if (bw_avail < bw_cap)
5925 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5926 bw_avail / 1000, bw_avail % 1000,
5927 pci_speed_string(speed), width,
5928 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5929 bw_cap / 1000, bw_cap % 1000,
5930 pci_speed_string(speed_cap), width_cap);
5931}
5932
/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
5939void pcie_print_link_status(struct pci_dev *dev)
5940{
5941 __pcie_print_link_status(dev, true);
5942}
5943EXPORT_SYMBOL(pcie_print_link_status);
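
/*
 * Usage sketch (hypothetical driver code): bandwidth-hungry devices can
 * call this from probe so users see when a fast card sits behind a slow
 * link:
 *
 *	pcie_print_link_status(pdev);
 */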
5944
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
5952int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5953{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
5956 if (pci_resource_flags(dev, i) & flags)
5957 bars |= (1 << i);
5958 return bars;
5959}
5960EXPORT_SYMBOL(pci_select_bars);
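
/*
 * Usage sketch (hypothetical driver code; "mydrv" is illustrative):
 * reserve only the MMIO BARs, leaving any I/O port BARs alone:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "mydrv");
 *	if (err)
 *		return err;
 */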
5961
/* Some architectures require additional programming to enable VGA */
5963static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
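
/*
 * Illustrative call (the VGA arbiter in drivers/gpu/vga/vgaarb.c is the main
 * user): disable legacy VGA I/O and memory decoding on a device and flip the
 * VGA enable bit on every bridge above it:
 *
 *	pci_set_vga_state(pdev, false,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */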

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
	       acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
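
/*
 * Typical use is from a header fixup quirk, e.g. this sketch modeled on
 * drivers/pci/quirks.c (treat the vendor/device pairing as illustrative):
 *
 *	static void quirk_dma_func0_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 0)
 *			pci_add_dma_alias(dev,
 *					  PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832,
 *				 quirk_dma_func0_alias);
 */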

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	/* Avoid a config access if the device is known to be gone */
	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;
	/* Propagate the setting to the parent bridge as well */
	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
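
/*
 * Illustrative caller (GPU runtime-PM drivers such as nouveau follow this
 * pattern): mark a device about to be powered off so the resulting link-down
 * is not misinterpreted as a hot-unplug event:
 *
 *	pci_ignore_hotplug(pdev);
 *	pci_set_power_state(pdev, PCI_D3cold);
 */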

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another device.  This is the default
 * implementation.  Architecture implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			/* Reject shift counts that would overflow the type */
			if (align_order < 0 || align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = -1;
			}
		} else {
			align_order = -1;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
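
/*
 * Example of the string parsed above (the full syntax is documented in
 * Documentation/admin-guide/kernel-parameters.txt): align every BAR of
 * device 0000:01:00.0 to 2^12 = 4096 bytes, and any 8086:10d3 device to
 * the default PAGE_SIZE:
 *
 *	pci=resource_alignment=12@0000:01:00.0;pci:8086:10d3
 */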

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * devices and we use the second.
	 */
	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There is therefore no need to "fix" the size of a VF BAR.
	 */
	if (dev->is_virtfn)
		return;

	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	/*
	 * When set by the command line, resource_alignment_param will not
	 * have a trailing line feed, which is ugly. So conditionally add
	 * it here, appending rather than overwriting the last character.
	 */
	if (count && count < PAGE_SIZE - 1 && buf[count - 1] != '\n') {
		buf[count++] = '\n';
		buf[count] = 0;
	}

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param = kstrndup(buf, count, GFP_KERNEL);

	if (!param)
		return -ENOMEM;

	spin_lock(&resource_alignment_lock);
	kfree(resource_alignment_param);
	resource_alignment_param = param;
	spin_unlock(&resource_alignment_lock);

	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);
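
/*
 * The attribute created above is /sys/bus/pci/resource_alignment; e.g.
 * (shell, illustrative):
 *
 *	echo "12@0000:01:00.0" > /sys/bus/pci/resource_alignment
 *
 * The new alignment applies to devices whose resources are (re)assigned
 * afterwards, e.g. after a remove and rescan.
 */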

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif
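
/*
 * The DT property consumed above looks like this (illustrative fragment;
 * the compatible string is a placeholder):
 *
 *	pcie@fd0e0000 {
 *		compatible = "vendor,example-pcie";
 *		...
 *		linux,pci-domain = <0>;
 *	};
 */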

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
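
/*
 * Example boot-time use of the options parsed above (illustrative):
 *
 *	pci=noats,hpmemsize=128M,hpbussize=8
 *
 * disables ATS, reserves 128 MB of MMIO (and prefetchable MMIO) per hotplug
 * bridge, and reserves a minimum of 8 bus numbers behind each hotplug bridge.
 */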

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete. We can't allocate memory
 * in pci_setup() because some architectures do not have any memory allocation
 * service available during an early_param() call. So we allocate memory and
 * copy the variable here before the init section is freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);