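// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 */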
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3hot_delay;

	if (delay < pci_pm_d3hot_delay)
		delay = pci_pm_d3hot_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this per default */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000
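
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */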
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
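
/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */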
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
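
/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'; using a full path
 * is more robust against hardware changes than a single bus, device and
 * function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */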
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}
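
/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which may
 * change if new hardware is inserted or motherboard firmware changes.
 * The second format matches devices using IDs in the configuration space
 * which may match multiple devices in the system; a value of 0 for any
 * field will match all devices.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */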
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}
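
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */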
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
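
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */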
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);
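
/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */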
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
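
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR         Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC          Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN         Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR         Power Budgeting
 */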
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
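
/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */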
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword
	 * is the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
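
/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */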
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
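
/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */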
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);
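
/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */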
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively decoded aperture and a
			 * subtractively decoded region that contain the
			 * BAR.  We want the positively decoded one, so
			 * this depends on pci_bus_for_each_resource()
			 * giving us those first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);
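
/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */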
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);
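
/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */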
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;
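
/**
 * pci_disable_acs_redir - disable ACS redirect bits
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */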
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found; done */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices */
	if (dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->bridge_d3)
		return pci_platform_pm->bridge_d3(dev);
	return false;
}
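
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */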
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.
	 * E.g., we cannot go from D3hot to D1 or from D2 to D1.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		fallthrough;	/* Force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * A device transitioning from D3hot to D0 may perform an internal
	 * reset, thereby going to "D0 Uninitialized" rather than
	 * "D0 Initialized" (PCI PM 1.2, sec 5.4.1).  Some BIOSes also
	 * leave devices in D3hot at boot.  Restore at least the BARs in
	 * these cases so the device remains accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
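
/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't accessible, fall back to
 * the @state argument.
 */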
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap)
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in the
	 * PCIe resume paths of the corresponding bridge, so only wake the
	 * hierarchy up here if the device was previously in D3cold.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_wakeup_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
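
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware
 * and/or the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */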
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
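
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */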
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}
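
/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */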
int pci_save_state(struct pci_dev *dev)
{
	int i;

	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);

		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = ilog2(resource_size(res)) - 20;
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}
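
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */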
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);

	pci_aer_clear_status(dev);
	pci_restore_aer_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);

/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

void pcie_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}
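
/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */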
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}
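
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */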
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */
	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);
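
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */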
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */
	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for
 * it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			fallthrough;
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, it may have been powered down by non-standard means.
	 * Best to let it slumber.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device
 * supports PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended or it has to be
 * reconfigured due to wakeup settings difference between system and runtime
 * suspend, or the current power state of it is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
	return target_state != pci_dev->current_state &&
		target_state != PCI_D3cold &&
		pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to check.
 *
 * If the device is suspended and it is not configured for system wakeup,
 * disable PME for it to prevent it from waking up the system unnecessarily.
 *
 * Note that if the device's power state is D3cold and the platform check in
 * pci_dev_need_resume() has not triggered, the device's configuration need
 * not be changed.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend if
 * the device was not configured for system wakeup.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it.  However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
#endif
	{ }
};
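
/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */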
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management Mode
		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Even the oldest 2010 Thunderbolt controller supports D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not validated
		 * by vendors for runtime D3 at least until 2018 because there
		 * was no OS support.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 was not allowed previously and it is now, the change may be
	 * caused by any device in the subtree, so the whole tree is checked
	 * below.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, checking the device
	 * alone is not sufficient; walk all children to find out whether D3
	 * can now be allowed.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
2995
2996
2997
2998
2999
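/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */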
3000void pci_pm_init(struct pci_dev *dev)
3001{
3002 int pm;
3003 u16 status;
3004 u16 pmc;
3005
3006 pm_runtime_forbid(&dev->dev);
3007 pm_runtime_set_active(&dev->dev);
3008 pm_runtime_enable(&dev->dev);
3009 device_enable_async_suspend(&dev->dev);
3010 dev->wakeup_prepared = false;
3011
3012 dev->pm_cap = 0;
3013 dev->pme_support = 0;
3014
3015
3016 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3017 if (!pm)
3018 return;
3019
3020 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3021
3022 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3023 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3024 pmc & PCI_PM_CAP_VER_MASK);
3025 return;
3026 }
3027
3028 dev->pm_cap = pm;
3029 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3030 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3031 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3032 dev->d3cold_allowed = true;
3033
3034 dev->d1_support = false;
3035 dev->d2_support = false;
3036 if (!pci_no_d1d2(dev)) {
3037 if (pmc & PCI_PM_CAP_D1)
3038 dev->d1_support = true;
3039 if (pmc & PCI_PM_CAP_D2)
3040 dev->d2_support = true;
3041
3042 if (dev->d1_support || dev->d2_support)
3043 pci_info(dev, "supports%s%s\n",
3044 dev->d1_support ? " D1" : "",
3045 dev->d2_support ? " D2" : "");
3046 }
3047
3048 pmc &= PCI_PM_CAP_PME_MASK;
3049 if (pmc) {
3050 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3051 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3052 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3053 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3054 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3055 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3056 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3057 dev->pme_poll = true;
3058
3059
3060
3061
3062 device_set_wakeup_capable(&dev->dev, true);
3063
3064 pci_pme_active(dev, false);
3065 }
3066
3067 pci_read_config_word(dev, PCI_STATUS, &status);
3068 if (status & PCI_STATUS_IMM_READY)
3069 dev->imm_ready = 1;
3070}
3071
3072static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3073{
3074 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3075
3076 switch (prop) {
3077 case PCI_EA_P_MEM:
3078 case PCI_EA_P_VF_MEM:
3079 flags |= IORESOURCE_MEM;
3080 break;
3081 case PCI_EA_P_MEM_PREFETCH:
3082 case PCI_EA_P_VF_MEM_PREFETCH:
3083 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3084 break;
3085 case PCI_EA_P_IO:
3086 flags |= IORESOURCE_IO;
3087 break;
3088 default:
3089 return 0;
3090 }
3091
3092 return flags;
3093}
3094
3095static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3096 u8 prop)
3097{
3098 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3099 return &dev->resource[bei];
3100#ifdef CONFIG_PCI_IOV
3101 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3102 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3103 return &dev->resource[PCI_IOV_RESOURCES +
3104 bei - PCI_EA_BEI_VF_BAR0];
3105#endif
3106 else if (bei == PCI_EA_BEI_ROM)
3107 return &dev->resource[PCI_ROM_RESOURCE];
3108 else
3109 return NULL;
3110}
3111
3112
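/* Read an Enhanced Allocation (EA) entry */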
3113static int pci_ea_read(struct pci_dev *dev, int offset)
3114{
3115 struct resource *res;
3116 int ent_size, ent_offset = offset;
3117 resource_size_t start, end;
3118 unsigned long flags;
3119 u32 dw0, bei, base, max_offset;
3120 u8 prop;
3121 bool support_64 = (sizeof(resource_size_t) >= 8);
3122
3123 pci_read_config_dword(dev, ent_offset, &dw0);
3124 ent_offset += 4;
3125
3126
3127 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3128
3129 if (!(dw0 & PCI_EA_ENABLE))
3130 goto out;
3131
3132 bei = (dw0 & PCI_EA_BEI) >> 4;
3133 prop = (dw0 & PCI_EA_PP) >> 8;
3134
3135
3136
3137
3138
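	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */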
3139 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3140 prop = (dw0 & PCI_EA_SP) >> 16;
3141 if (prop > PCI_EA_P_BRIDGE_IO)
3142 goto out;
3143
3144 res = pci_ea_get_resource(dev, bei, prop);
3145 if (!res) {
3146 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3147 goto out;
3148 }
3149
3150 flags = pci_ea_flags(dev, prop);
3151 if (!flags) {
3152 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3153 goto out;
3154 }
3155
3156
3157 pci_read_config_dword(dev, ent_offset, &base);
3158 start = (base & PCI_EA_FIELD_MASK);
3159 ent_offset += 4;
3160
3161
3162 pci_read_config_dword(dev, ent_offset, &max_offset);
3163 ent_offset += 4;
3164
3165
3166 if (base & PCI_EA_IS_64) {
3167 u32 base_upper;
3168
3169 pci_read_config_dword(dev, ent_offset, &base_upper);
3170 ent_offset += 4;
3171
3172 flags |= IORESOURCE_MEM_64;
3173
3174
3175 if (!support_64 && base_upper)
3176 goto out;
3177
3178 if (support_64)
3179 start |= ((u64)base_upper << 32);
3180 }
3181
3182 end = start + (max_offset | 0x03);
3183
3184
3185 if (max_offset & PCI_EA_IS_64) {
3186 u32 max_offset_upper;
3187
3188 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3189 ent_offset += 4;
3190
3191 flags |= IORESOURCE_MEM_64;
3192
3193
3194 if (!support_64 && max_offset_upper)
3195 goto out;
3196
3197 if (support_64)
3198 end += ((u64)max_offset_upper << 32);
3199 }
3200
3201 if (end < start) {
3202 pci_err(dev, "EA Entry crosses address boundary\n");
3203 goto out;
3204 }
3205
3206 if (ent_size != ent_offset - offset) {
3207 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3208 ent_size, ent_offset - offset);
3209 goto out;
3210 }
3211
3212 res->name = pci_name(dev);
3213 res->start = start;
3214 res->end = end;
3215 res->flags = flags;
3216
3217 if (bei <= PCI_EA_BEI_BAR5)
3218 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3219 bei, res, prop);
3220 else if (bei == PCI_EA_BEI_ROM)
3221 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3222 res, prop);
3223 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3224 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3225 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3226 else
3227 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3228 bei, res, prop);
3229
3230out:
3231 return offset + ent_size;
3232}
3233
3234
3235void pci_ea_init(struct pci_dev *dev)
3236{
3237 int ea;
3238 u8 num_ent;
3239 int offset;
3240 int i;
3241
3242
3243 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3244 if (!ea)
3245 return;
3246
3247
3248 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3249 &num_ent);
3250 num_ent &= PCI_EA_NUM_ENT_MASK;
3251
3252 offset = ea + PCI_EA_FIRST_ENT;
3253
3254
3255 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3256 offset += 4;
3257
3258
3259 for (i = 0; i < num_ent; ++i)
3260 offset = pci_ea_read(dev, offset);
3261}
3262
3263static void pci_add_saved_cap(struct pci_dev *pci_dev,
3264 struct pci_cap_saved_state *new_cap)
3265{
3266 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3267}
3268
3269
3270
3271
3272
3273
3274
3275
3276
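/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *			      capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */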
3277static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3278 bool extended, unsigned int size)
3279{
3280 int pos;
3281 struct pci_cap_saved_state *save_state;
3282
3283 if (extended)
3284 pos = pci_find_ext_capability(dev, cap);
3285 else
3286 pos = pci_find_capability(dev, cap);
3287
3288 if (!pos)
3289 return 0;
3290
3291 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3292 if (!save_state)
3293 return -ENOMEM;
3294
3295 save_state->cap.cap_nr = cap;
3296 save_state->cap.cap_extended = extended;
3297 save_state->cap.size = size;
3298 pci_add_saved_cap(dev, save_state);
3299
3300 return 0;
3301}
3302
3303int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3304{
3305 return _pci_add_cap_save_buffer(dev, cap, false, size);
3306}
3307
3308int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3309{
3310 return _pci_add_cap_save_buffer(dev, cap, true, size);
3311}
3312
3313
3314
3315
3316
3317void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3318{
3319 int error;
3320
3321 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3322 PCI_EXP_SAVE_REGS * sizeof(u16));
3323 if (error)
3324 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3325
3326 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3327 if (error)
3328 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3329
3330 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3331 2 * sizeof(u16));
3332 if (error)
3333 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3334
3335 pci_allocate_vc_save_buffers(dev);
3336}
3337
3338void pci_free_cap_save_buffers(struct pci_dev *dev)
3339{
3340 struct pci_cap_saved_state *tmp;
3341 struct hlist_node *n;
3342
3343 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3344 kfree(tmp);
3345}
3346
3347
3348
3349
3350
3351
3352
3353
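/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */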
3354void pci_configure_ari(struct pci_dev *dev)
3355{
3356 u32 cap;
3357 struct pci_dev *bridge;
3358
3359 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3360 return;
3361
3362 bridge = dev->bus->self;
3363 if (!bridge)
3364 return;
3365
3366 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3367 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3368 return;
3369
3370 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3371 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3372 PCI_EXP_DEVCTL2_ARI);
3373 bridge->ari_enabled = 1;
3374 } else {
3375 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3376 PCI_EXP_DEVCTL2_ARI);
3377 bridge->ari_enabled = 0;
3378 }
3379}
3380
3381static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3382{
3383 int pos;
3384 u16 cap, ctrl;
3385
3386 pos = pdev->acs_cap;
3387 if (!pos)
3388 return false;
3389
3390
3391
3392
3393
3394
3395 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3396 acs_flags &= (cap | PCI_ACS_EC);
3397
3398 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3399 return (ctrl & acs_flags) == acs_flags;
3400}
3417
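/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */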
3418bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3419{
3420 int ret;
3421
3422 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3423 if (ret >= 0)
3424 return ret > 0;
3425
3426
3427
3428
3429
3430
3431 if (!pci_is_pcie(pdev))
3432 return false;
3433
3434 switch (pci_pcie_type(pdev)) {
3435
3436
3437
3438
3439
3440 case PCI_EXP_TYPE_PCIE_BRIDGE:
3441
3442
3443
3444
3445
3446
3447 case PCI_EXP_TYPE_PCI_BRIDGE:
3448 case PCI_EXP_TYPE_RC_EC:
3449 return false;
3450
3451
3452
3453
3454
3455 case PCI_EXP_TYPE_DOWNSTREAM:
3456 case PCI_EXP_TYPE_ROOT_PORT:
3457 return pci_acs_flags_enabled(pdev, acs_flags);
3458
3459
3460
3461
3462
3463
3464
3465 case PCI_EXP_TYPE_ENDPOINT:
3466 case PCI_EXP_TYPE_UPSTREAM:
3467 case PCI_EXP_TYPE_LEG_END:
3468 case PCI_EXP_TYPE_RC_END:
3469 if (!pdev->multifunction)
3470 break;
3471
3472 return pci_acs_flags_enabled(pdev, acs_flags);
3473 }
3474
3475
3476
3477
3478
3479 return true;
3480}
3490
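/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */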
3491bool pci_acs_path_enabled(struct pci_dev *start,
3492 struct pci_dev *end, u16 acs_flags)
3493{
3494 struct pci_dev *pdev, *parent = start;
3495
3496 do {
3497 pdev = parent;
3498
3499 if (!pci_acs_enabled(pdev, acs_flags))
3500 return false;
3501
3502 if (pci_is_root_bus(pdev->bus))
3503 return (end == NULL);
3504
3505 parent = pdev->bus->self;
3506 } while (pdev != end);
3507
3508 return true;
3509}
3510
3511
3512
3513
3514
3515void pci_acs_init(struct pci_dev *dev)
3516{
3517 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3518
3519
3520
3521
3522
3523
3524
3525 pci_enable_acs(dev);
3526}
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
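/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all.
 * Returns -ENOENT if no ctrl register for the BAR could be found.
 */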
3537static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3538{
3539 unsigned int pos, nbars, i;
3540 u32 ctrl;
3541
3542 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3543 if (!pos)
3544 return -ENOTSUPP;
3545
3546 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3547 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3548 PCI_REBAR_CTRL_NBAR_SHIFT;
3549
3550 for (i = 0; i < nbars; i++, pos += 8) {
3551 int bar_idx;
3552
3553 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3554 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3555 if (bar_idx == bar)
3556 return pos;
3557 }
3558
3559 return -ENOENT;
3560}
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3571{
3572 int pos;
3573 u32 cap;
3574
3575 pos = pci_rebar_find_pos(pdev, bar);
3576 if (pos < 0)
3577 return 0;
3578
3579 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3580 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3581}
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3592{
3593 int pos;
3594 u32 ctrl;
3595
3596 pos = pci_rebar_find_pos(pdev, bar);
3597 if (pos < 0)
3598 return pos;
3599
3600 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3601 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3602}
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3614{
3615 int pos;
3616 u32 ctrl;
3617
3618 pos = pci_rebar_find_pos(pdev, bar);
3619 if (pos < 0)
3620 return pos;
3621
3622 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3623 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3624 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3625 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3626 return 0;
3627}
3641
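/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities, or negative otherwise.
 */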
3642int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3643{
3644 struct pci_bus *bus = dev->bus;
3645 struct pci_dev *bridge;
3646 u32 cap, ctl2;
3647
3648 if (!pci_is_pcie(dev))
3649 return -EINVAL;
3650
3651
3652
3653
3654
3655
3656
3657
3658 switch (pci_pcie_type(dev)) {
3659 case PCI_EXP_TYPE_ENDPOINT:
3660 case PCI_EXP_TYPE_LEG_END:
3661 case PCI_EXP_TYPE_RC_END:
3662 break;
3663 default:
3664 return -EINVAL;
3665 }
3666
3667 while (bus->parent) {
3668 bridge = bus->self;
3669
3670 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3671
3672 switch (pci_pcie_type(bridge)) {
3673
3674 case PCI_EXP_TYPE_UPSTREAM:
3675 case PCI_EXP_TYPE_DOWNSTREAM:
3676 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3677 return -EINVAL;
3678 break;
3679
3680
3681 case PCI_EXP_TYPE_ROOT_PORT:
3682 if ((cap & cap_mask) != cap_mask)
3683 return -EINVAL;
3684 break;
3685 }
3686
3687
3688 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3689 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3690 &ctl2);
3691 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3692 return -EINVAL;
3693 }
3694
3695 bus = bus->parent;
3696 }
3697
3698 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3699 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3700 return 0;
3701}
3702EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
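/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INT pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0.
 */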
3715u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3716{
3717 int slot;
3718
3719 if (pci_ari_enabled(dev->bus))
3720 slot = 0;
3721 else
3722 slot = PCI_SLOT(dev->devfn);
3723
3724 return (((pin - 1) + slot) % 4) + 1;
3725}
3726
3727int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3728{
3729 u8 pin;
3730
3731 pin = dev->pin;
3732 if (!pin)
3733 return -1;
3734
3735 while (!pci_is_root_bus(dev->bus)) {
3736 pin = pci_swizzle_interrupt_pin(dev, pin);
3737 dev = dev->bus->self;
3738 }
3739 *bridge = dev;
3740 return pin;
3741}
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3752{
3753 u8 pin = *pinp;
3754
3755 while (!pci_is_root_bus(dev->bus)) {
3756 pin = pci_swizzle_interrupt_pin(dev, pin);
3757 dev = dev->bus->self;
3758 }
3759 *pinp = pin;
3760 return PCI_SLOT(dev->devfn);
3761}
3762EXPORT_SYMBOL_GPL(pci_common_swizzle);
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
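/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region().  Call this function only
 * after all use of the PCI regions has ceased.
 */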
3774void pci_release_region(struct pci_dev *pdev, int bar)
3775{
3776 struct pci_devres *dr;
3777
3778 if (pci_resource_len(pdev, bar) == 0)
3779 return;
3780 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3781 release_region(pci_resource_start(pdev, bar),
3782 pci_resource_len(pdev, bar));
3783 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3784 release_mem_region(pci_resource_start(pdev, bar),
3785 pci_resource_len(pdev, bar));
3786
3787 dr = find_pci_dr(pdev);
3788 if (dr)
3789 dr->region_mask &= ~(1 << bar);
3790}
3791EXPORT_SYMBOL(pci_release_region);
3811
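/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any address inside
 * the PCI regions unless this call returns successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */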
3812static int __pci_request_region(struct pci_dev *pdev, int bar,
3813 const char *res_name, int exclusive)
3814{
3815 struct pci_devres *dr;
3816
3817 if (pci_resource_len(pdev, bar) == 0)
3818 return 0;
3819
3820 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3821 if (!request_region(pci_resource_start(pdev, bar),
3822 pci_resource_len(pdev, bar), res_name))
3823 goto err_out;
3824 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3825 if (!__request_mem_region(pci_resource_start(pdev, bar),
3826 pci_resource_len(pdev, bar), res_name,
3827 exclusive))
3828 goto err_out;
3829 }
3830
3831 dr = find_pci_dr(pdev);
3832 if (dr)
3833 dr->region_mask |= 1 << bar;
3834
3835 return 0;
3836
3837err_out:
3838 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3839 &pdev->resource[bar]);
3840 return -EBUSY;
3841}
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3858{
3859 return __pci_request_region(pdev, bar, res_name, 0);
3860}
3861EXPORT_SYMBOL(pci_request_region);
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3872{
3873 int i;
3874
3875 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3876 if (bars & (1 << i))
3877 pci_release_region(pdev, i);
3878}
3879EXPORT_SYMBOL(pci_release_selected_regions);
3880
3881static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3882 const char *res_name, int excl)
3883{
3884 int i;
3885
3886 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3887 if (bars & (1 << i))
3888 if (__pci_request_region(pdev, i, res_name, excl))
3889 goto err_out;
3890 return 0;
3891
3892err_out:
3893 while (--i >= 0)
3894 if (bars & (1 << i))
3895 pci_release_region(pdev, i);
3896
3897 return -EBUSY;
3898}
3899
3900
3901
3902
3903
3904
3905
3906
3907int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3908 const char *res_name)
3909{
3910 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3911}
3912EXPORT_SYMBOL(pci_request_selected_regions);
3913
3914int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3915 const char *res_name)
3916{
3917 return __pci_request_selected_regions(pdev, bars, res_name,
3918 IORESOURCE_EXCLUSIVE);
3919}
3920EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932void pci_release_regions(struct pci_dev *pdev)
3933{
3934 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3935}
3936EXPORT_SYMBOL(pci_release_regions);
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3952{
3953 return pci_request_selected_regions(pdev,
3954 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3955}
3956EXPORT_SYMBOL(pci_request_regions);
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3974{
3975 return pci_request_selected_regions_exclusive(pdev,
3976 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3977}
3978EXPORT_SYMBOL(pci_request_regions_exclusive);
3979
3980
3981
3982
3983
3984int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3985 resource_size_t size)
3986{
3987 int ret = 0;
3988#ifdef PCI_IOBASE
3989 struct logic_pio_hwaddr *range;
3990
3991 if (!size || addr + size < addr)
3992 return -EINVAL;
3993
3994 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3995 if (!range)
3996 return -ENOMEM;
3997
3998 range->fwnode = fwnode;
3999 range->size = size;
4000 range->hw_start = addr;
4001 range->flags = LOGIC_PIO_CPU_MMIO;
4002
4003 ret = logic_pio_register_range(range);
4004 if (ret)
4005 kfree(range);
4006#endif
4007
4008 return ret;
4009}
4010
4011phys_addr_t pci_pio_to_address(unsigned long pio)
4012{
4013 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4014
4015#ifdef PCI_IOBASE
4016 if (pio >= MMIO_UPPER_LIMIT)
4017 return address;
4018
4019 address = logic_pio_to_hwaddr(pio);
4020#endif
4021
4022 return address;
4023}
4024
4025unsigned long __weak pci_address_to_pio(phys_addr_t address)
4026{
4027#ifdef PCI_IOBASE
4028 return logic_pio_trans_cpuaddr(address);
4029#else
4030 if (address > IO_SPACE_LIMIT)
4031 return (unsigned long)-1;
4032
4033 return (unsigned long) address;
4034#endif
4035}
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
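/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */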
4047int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4048{
4049#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4050 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4051
4052 if (!(res->flags & IORESOURCE_IO))
4053 return -EINVAL;
4054
4055 if (res->end > IO_SPACE_LIMIT)
4056 return -EINVAL;
4057
4058 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4059 pgprot_device(PAGE_KERNEL));
4060#else
4061
4062
4063
4064
4065 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4066 return -ENODEV;
4067#endif
4068}
4069EXPORT_SYMBOL(pci_remap_iospace);
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079void pci_unmap_iospace(struct resource *res)
4080{
4081#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4082 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4083
4084 unmap_kernel_range(vaddr, resource_size(res));
4085#endif
4086}
4087EXPORT_SYMBOL(pci_unmap_iospace);
4088
4089static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4090{
4091 struct resource **res = ptr;
4092
4093 pci_unmap_iospace(*res);
4094}
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4106 phys_addr_t phys_addr)
4107{
4108 const struct resource **ptr;
4109 int error;
4110
4111 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4112 if (!ptr)
4113 return -ENOMEM;
4114
4115 error = pci_remap_iospace(res, phys_addr);
4116 if (error) {
4117 devres_free(ptr);
4118 } else {
4119 *ptr = res;
4120 devres_add(dev, ptr);
4121 }
4122
4123 return error;
4124}
4125EXPORT_SYMBOL(devm_pci_remap_iospace);
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4137 resource_size_t offset,
4138 resource_size_t size)
4139{
4140 void __iomem **ptr, *addr;
4141
4142 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4143 if (!ptr)
4144 return NULL;
4145
4146 addr = pci_remap_cfgspace(offset, size);
4147 if (addr) {
4148 *ptr = addr;
4149 devres_add(dev, ptr);
4150 } else
4151 devres_free(ptr);
4152
4153 return addr;
4154}
4155EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4175
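/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it with the pci_remap_cfgspace() API that ensures
 * the proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
 * code on failure. Illustrative usage (names are exemplary)::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */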
4176void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4177 struct resource *res)
4178{
4179 resource_size_t size;
4180 const char *name;
4181 void __iomem *dest_ptr;
4182
4183 BUG_ON(!dev);
4184
4185 if (!res || resource_type(res) != IORESOURCE_MEM) {
4186 dev_err(dev, "invalid resource\n");
4187 return IOMEM_ERR_PTR(-EINVAL);
4188 }
4189
4190 size = resource_size(res);
4191 name = res->name ?: dev_name(dev);
4192
4193 if (!devm_request_mem_region(dev, res->start, size, name)) {
4194 dev_err(dev, "can't request region for resource %pR\n", res);
4195 return IOMEM_ERR_PTR(-EBUSY);
4196 }
4197
4198 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4199 if (!dest_ptr) {
4200 dev_err(dev, "ioremap failed for resource %pR\n", res);
4201 devm_release_mem_region(dev, res->start, size);
4202 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4203 }
4204
4205 return dest_ptr;
4206}
4207EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4208
4209static void __pci_set_master(struct pci_dev *dev, bool enable)
4210{
4211 u16 old_cmd, cmd;
4212
4213 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4214 if (enable)
4215 cmd = old_cmd | PCI_COMMAND_MASTER;
4216 else
4217 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4218 if (cmd != old_cmd) {
4219 pci_dbg(dev, "%s bus mastering\n",
4220 enable ? "enabling" : "disabling");
4221 pci_write_config_word(dev, PCI_COMMAND, cmd);
4222 }
4223 dev->is_busmaster = enable;
4224}
4225
4226
4227
4228
4229
4230
4231
4232
4233char * __weak __init pcibios_setup(char *str)
4234{
4235 return str;
4236}
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246void __weak pcibios_set_master(struct pci_dev *dev)
4247{
4248 u8 lat;
4249
4250
4251 if (pci_is_pcie(dev))
4252 return;
4253
4254 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4255 if (lat < 16)
4256 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4257 else if (lat > pcibios_max_latency)
4258 lat = pcibios_max_latency;
4259 else
4260 return;
4261
4262 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4263}
4264
4265
4266
4267
4268
4269
4270
4271
4272void pci_set_master(struct pci_dev *dev)
4273{
4274 __pci_set_master(dev, true);
4275 pcibios_set_master(dev);
4276}
4277EXPORT_SYMBOL(pci_set_master);
4278
4279
4280
4281
4282
4283void pci_clear_master(struct pci_dev *dev)
4284{
4285 __pci_set_master(dev, false);
4286}
4287EXPORT_SYMBOL(pci_clear_master);
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299int pci_set_cacheline_size(struct pci_dev *dev)
4300{
4301 u8 cacheline_size;
4302
4303 if (!pci_cache_line_size)
4304 return -EINVAL;
4305
4306
4307
4308 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4309 if (cacheline_size >= pci_cache_line_size &&
4310 (cacheline_size % pci_cache_line_size) == 0)
4311 return 0;
4312
4313
4314 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4315
4316 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4317 if (cacheline_size == pci_cache_line_size)
4318 return 0;
4319
4320 pci_info(dev, "cache line size of %d is not supported\n",
4321 pci_cache_line_size << 2);
4322
4323 return -EINVAL;
4324}
4325EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335int pci_set_mwi(struct pci_dev *dev)
4336{
4337#ifdef PCI_DISABLE_MWI
4338 return 0;
4339#else
4340 int rc;
4341 u16 cmd;
4342
4343 rc = pci_set_cacheline_size(dev);
4344 if (rc)
4345 return rc;
4346
4347 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4348 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4349 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4350 cmd |= PCI_COMMAND_INVALIDATE;
4351 pci_write_config_word(dev, PCI_COMMAND, cmd);
4352 }
4353 return 0;
4354#endif
4355}
4356EXPORT_SYMBOL(pci_set_mwi);
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366int pcim_set_mwi(struct pci_dev *dev)
4367{
4368 struct pci_devres *dr;
4369
4370 dr = find_pci_dr(dev);
4371 if (!dr)
4372 return -ENOMEM;
4373
4374 dr->mwi = 1;
4375 return pci_set_mwi(dev);
4376}
4377EXPORT_SYMBOL(pcim_set_mwi);
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388int pci_try_set_mwi(struct pci_dev *dev)
4389{
4390#ifdef PCI_DISABLE_MWI
4391 return 0;
4392#else
4393 return pci_set_mwi(dev);
4394#endif
4395}
4396EXPORT_SYMBOL(pci_try_set_mwi);
4397
4398
4399
4400
4401
4402
4403
4404void pci_clear_mwi(struct pci_dev *dev)
4405{
4406#ifndef PCI_DISABLE_MWI
4407 u16 cmd;
4408
4409 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4410 if (cmd & PCI_COMMAND_INVALIDATE) {
4411 cmd &= ~PCI_COMMAND_INVALIDATE;
4412 pci_write_config_word(dev, PCI_COMMAND, cmd);
4413 }
4414#endif
4415}
4416EXPORT_SYMBOL(pci_clear_mwi);
4417
4418
4419
4420
4421
4422
4423
4424
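/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */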
4425void pci_intx(struct pci_dev *pdev, int enable)
4426{
4427 u16 pci_command, new;
4428
4429 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4430
4431 if (enable)
4432 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4433 else
4434 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4435
4436 if (new != pci_command) {
4437 struct pci_devres *dr;
4438
4439 pci_write_config_word(pdev, PCI_COMMAND, new);
4440
4441 dr = find_pci_dr(pdev);
4442 if (dr && !dr->restore_intx) {
4443 dr->restore_intx = 1;
4444 dr->orig_intx = !enable;
4445 }
4446 }
4447}
4448EXPORT_SYMBOL_GPL(pci_intx);
4449
4450static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4451{
4452 struct pci_bus *bus = dev->bus;
4453 bool mask_updated = true;
4454 u32 cmd_status_dword;
4455 u16 origcmd, newcmd;
4456 unsigned long flags;
4457 bool irq_pending;
4458
4459
4460
4461
4462
4463 BUILD_BUG_ON(PCI_COMMAND % 4);
4464 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4465
4466 raw_spin_lock_irqsave(&pci_lock, flags);
4467
4468 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4469
4470 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4471
4472
4473
4474
4475
4476
4477 if (mask != irq_pending) {
4478 mask_updated = false;
4479 goto done;
4480 }
4481
4482 origcmd = cmd_status_dword;
4483 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4484 if (mask)
4485 newcmd |= PCI_COMMAND_INTX_DISABLE;
4486 if (newcmd != origcmd)
4487 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4488
4489done:
4490 raw_spin_unlock_irqrestore(&pci_lock, flags);
4491
4492 return mask_updated;
4493}
4494
4495
4496
4497
4498
4499
4500
4501
4502bool pci_check_and_mask_intx(struct pci_dev *dev)
4503{
4504 return pci_check_and_set_intx_mask(dev, true);
4505}
4506EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516bool pci_check_and_unmask_intx(struct pci_dev *dev)
4517{
4518 return pci_check_and_set_intx_mask(dev, false);
4519}
4520EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4521
4522
4523
4524
4525
4526
4527
4528int pci_wait_for_pending_transaction(struct pci_dev *dev)
4529{
4530 if (!pci_is_pcie(dev))
4531 return 1;
4532
4533 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4534 PCI_EXP_DEVSTA_TRPND);
4535}
4536EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4537
4538
4539
4540
4541
4542
4543
4544
4545bool pcie_has_flr(struct pci_dev *dev)
4546{
4547 u32 cap;
4548
4549 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4550 return false;
4551
4552 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4553 return cap & PCI_EXP_DEVCAP_FLR;
4554}
4555EXPORT_SYMBOL_GPL(pcie_has_flr);
4556
4557
4558
4559
4560
4561
4562
4563
4564
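/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */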
4565int pcie_flr(struct pci_dev *dev)
4566{
4567 if (!pci_wait_for_pending_transaction(dev))
4568 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4569
4570 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4571
4572 if (dev->imm_ready)
4573 return 0;
4574
4575
4576
4577
4578
4579
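	/*
	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */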
4580 msleep(100);
4581
4582 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4583}
4584EXPORT_SYMBOL_GPL(pcie_flr);
4585
4586static int pci_af_flr(struct pci_dev *dev, int probe)
4587{
4588 int pos;
4589 u8 cap;
4590
4591 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4592 if (!pos)
4593 return -ENOTTY;
4594
4595 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4596 return -ENOTTY;
4597
4598 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4599 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4600 return -ENOTTY;
4601
4602 if (probe)
4603 return 0;
4604
4605
4606
4607
4608
4609
4610 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4611 PCI_AF_STATUS_TP << 8))
4612 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4613
4614 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4615
4616 if (dev->imm_ready)
4617 return 0;
4618
4619
4620
4621
4622
4623
4624
4625 msleep(100);
4626
4627 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4628}
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
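/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If the device supports PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be
 * reinitialized.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3hot_delay field is different from
 * that).  For most devices, this is 20 ms total.
 */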
4645static int pci_pm_reset(struct pci_dev *dev, int probe)
4646{
4647 u16 csr;
4648
4649 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4650 return -ENOTTY;
4651
4652 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4653 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4654 return -ENOTTY;
4655
4656 if (probe)
4657 return 0;
4658
4659 if (dev->current_state != PCI_D0)
4660 return -EINVAL;
4661
4662 csr &= ~PCI_PM_CTRL_STATE_MASK;
4663 csr |= PCI_D3hot;
4664 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4665 pci_dev_d3_sleep(dev);
4666
4667 csr &= ~PCI_PM_CTRL_STATE_MASK;
4668 csr |= PCI_D0;
4669 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4670 pci_dev_d3_sleep(dev);
4671
4672 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4673}
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4684 int delay)
4685{
4686 int timeout = 1000;
4687 bool ret;
4688 u16 lnk_status;
4689
4690
4691
4692
4693
4694 if (!pdev->link_active_reporting) {
4695 msleep(timeout + delay);
4696 return true;
4697 }
4698
4699
4700
4701
4702
4703
4704
4705
4706
4707
4708 if (active)
4709 msleep(20);
4710 for (;;) {
4711 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4712 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4713 if (ret == active)
4714 break;
4715 if (timeout <= 0)
4716 break;
4717 msleep(10);
4718 timeout -= 10;
4719 }
4720 if (active && ret)
4721 msleep(delay);
4722
4723 return ret == active;
4724}
4725
4726
4727
4728
4729
4730
4731
4732
4733bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4734{
4735 return pcie_wait_for_link_delay(pdev, active, 100);
4736}
4737
4738
4739
4740
4741
4742
4743
4744
4745static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4746{
4747 const struct pci_dev *pdev;
4748 int min_delay = 100;
4749 int max_delay = 0;
4750
4751 list_for_each_entry(pdev, &bus->devices, bus_list) {
4752 if (pdev->d3cold_delay < min_delay)
4753 min_delay = pdev->d3cold_delay;
4754 if (pdev->d3cold_delay > max_delay)
4755 max_delay = pdev->d3cold_delay;
4756 }
4757
4758 return max(min_delay, max_delay);
4759}
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4773{
4774 struct pci_dev *child;
4775 int delay;
4776
4777 if (pci_dev_is_disconnected(dev))
4778 return;
4779
4780 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4781 return;
4782
4783 down_read(&pci_bus_sem);
4784
4785
4786
4787
4788
4789
4790
4791 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4792 up_read(&pci_bus_sem);
4793 return;
4794 }
4795
4796
4797 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4798 if (!delay) {
4799 up_read(&pci_bus_sem);
4800 return;
4801 }
4802
4803 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4804 bus_list);
4805 up_read(&pci_bus_sem);
4806
4807
4808
4809
4810
4811
4812
4813 if (!pci_is_pcie(dev)) {
4814 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4815 msleep(1000 + delay);
4816 return;
4817 }
4835
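	/*
	 * For PCIe downstream and root ports that do not support speeds
	 * greater than 5 GT/s we need to wait after the link training
	 * completes before sending configuration requests to the devices
	 * below.  For ports supporting speeds greater than 5 GT/s we first
	 * wait for the Data Link Layer to report the link active and only
	 * then start the delay (see PCIe r4.0, sec 6.6.1 for the
	 * corresponding timing requirements).
	 */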
4836 if (!pcie_downstream_port(dev))
4837 return;
4838
4839 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4840 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4841 msleep(delay);
4842 } else {
4843 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4844 delay);
4845 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4846
4847 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4848 return;
4849 }
4850 }
4851
4852 if (!pci_device_is_present(child)) {
4853 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4854 msleep(delay);
4855 }
4856}
4857
4858void pci_reset_secondary_bus(struct pci_dev *dev)
4859{
4860 u16 ctrl;
4861
4862 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4863 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4864 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4865
4866
4867
4868
4869
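	/*
	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
	 * this to 2ms to ensure that we meet the minimum requirement.
	 */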
4870 msleep(2);
4871
4872 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4873 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4874
4875
4876
4877
4878
4879
4880
4881
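	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.
	 * Assuming a minimum 33MHz clock this results in a 1s
	 * delay before we can consider subordinate devices to
	 * be re-initialized.  PCIe has some ways to shorten this,
	 * but we don't make use of them yet.
	 */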
4882 ssleep(1);
4883}
4884
4885void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4886{
4887 pci_reset_secondary_bus(dev);
4888}
4889
4890
4891
4892
4893
4894
4895
4896
4897int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4898{
4899 pcibios_reset_secondary_bus(dev);
4900
4901 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4902}
4903EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4904
4905static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4906{
4907 struct pci_dev *pdev;
4908
4909 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4910 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4911 return -ENOTTY;
4912
4913 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4914 if (pdev != dev)
4915 return -ENOTTY;
4916
4917 if (probe)
4918 return 0;
4919
4920 return pci_bridge_secondary_bus_reset(dev->bus->self);
4921}
4922
4923static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4924{
4925 int rc = -ENOTTY;
4926
4927 if (!hotplug || !try_module_get(hotplug->owner))
4928 return rc;
4929
4930 if (hotplug->ops->reset_slot)
4931 rc = hotplug->ops->reset_slot(hotplug, probe);
4932
4933 module_put(hotplug->owner);
4934
4935 return rc;
4936}
4937
4938static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4939{
4940 if (dev->multifunction || dev->subordinate || !dev->slot ||
4941 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4942 return -ENOTTY;
4943
4944 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4945}
4946
4947static void pci_dev_lock(struct pci_dev *dev)
4948{
4949 pci_cfg_access_lock(dev);
4950
4951 device_lock(&dev->dev);
4952}
4953
4954
4955static int pci_dev_trylock(struct pci_dev *dev)
4956{
4957 if (pci_cfg_access_trylock(dev)) {
4958 if (device_trylock(&dev->dev))
4959 return 1;
4960 pci_cfg_access_unlock(dev);
4961 }
4962
4963 return 0;
4964}
4965
4966static void pci_dev_unlock(struct pci_dev *dev)
4967{
4968 device_unlock(&dev->dev);
4969 pci_cfg_access_unlock(dev);
4970}
4971
4972static void pci_dev_save_and_disable(struct pci_dev *dev)
4973{
4974 const struct pci_error_handlers *err_handler =
4975 dev->driver ? dev->driver->err_handler : NULL;
4976
4977
4978
4979
4980
4981
4982 if (err_handler && err_handler->reset_prepare)
4983 err_handler->reset_prepare(dev);
4984
4985
4986
4987
4988
4989
4990 pci_set_power_state(dev, PCI_D0);
4991
4992 pci_save_state(dev);
4993
4994
4995
4996
4997
4998
4999
5000 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5001}
5002
5003static void pci_dev_restore(struct pci_dev *dev)
5004{
5005 const struct pci_error_handlers *err_handler =
5006 dev->driver ? dev->driver->err_handler : NULL;
5007
5008 pci_restore_state(dev);
5009
5010
5011
5012
5013
5014
5015 if (err_handler && err_handler->reset_done)
5016 err_handler->reset_done(dev);
5017}
5038
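/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */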
5039int __pci_reset_function_locked(struct pci_dev *dev)
5040{
5041 int rc;
5042
5043 might_sleep();
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053 rc = pci_dev_specific_reset(dev, 0);
5054 if (rc != -ENOTTY)
5055 return rc;
5056 if (pcie_has_flr(dev)) {
5057 rc = pcie_flr(dev);
5058 if (rc != -ENOTTY)
5059 return rc;
5060 }
5061 rc = pci_af_flr(dev, 0);
5062 if (rc != -ENOTTY)
5063 return rc;
5064 rc = pci_pm_reset(dev, 0);
5065 if (rc != -ENOTTY)
5066 return rc;
5067 rc = pci_dev_reset_slot_function(dev, 0);
5068 if (rc != -ENOTTY)
5069 return rc;
5070 return pci_parent_bus_reset(dev, 0);
5071}
5072EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085int pci_probe_reset_function(struct pci_dev *dev)
5086{
5087 int rc;
5088
5089 might_sleep();
5090
5091 rc = pci_dev_specific_reset(dev, 1);
5092 if (rc != -ENOTTY)
5093 return rc;
5094 if (pcie_has_flr(dev))
5095 return 0;
5096 rc = pci_af_flr(dev, 1);
5097 if (rc != -ENOTTY)
5098 return rc;
5099 rc = pci_pm_reset(dev, 1);
5100 if (rc != -ENOTTY)
5101 return rc;
5102 rc = pci_dev_reset_slot_function(dev, 1);
5103 if (rc != -ENOTTY)
5104 return rc;
5105
5106 return pci_parent_bus_reset(dev, 1);
5107}
5124
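/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but clears
 * all the state associated with the device.  It differs from
 * __pci_reset_function_locked() in that it saves and restores device state
 * over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */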
5125int pci_reset_function(struct pci_dev *dev)
5126{
5127 int rc;
5128
5129 if (!dev->reset_fn)
5130 return -ENOTTY;
5131
5132 pci_dev_lock(dev);
5133 pci_dev_save_and_disable(dev);
5134
5135 rc = __pci_reset_function_locked(dev);
5136
5137 pci_dev_restore(dev);
5138 pci_dev_unlock(dev);
5139
5140 return rc;
5141}
5142EXPORT_SYMBOL_GPL(pci_reset_function);
5160
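/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as pci_reset_function(), except that the device must already be
 * held locked by the caller; the device lock is not taken here.
 */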
5161int pci_reset_function_locked(struct pci_dev *dev)
5162{
5163 int rc;
5164
5165 if (!dev->reset_fn)
5166 return -ENOTTY;
5167
5168 pci_dev_save_and_disable(dev);
5169
5170 rc = __pci_reset_function_locked(dev);
5171
5172 pci_dev_restore(dev);
5173
5174 return rc;
5175}
5176EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5177
5178
5179
5180
5181
5182
5183
5184int pci_try_reset_function(struct pci_dev *dev)
5185{
5186 int rc;
5187
5188 if (!dev->reset_fn)
5189 return -ENOTTY;
5190
5191 if (!pci_dev_trylock(dev))
5192 return -EAGAIN;
5193
5194 pci_dev_save_and_disable(dev);
5195 rc = __pci_reset_function_locked(dev);
5196 pci_dev_restore(dev);
5197 pci_dev_unlock(dev);
5198
5199 return rc;
5200}
5201EXPORT_SYMBOL_GPL(pci_try_reset_function);
5202
5203
5204static bool pci_bus_resetable(struct pci_bus *bus)
5205{
5206 struct pci_dev *dev;
5207
5208
5209 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5210 return false;
5211
5212 list_for_each_entry(dev, &bus->devices, bus_list) {
5213 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5214 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5215 return false;
5216 }
5217
5218 return true;
5219}
5220
5221
5222static void pci_bus_lock(struct pci_bus *bus)
5223{
5224 struct pci_dev *dev;
5225
5226 list_for_each_entry(dev, &bus->devices, bus_list) {
5227 pci_dev_lock(dev);
5228 if (dev->subordinate)
5229 pci_bus_lock(dev->subordinate);
5230 }
5231}
5232
5233
5234static void pci_bus_unlock(struct pci_bus *bus)
5235{
5236 struct pci_dev *dev;
5237
5238 list_for_each_entry(dev, &bus->devices, bus_list) {
5239 if (dev->subordinate)
5240 pci_bus_unlock(dev->subordinate);
5241 pci_dev_unlock(dev);
5242 }
5243}
5244
5245
5246static int pci_bus_trylock(struct pci_bus *bus)
5247{
5248 struct pci_dev *dev;
5249
5250 list_for_each_entry(dev, &bus->devices, bus_list) {
5251 if (!pci_dev_trylock(dev))
5252 goto unlock;
5253 if (dev->subordinate) {
5254 if (!pci_bus_trylock(dev->subordinate)) {
5255 pci_dev_unlock(dev);
5256 goto unlock;
5257 }
5258 }
5259 }
5260 return 1;
5261
5262unlock:
5263 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5264 if (dev->subordinate)
5265 pci_bus_unlock(dev->subordinate);
5266 pci_dev_unlock(dev);
5267 }
5268 return 0;
5269}
5270
5271
5272static bool pci_slot_resetable(struct pci_slot *slot)
5273{
5274 struct pci_dev *dev;
5275
5276 if (slot->bus->self &&
5277 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5278 return false;
5279
5280 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5281 if (!dev->slot || dev->slot != slot)
5282 continue;
5283 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5284 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5285 return false;
5286 }
5287
5288 return true;
5289}
5290
5291
5292static void pci_slot_lock(struct pci_slot *slot)
5293{
5294 struct pci_dev *dev;
5295
5296 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5297 if (!dev->slot || dev->slot != slot)
5298 continue;
5299 pci_dev_lock(dev);
5300 if (dev->subordinate)
5301 pci_bus_lock(dev->subordinate);
5302 }
5303}
5304
5305
5306static void pci_slot_unlock(struct pci_slot *slot)
5307{
5308 struct pci_dev *dev;
5309
5310 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5311 if (!dev->slot || dev->slot != slot)
5312 continue;
5313 if (dev->subordinate)
5314 pci_bus_unlock(dev->subordinate);
5315 pci_dev_unlock(dev);
5316 }
5317}
5318
5319
5320static int pci_slot_trylock(struct pci_slot *slot)
5321{
5322 struct pci_dev *dev;
5323
5324 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5325 if (!dev->slot || dev->slot != slot)
5326 continue;
5327 if (!pci_dev_trylock(dev))
5328 goto unlock;
5329 if (dev->subordinate) {
5330 if (!pci_bus_trylock(dev->subordinate)) {
5331 pci_dev_unlock(dev);
5332 goto unlock;
5333 }
5334 }
5335 }
5336 return 1;
5337
5338unlock:
5339 list_for_each_entry_continue_reverse(dev,
5340 &slot->bus->devices, bus_list) {
5341 if (!dev->slot || dev->slot != slot)
5342 continue;
5343 if (dev->subordinate)
5344 pci_bus_unlock(dev->subordinate);
5345 pci_dev_unlock(dev);
5346 }
5347 return 0;
5348}
5349
5350
5351
5352
5353
5354static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5355{
5356 struct pci_dev *dev;
5357
5358 list_for_each_entry(dev, &bus->devices, bus_list) {
5359 pci_dev_save_and_disable(dev);
5360 if (dev->subordinate)
5361 pci_bus_save_and_disable_locked(dev->subordinate);
5362 }
5363}
5364
5365
5366
5367
5368
5369
5370static void pci_bus_restore_locked(struct pci_bus *bus)
5371{
5372 struct pci_dev *dev;
5373
5374 list_for_each_entry(dev, &bus->devices, bus_list) {
5375 pci_dev_restore(dev);
5376 if (dev->subordinate)
5377 pci_bus_restore_locked(dev->subordinate);
5378 }
5379}
5380
5381
5382
5383
5384
5385static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5386{
5387 struct pci_dev *dev;
5388
5389 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5390 if (!dev->slot || dev->slot != slot)
5391 continue;
5392 pci_dev_save_and_disable(dev);
5393 if (dev->subordinate)
5394 pci_bus_save_and_disable_locked(dev->subordinate);
5395 }
5396}
5397
5398
5399
5400
5401
5402
5403static void pci_slot_restore_locked(struct pci_slot *slot)
5404{
5405 struct pci_dev *dev;
5406
5407 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5408 if (!dev->slot || dev->slot != slot)
5409 continue;
5410 pci_dev_restore(dev);
5411 if (dev->subordinate)
5412 pci_bus_restore_locked(dev->subordinate);
5413 }
5414}
5415
5416static int pci_slot_reset(struct pci_slot *slot, int probe)
5417{
5418 int rc;
5419
5420 if (!slot || !pci_slot_resetable(slot))
5421 return -ENOTTY;
5422
5423 if (!probe)
5424 pci_slot_lock(slot);
5425
5426 might_sleep();
5427
5428 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5429
5430 if (!probe)
5431 pci_slot_unlock(slot);
5432
5433 return rc;
5434}
5435
5436
5437
5438
5439
5440
5441
5442int pci_probe_reset_slot(struct pci_slot *slot)
5443{
5444 return pci_slot_reset(slot, 1);
5445}
5446EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463static int __pci_reset_slot(struct pci_slot *slot)
5464{
5465 int rc;
5466
5467 rc = pci_slot_reset(slot, 1);
5468 if (rc)
5469 return rc;
5470
5471 if (pci_slot_trylock(slot)) {
5472 pci_slot_save_and_disable_locked(slot);
5473 might_sleep();
5474 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5475 pci_slot_restore_locked(slot);
5476 pci_slot_unlock(slot);
5477 } else
5478 rc = -EAGAIN;
5479
5480 return rc;
5481}
5482
5483static int pci_bus_reset(struct pci_bus *bus, int probe)
5484{
5485 int ret;
5486
5487 if (!bus->self || !pci_bus_resetable(bus))
5488 return -ENOTTY;
5489
5490 if (probe)
5491 return 0;
5492
5493 pci_bus_lock(bus);
5494
5495 might_sleep();
5496
5497 ret = pci_bridge_secondary_bus_reset(bus->self);
5498
5499 pci_bus_unlock(bus);
5500
5501 return ret;
5502}
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512int pci_bus_error_reset(struct pci_dev *bridge)
5513{
5514 struct pci_bus *bus = bridge->subordinate;
5515 struct pci_slot *slot;
5516
5517 if (!bus)
5518 return -ENOTTY;
5519
5520 mutex_lock(&pci_slot_mutex);
5521 if (list_empty(&bus->slots))
5522 goto bus_reset;
5523
5524 list_for_each_entry(slot, &bus->slots, list)
5525 if (pci_probe_reset_slot(slot))
5526 goto bus_reset;
5527
5528 list_for_each_entry(slot, &bus->slots, list)
5529 if (pci_slot_reset(slot, 0))
5530 goto bus_reset;
5531
5532 mutex_unlock(&pci_slot_mutex);
5533 return 0;
5534bus_reset:
5535 mutex_unlock(&pci_slot_mutex);
5536 return pci_bus_reset(bridge->subordinate, 0);
5537}
5538
5539
5540
5541
5542
5543
5544
5545int pci_probe_reset_bus(struct pci_bus *bus)
5546{
5547 return pci_bus_reset(bus, 1);
5548}
5549EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5550
5551
5552
5553
5554
5555
5556
5557static int __pci_reset_bus(struct pci_bus *bus)
5558{
5559 int rc;
5560
5561 rc = pci_bus_reset(bus, 1);
5562 if (rc)
5563 return rc;
5564
5565 if (pci_bus_trylock(bus)) {
5566 pci_bus_save_and_disable_locked(bus);
5567 might_sleep();
5568 rc = pci_bridge_secondary_bus_reset(bus->self);
5569 pci_bus_restore_locked(bus);
5570 pci_bus_unlock(bus);
5571 } else
5572 rc = -EAGAIN;
5573
5574 return rc;
5575}
5576
5577
5578
5579
5580
5581
5582
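/**
 * pci_reset_bus - Try to reset a PCI bus
 * @pdev: top level PCI device to reset via slot/bus
 *
 * Attempt a slot reset if @pdev sits in a resettable slot, otherwise fall
 * back to resetting the whole bus.  Returns -EAGAIN if the devices cannot
 * be locked.
 */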
5583int pci_reset_bus(struct pci_dev *pdev)
5584{
5585 return (!pci_probe_reset_slot(pdev->slot)) ?
5586 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5587}
5588EXPORT_SYMBOL_GPL(pci_reset_bus);
5589
5590
5591
5592
5593
5594
5595
5596
5597int pcix_get_max_mmrbc(struct pci_dev *dev)
5598{
5599 int cap;
5600 u32 stat;
5601
5602 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5603 if (!cap)
5604 return -EINVAL;
5605
5606 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5607 return -EINVAL;
5608
5609 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5610}
5611EXPORT_SYMBOL(pcix_get_max_mmrbc);
5612
5613
5614
5615
5616
5617
5618
5619
5620int pcix_get_mmrbc(struct pci_dev *dev)
5621{
5622 int cap;
5623 u16 cmd;
5624
5625 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5626 if (!cap)
5627 return -EINVAL;
5628
5629 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5630 return -EINVAL;
5631
5632 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5633}
5634EXPORT_SYMBOL(pcix_get_mmrbc);
5635
5636
5637
5638
5639
5640
5641
5642
5643
5644
5645int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5646{
5647 int cap;
5648 u32 stat, v, o;
5649 u16 cmd;
5650
5651 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5652 return -EINVAL;
5653
5654 v = ffs(mmrbc) - 10;
5655
5656 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5657 if (!cap)
5658 return -EINVAL;
5659
5660 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5661 return -EINVAL;
5662
5663 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5664 return -E2BIG;
5665
5666 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5667 return -EINVAL;
5668
5669 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5670 if (o != v) {
5671 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5672 return -EIO;
5673
5674 cmd &= ~PCI_X_CMD_MAX_READ;
5675 cmd |= v << 2;
5676 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5677 return -EIO;
5678 }
5679 return 0;
5680}
5681EXPORT_SYMBOL(pcix_set_mmrbc);
5682
5683
5684
5685
5686
5687
5688
5689int pcie_get_readrq(struct pci_dev *dev)
5690{
5691 u16 ctl;
5692
5693 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5694
5695 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5696}
5697EXPORT_SYMBOL(pcie_get_readrq);
5698
5699
5700
5701
5702
5703
5704
5705
5706
5707int pcie_set_readrq(struct pci_dev *dev, int rq)
5708{
5709 u16 v;
5710 int ret;
5711
5712 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5713 return -EINVAL;
5714
5715
5716
5717
5718
5719
5720 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5721 int mps = pcie_get_mps(dev);
5722
5723 if (mps < rq)
5724 rq = mps;
5725 }
5726
5727 v = (ffs(rq) - 8) << 12;
5728
5729 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5730 PCI_EXP_DEVCTL_READRQ, v);
5731
5732 return pcibios_err_to_errno(ret);
5733}
5734EXPORT_SYMBOL(pcie_set_readrq);
5735
5736
5737
5738
5739
5740
5741
5742int pcie_get_mps(struct pci_dev *dev)
5743{
5744 u16 ctl;
5745
5746 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5747
5748 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5749}
5750EXPORT_SYMBOL(pcie_get_mps);
5751
5752
5753
5754
5755
5756
5757
5758
5759
5760int pcie_set_mps(struct pci_dev *dev, int mps)
5761{
5762 u16 v;
5763 int ret;
5764
5765 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5766 return -EINVAL;
5767
5768 v = ffs(mps) - 8;
5769 if (v > dev->pcie_mpss)
5770 return -EINVAL;
5771 v <<= 5;
5772
5773 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5774 PCI_EXP_DEVCTL_PAYLOAD, v);
5775
5776 return pcibios_err_to_errno(ret);
5777}
5778EXPORT_SYMBOL(pcie_set_mps);
5793
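/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */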
5794u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5795 enum pci_bus_speed *speed,
5796 enum pcie_link_width *width)
5797{
5798 u16 lnksta;
5799 enum pci_bus_speed next_speed;
5800 enum pcie_link_width next_width;
5801 u32 bw, next_bw;
5802
5803 if (speed)
5804 *speed = PCI_SPEED_UNKNOWN;
5805 if (width)
5806 *width = PCIE_LNK_WIDTH_UNKNOWN;
5807
5808 bw = 0;
5809
5810 while (dev) {
5811 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5812
5813 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5814 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5815 PCI_EXP_LNKSTA_NLW_SHIFT;
5816
5817 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5818
5819
5820 if (!bw || next_bw <= bw) {
5821 bw = next_bw;
5822
5823 if (limiting_dev)
5824 *limiting_dev = dev;
5825 if (speed)
5826 *speed = next_speed;
5827 if (width)
5828 *width = next_width;
5829 }
5830
5831 dev = pci_upstream_bridge(dev);
5832 }
5833
5834 return bw;
5835}
5836EXPORT_SYMBOL(pcie_bandwidth_available);
5837
5838
5839
5840
5841
5842
5843
5844
5845enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5846{
5847 u32 lnkcap2, lnkcap;
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5859
5860
5861 if (lnkcap2)
5862 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5863
5864 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5865 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5866 return PCIE_SPEED_5_0GT;
5867 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5868 return PCIE_SPEED_2_5GT;
5869
5870 return PCI_SPEED_UNKNOWN;
5871}
5872EXPORT_SYMBOL(pcie_get_speed_cap);
5873
5874
5875
5876
5877
5878
5879
5880
5881enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5882{
5883 u32 lnkcap;
5884
5885 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5886 if (lnkcap)
5887 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5888
5889 return PCIE_LNK_WIDTH_UNKNOWN;
5890}
5891EXPORT_SYMBOL(pcie_get_width_cap);
5892
5893
5894
5895
5896
5897
5898
5899
5900
5901
5902
5903u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5904 enum pcie_link_width *width)
5905{
5906 *speed = pcie_get_speed_cap(dev);
5907 *width = pcie_get_width_cap(dev);
5908
5909 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5910 return 0;
5911
5912 return *width * PCIE_SPEED2MBS_ENC(*speed);
5913}
5914
5924
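/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance.  If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */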
5925void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5926{
5927 enum pcie_link_width width, width_cap;
5928 enum pci_bus_speed speed, speed_cap;
5929 struct pci_dev *limiting_dev = NULL;
5930 u32 bw_avail, bw_cap;
5931
5932 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5933 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5934
5935 if (bw_avail >= bw_cap && verbose)
5936 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5937 bw_cap / 1000, bw_cap % 1000,
5938 pci_speed_string(speed_cap), width_cap);
5939 else if (bw_avail < bw_cap)
5940 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5941 bw_avail / 1000, bw_avail % 1000,
5942 pci_speed_string(speed), width,
5943 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5944 bw_cap / 1000, bw_cap % 1000,
5945 pci_speed_string(speed_cap), width_cap);
5946}
5947
5948
5949
5950
5951
5952
5953
5954void pcie_print_link_status(struct pci_dev *dev)
5955{
5956 __pcie_print_link_status(dev, true);
5957}
5958EXPORT_SYMBOL(pcie_print_link_status);

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);
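
/*
 * Usage sketch (hypothetical driver code; "foo" is a placeholder driver
 * name): select all memory BARs and request them in one call:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	if (pci_request_selected_regions(pdev, bars, "foo"))
 *		return -ENODEV;
 */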

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
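
/*
 * Usage sketch (illustrative): the VGA arbiter routes the legacy VGA
 * ranges to one device at a time.  Disabling decode for a device and
 * updating the VGA forwarding bit on all upstream bridges could look
 * like:
 *
 *	pci_set_vga_state(pdev, false,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */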

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
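
/*
 * Usage sketch (illustrative, modeled on the fixups in quirks.c): a
 * header fixup for a device whose DMA requests appear to come from
 * function 0 could alias that single devfn:
 *
 *	static void quirk_dma_func0_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 0)
 *			pci_add_dma_alias(dev,
 *					  PCI_DEVFN(PCI_SLOT(dev->devfn), 0),
 *					  1);
 *	}
 */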

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;

	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another PCI device on another PCI bus.
 * If the PCI device is on the same bus, it is recommended to use
 * pci_add_dma_alias(). This is the default implementation. Architecture
 * implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
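
/*
 * Example (illustrative): the string parsed above comes from the
 * "pci=resource_alignment=" boot option or the sysfs attribute below.
 * The optional "<order>@" prefix gives the alignment as a power of two,
 * and the device part uses the pci_dev_str_match() syntax, so requesting
 * 4K (order 12) alignment for one device by address and for all devices
 * with a given vendor/device ID could look like:
 *
 *	pci=resource_alignment=12@0000:01:00.0;12@pci:8086:9c22
 */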

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset from the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * devices and we use the second method.
	 */
	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}
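
/*
 * Worked example (illustrative): for a 4K BAR with a requested 64K
 * alignment, the resize path above grows the resource to [0x0, 0xffff]
 * (64K, so size-based alignment applies), while the non-resize path
 * keeps the 4K size but marks the resource IORESOURCE_STARTALIGN with
 * r->start = 0x10000 recording the requested alignment.  Either way,
 * IORESOURCE_UNSET makes the allocator reassign the BAR later.
 */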

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There's therefore no need to trigger realignment, and in fact it
	 * cannot happen in this case.
	 */
	if (dev->is_virtfn)
		return;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	/*
	 * When set by the command line, resource_alignment_param will not
	 * have a trailing line feed, which is ugly. So conditionally add
	 * it here.
	 */
	if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
		buf[count - 1] = '\n';
		buf[count++] = 0;
	}

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param = kstrndup(buf, count, GFP_KERNEL);

	if (!param)
		return -ENOMEM;

	spin_lock(&resource_alignment_lock);
	kfree(resource_alignment_param);
	resource_alignment_param = param;
	spin_unlock(&resource_alignment_lock);
	return count;
}
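
/*
 * Usage sketch (illustrative): the bus attribute registered below accepts
 * the same syntax as the boot parameter, taking effect for devices that
 * are (re)scanned afterwards, e.g. from a shell:
 *
 *	# echo "12@pci:8086:9c22" > /sys/bus/pci/resource_alignment
 */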

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}
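
/*
 * Example (illustrative devicetree fragment; node name and address are
 * placeholders): a host bridge that assigns its domain number from DT
 * carries the property consumed above:
 *
 *	pcie@40000000 {
 *		...
 *		linux,pci-domain = <0>;
 *	};
 */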

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
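
/*
 * Example (illustrative): several of the options parsed above can be
 * combined in a single "pci=" argument on the kernel command line, with
 * memparse() accepting the usual K/M/G suffixes, e.g.:
 *
 *	pci=nomsi,noaer,hpmemsize=64M,pcie_bus_safe
 */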

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete. It is the job of this
 * function to update them and keep them in the heap or set them to NULL.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);
