// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks, in ms */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3hot_delay;

	if (delay < pci_pm_d3hot_delay)
		delay = pci_pm_d3hot_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)

unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * These can be overridden with the pci=hpmmiosize=nnM, pci=hpmmioprefsize=nnM
 * and pci=hpmemsize=nnM boot parameters.
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
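
/*
 * Illustrative usage (a sketch, not part of this file): a driver that has
 * enabled its device and wants a whole MEM BAR mapped might do:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *
 * The mapping must be released with iounmap() when no longer needed.
 */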

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'. Using a path is
 * more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is left
 * unspecified, it is taken to be 0.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified (optionally
		 * followed by a path of devfns).
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				   unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
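
/*
 * Illustrative usage (a sketch): check whether a device has a power
 * management capability before touching its PM registers:
 *
 *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */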

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
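
/*
 * Illustrative usage (a sketch): locate the AER extended capability and
 * read its uncorrectable error status register:
 *
 *	u16 aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &sta);
 */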

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
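
/*
 * Illustrative usage (a sketch): a driver might use the DSN to tell
 * otherwise-identical adapters apart:
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */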

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the
			 * BAR.  We want the positively-decoded one, so
			 * this depends on pci_bus_for_each_resource()
			 * giving us those first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/*
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect, P2P Egress Control */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices */
	if (dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->bridge_d3)
		return pci_platform_pm->bridge_d3(dev);
	return false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper.  E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		fallthrough;	/* force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see the PCI PM
	 * specification for details.
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * A device transitioning from D3hot to D0 may perform an internal
	 * reset, thereby going to "D0 Uninitialized" rather than
	 * "D0 Initialized" (see PCI PM spec).  Also, some firmware leaves
	 * devices in D3hot at boot.  In both cases we need to restore at
	 * least the BARs so that the device will be accessible to its
	 * driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in the
	 * resume paths of the corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state, resume them to
		 * give them a chance to suspend again
		 */
		pci_resume_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put the device in D3cold, put the device into D3hot in the
	 * native way, then put it into D3cold with platform ops.
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
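
/*
 * Illustrative usage (a sketch): a driver's suspend path might put the
 * device into D3hot once it has quiesced I/O and saved state:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */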

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	pci_save_ptm_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
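
/*
 * Illustrative usage (a sketch): save config space before an operation
 * that may clobber it (reset, power transition) and restore afterwards:
 *
 *	pci_save_state(pdev);
 *	... reset or power transition ...
 *	pci_restore_state(pdev);
 */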

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = ilog2(resource_size(res)) - 20;
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);
	pci_restore_ptm_state(dev);

	pci_aer_clear_status(dev);
	pci_restore_aer_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
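
/*
 * Illustrative usage (a sketch): stash a device's saved state across an
 * operation that may clobber it, then put it back:
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	... do something destructive ...
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */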

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
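
/*
 * Illustrative usage (a sketch of a typical probe sequence):
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 *
 * Drivers normally pair this with pci_disable_device() in their remove
 * path, unless they use the managed pcim_enable_device() below.
 */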

/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
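
/*
 * Illustrative usage (a sketch): with the managed variant there is no
 * explicit disable in the error or remove paths; devres undoes the
 * enable automatically on driver detach:
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 */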

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

void pcie_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */
	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */
	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);
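
/*
 * Illustrative usage (a sketch): a NIC suspend handler that arms
 * wake-on-LAN when the user has allowed wakeup:
 *
 *	if (wol_enabled)
 *		pci_enable_wake(pdev, PCI_D3hot, true);
 */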

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for
 * it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			fallthrough;
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * If the device is in D3cold even though it's not power-manageable by
	 * the platform, there will be no way to put it back into D0 without
	 * relying on the platform, so assume it should stay in D3cold.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/*
	 * There are systems (for example, Intel mobile chips since Coffee
	 * Lake) where the power drawn while suspended can be significantly
	 * reduced by disabling PTM on PCIe root ports as this allows the
	 * port to enter a lower-power PM state and the SoC to reach a
	 * lower-power idle state as a whole.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		pci_disable_ptm(dev);

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		pci_restore_ptm_state(dev);
	}

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	/*
	 * There are systems (for example, Intel mobile chips since Coffee
	 * Lake) where the power drawn while suspended can be significantly
	 * reduced by disabling PTM on PCIe root ports as this allows the
	 * port to enter a lower-power PM state and the SoC to reach a
	 * lower-power idle state as a whole.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		pci_disable_ptm(dev);

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		pci_restore_ptm_state(dev);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device
 * supports PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended or it has to be
 * reconfigured due to wakeup settings difference between system and runtime
 * suspend, or the current power state of it is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
	return target_state != pci_dev->current_state &&
		target_state != PCI_D3cold &&
		pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to check.
 *
 * If the device is suspended and it is not configured for system wakeup,
 * disable PME for it to prevent it from waking up the system unnecessarily.
 *
 * Note that if the device's power state is D3cold and the platform check in
 * pci_dev_need_resume() has not triggered, the device's configuration need not
 * be changed.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it.  However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
#endif
	{ }
};

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management
		 * Mode may not be put into D3 by the OS.
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Thunderbolt controllers are known to support D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not
		 * validated by vendors for runtime D3, so be conservative
		 * and refuse.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/*
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3014
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				 dev->d1_support ? " D1" : "",
				 dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space decide whether they should be used.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
}
3090
3091static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3092{
3093 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3094
3095 switch (prop) {
3096 case PCI_EA_P_MEM:
3097 case PCI_EA_P_VF_MEM:
3098 flags |= IORESOURCE_MEM;
3099 break;
3100 case PCI_EA_P_MEM_PREFETCH:
3101 case PCI_EA_P_VF_MEM_PREFETCH:
3102 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3103 break;
3104 case PCI_EA_P_IO:
3105 flags |= IORESOURCE_IO;
3106 break;
3107 default:
3108 return 0;
3109 }
3110
3111 return flags;
3112}
3113
3114static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3115 u8 prop)
3116{
3117 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3118 return &dev->resource[bei];
3119#ifdef CONFIG_PCI_IOV
3120 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3121 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3122 return &dev->resource[PCI_IOV_RESOURCES +
3123 bei - PCI_EA_BEI_VF_BAR0];
3124#endif
3125 else if (bei == PCI_EA_BEI_ROM)
3126 return &dev->resource[PCI_ROM_RESOURCE];
3127 else
3128 return NULL;
3129}
3130
/* Read an Enhanced Allocation (EA) entry */
static int pci_ea_read(struct pci_dev *dev, int offset)
3133{
3134 struct resource *res;
3135 int ent_size, ent_offset = offset;
3136 resource_size_t start, end;
3137 unsigned long flags;
3138 u32 dw0, bei, base, max_offset;
3139 u8 prop;
3140 bool support_64 = (sizeof(resource_size_t) >= 8);
3141
3142 pci_read_config_dword(dev, ent_offset, &dw0);
3143 ent_offset += 4;

	/* Entry size field indicates DWORDs after 1st */
	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;

	if (!(dw0 & PCI_EA_ENABLE))	/* Entry not enabled */
		goto out;
3150
3151 bei = (dw0 & PCI_EA_BEI) >> 4;
3152 prop = (dw0 & PCI_EA_PP) >> 8;

	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
		prop = (dw0 & PCI_EA_SP) >> 16;
3160 if (prop > PCI_EA_P_BRIDGE_IO)
3161 goto out;
3162
3163 res = pci_ea_get_resource(dev, bei, prop);
3164 if (!res) {
3165 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3166 goto out;
3167 }
3168
3169 flags = pci_ea_flags(dev, prop);
3170 if (!flags) {
3171 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3172 goto out;
3173 }

	/* Read Base */
	pci_read_config_dword(dev, ent_offset, &base);
3177 start = (base & PCI_EA_FIELD_MASK);
3178 ent_offset += 4;

	/* Read MaxOffset */
	pci_read_config_dword(dev, ent_offset, &max_offset);
3182 ent_offset += 4;

	/* Read Base MSBs (if 64-bit entry) */
	if (base & PCI_EA_IS_64) {
3186 u32 base_upper;
3187
3188 pci_read_config_dword(dev, ent_offset, &base_upper);
3189 ent_offset += 4;
3190
3191 flags |= IORESOURCE_MEM_64;

		/* entry starts above 32-bit boundary, can't use */
		if (!support_64 && base_upper)
			goto out;
3196
3197 if (support_64)
3198 start |= ((u64)base_upper << 32);
3199 }
3200
3201 end = start + (max_offset | 0x03);

	/* Read MaxOffset MSBs (if 64-bit entry) */
	if (max_offset & PCI_EA_IS_64) {
3205 u32 max_offset_upper;
3206
3207 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3208 ent_offset += 4;
3209
3210 flags |= IORESOURCE_MEM_64;

		/* entry too big, can't use */
		if (!support_64 && max_offset_upper)
			goto out;
3215
3216 if (support_64)
3217 end += ((u64)max_offset_upper << 32);
3218 }
3219
3220 if (end < start) {
3221 pci_err(dev, "EA Entry crosses address boundary\n");
3222 goto out;
3223 }
3224
3225 if (ent_size != ent_offset - offset) {
3226 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3227 ent_size, ent_offset - offset);
3228 goto out;
3229 }
3230
3231 res->name = pci_name(dev);
3232 res->start = start;
3233 res->end = end;
3234 res->flags = flags;
3235
3236 if (bei <= PCI_EA_BEI_BAR5)
3237 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3238 bei, res, prop);
3239 else if (bei == PCI_EA_BEI_ROM)
3240 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3241 res, prop);
3242 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3243 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3244 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3245 else
3246 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3247 bei, res, prop);
3248
3249out:
3250 return offset + ent_size;
3251}
3252
/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
3255{
3256 int ea;
3257 u8 num_ent;
3258 int offset;
3259 int i;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3263 if (!ea)
3264 return;

	/* determine the number of entries */
	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
				 &num_ent);
3269 num_ent &= PCI_EA_NUM_ENT_MASK;
3270
3271 offset = ea + PCI_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		offset += 4;

	/* parse each EA entry */
	for (i = 0; i < num_ent; ++i)
		offset = pci_ea_read(dev, offset);
3280}
3281
3282static void pci_add_saved_cap(struct pci_dev *pci_dev,
3283 struct pci_cap_saved_state *new_cap)
3284{
3285 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3286}
3287
/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *			      capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
				    bool extended, unsigned int size)
3298{
3299 int pos;
3300 struct pci_cap_saved_state *save_state;
3301
3302 if (extended)
3303 pos = pci_find_ext_capability(dev, cap);
3304 else
3305 pos = pci_find_capability(dev, cap);
3306
3307 if (!pos)
3308 return 0;
3309
3310 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3311 if (!save_state)
3312 return -ENOMEM;
3313
3314 save_state->cap.cap_nr = cap;
3315 save_state->cap.cap_extended = extended;
3316 save_state->cap.size = size;
3317 pci_add_saved_cap(dev, save_state);
3318
3319 return 0;
3320}
3321
3322int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3323{
3324 return _pci_add_cap_save_buffer(dev, cap, false, size);
3325}
3326
3327int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3328{
3329 return _pci_add_cap_save_buffer(dev, cap, true, size);
3330}
3331
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3337{
3338 int error;
3339
3340 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3341 PCI_EXP_SAVE_REGS * sizeof(u16));
3342 if (error)
3343 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3344
3345 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3346 if (error)
3347 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3348
3349 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3350 2 * sizeof(u16));
3351 if (error)
3352 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3353
3354 pci_allocate_vc_save_buffers(dev);
3355}
3356
3357void pci_free_cap_save_buffers(struct pci_dev *dev)
3358{
3359 struct pci_cap_saved_state *tmp;
3360 struct hlist_node *n;
3361
3362 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3363 kfree(tmp);
3364}
3365
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
void pci_configure_ari(struct pci_dev *dev)
3374{
3375 u32 cap;
3376 struct pci_dev *bridge;
3377
3378 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3379 return;
3380
3381 bridge = dev->bus->self;
3382 if (!bridge)
3383 return;
3384
3385 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3386 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3387 return;
3388
3389 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3390 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3391 PCI_EXP_DEVCTL2_ARI);
3392 bridge->ari_enabled = 1;
3393 } else {
3394 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3395 PCI_EXP_DEVCTL2_ARI);
3396 bridge->ari_enabled = 0;
3397 }
3398}
3399
3400static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3401{
3402 int pos;
3403 u16 cap, ctrl;
3404
3405 pos = pdev->acs_cap;
3406 if (!pos)
3407 return false;

	/*
	 * Except for egress control, capabilities are either required
	 * or only required if controllable.  Features missing from the
	 * capability field can therefore be assumed as hard-wired enabled.
	 */
	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3415 acs_flags &= (cap | PCI_ACS_EC);
3416
3417 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3418 return (ctrl & acs_flags) == acs_flags;
3419}
3420
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or capability of the device and test ACS against the intended flags.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int ret;

	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
	if (ret >= 0)
		return ret > 0;

	/*
	 * Conventional PCI and PCI-X devices never support ACS, either
	 * effectively or actually.  The shared bus topology implies that
	 * any device on the bus can receive or snoop DMA.
	 */
	if (!pci_is_pcie(pdev))
		return false;

	switch (pci_pcie_type(pdev)) {
	/*
	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
	 * but since their primary interface is PCI/X, we conservatively
	 * handle them as we would a non-PCIe device.
	 */
	case PCI_EXP_TYPE_PCIE_BRIDGE:
	/*
	 * PCIe 3.0, 6.12.1 excludes ACS on these devices; "ACS is never
	 * applicable" to PCIe-to-PCI/X bridges and Root Complex Event
	 * Collectors, which must never implement an ACS capability.
	 */
	case PCI_EXP_TYPE_PCI_BRIDGE:
	case PCI_EXP_TYPE_RC_EC:
		return false;
	/*
	 * PCIe 3.0, 6.12.1.1 specifies ACS capabilities that should be
	 * implemented by Downstream Ports and Root Ports, which may
	 * support peer-to-peer routing below them.
	 */
	case PCI_EXP_TYPE_DOWNSTREAM:
	case PCI_EXP_TYPE_ROOT_PORT:
		return pci_acs_flags_enabled(pdev, acs_flags);
	/*
	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
	 * implemented by the remaining PCIe types, but only when they are
	 * part of a multifunction device, where peer-to-peer between
	 * sibling functions is possible.
	 */
	case PCI_EXP_TYPE_ENDPOINT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_LEG_END:
	case PCI_EXP_TYPE_RC_END:
		if (!pdev->multifunction)
			break;

		return pci_acs_flags_enabled(pdev, acs_flags);
	}

	/*
	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
	 * to single function devices with the exception of downstream ports.
	 */
	return true;
}
3500
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags)
3512{
3513 struct pci_dev *pdev, *parent = start;
3514
3515 do {
3516 pdev = parent;
3517
3518 if (!pci_acs_enabled(pdev, acs_flags))
3519 return false;
3520
3521 if (pci_is_root_bus(pdev->bus))
3522 return (end == NULL);
3523
3524 parent = pdev->bus->self;
3525 } while (pdev != end);
3526
3527 return true;
3528}
3529
/**
 * pci_acs_init - Initialize ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_acs_init(struct pci_dev *dev)
{
	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);

	/*
	 * Attempt to enable ACS regardless of the capability, because some
	 * Root Ports (e.g. those quirked with *_intel_pch_acs_*) implement
	 * equivalent controls via device-specific registers instead of the
	 * standard ACS capability.
	 */
	pci_enable_acs(dev);
}
3546
/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all.
 * Returns -ENOENT if no ctrl register for the BAR could be found.
 */
static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3557{
3558 unsigned int pos, nbars, i;
3559 u32 ctrl;
3560
3561 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3562 if (!pos)
3563 return -ENOTSUPP;
3564
3565 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3566 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3567 PCI_REBAR_CTRL_NBAR_SHIFT;
3568
3569 for (i = 0; i < nbars; i++, pos += 8) {
3570 int bar_idx;
3571
3572 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3573 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3574 if (bar_idx == bar)
3575 return pos;
3576 }
3577
3578 return -ENOENT;
3579}
3580
/**
 * pci_rebar_get_possible_sizes - get possible sizes for BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
 */
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3590{
3591 int pos;
3592 u32 cap;
3593
3594 pos = pci_rebar_find_pos(pdev, bar);
3595 if (pos < 0)
3596 return 0;
3597
3598 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3599 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3600}
3601
/**
 * pci_rebar_get_current_size - get the current size of a BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Read the size of a BAR from the resizable BAR config.
 * Returns size if found or negative error code.
 */
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3611{
3612 int pos;
3613 u32 ctrl;
3614
3615 pos = pci_rebar_find_pos(pdev, bar);
3616 if (pos < 0)
3617 return pos;
3618
3619 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3620 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3621}
3622
/**
 * pci_rebar_set_size - set a new size for a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 * @size: new size as defined in the spec (0=1MB, 19=512GB)
 *
 * Set the new size of a BAR as defined in the spec.
 * Returns zero if resizing was successful, error code otherwise.
 */
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3633{
3634 int pos;
3635 u32 ctrl;
3636
3637 pos = pci_rebar_find_pos(pdev, bar);
3638 if (pos < 0)
3639 return pos;
3640
3641 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3642 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3643 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3644 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3645 return 0;
3646}
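
/*
 * Example (illustrative sketch, not part of the kernel API): a
 * hypothetical driver could combine the helpers above to grow BAR 0 to
 * the largest advertised size.  Sizes are encoded as in the spec: bit n
 * set in the capability mask means 2^n MB is supported.
 *
 *	static int grow_bar0(struct pci_dev *pdev)
 *	{
 *		u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *
 *		if (!sizes)
 *			return -ENOTSUPP;	// BAR 0 is not resizable
 *
 *		// __fls() selects the highest supported size bit
 *		return pci_rebar_set_size(pdev, 0, __fls(sizes));
 *	}
 *
 * A real driver would also release and reassign the BAR's resource
 * around the resize; see pci_resize_resource() for the full sequence.
 */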
3647
/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3662{
3663 struct pci_bus *bus = dev->bus;
3664 struct pci_dev *bridge;
3665 u32 cap, ctl2;
3666
3667 if (!pci_is_pcie(dev))
3668 return -EINVAL;
3669
	/*
	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
	 * AtomicOp requesters.  For now, we only support endpoints as
	 * requesters and root ports as completers.  No endpoints as
	 * completers, and no peer-to-peer.
	 */
	switch (pci_pcie_type(dev)) {
3678 case PCI_EXP_TYPE_ENDPOINT:
3679 case PCI_EXP_TYPE_LEG_END:
3680 case PCI_EXP_TYPE_RC_END:
3681 break;
3682 default:
3683 return -EINVAL;
3684 }
3685
3686 while (bus->parent) {
3687 bridge = bus->self;
3688
3689 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3690
3691 switch (pci_pcie_type(bridge)) {
		/* Ensure switch ports support AtomicOp routing */
		case PCI_EXP_TYPE_UPSTREAM:
3694 case PCI_EXP_TYPE_DOWNSTREAM:
3695 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3696 return -EINVAL;
3697 break;

		/* Ensure root port supports all the sizes we care about */
		case PCI_EXP_TYPE_ROOT_PORT:
3701 if ((cap & cap_mask) != cap_mask)
3702 return -EINVAL;
3703 break;
3704 }
3705

		/* Ensure upstream ports don't block AtomicOps on egress */
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3708 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3709 &ctl2);
3710 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3711 return -EINVAL;
3712 }
3713
3714 bus = bus->parent;
3715 }
3716
3717 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3718 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3719 return 0;
3720}
3721EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
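
/*
 * Example (sketch): an endpoint driver that wants 64-bit AtomicOp
 * completion at the Root Port would typically probe for it and fall
 * back to a non-atomic mode; "pdev" is the device being probed:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "PCIe AtomicOps not usable\n");
 */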
3722
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0.
 */
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3735{
3736 int slot;
3737
3738 if (pci_ari_enabled(dev->bus))
3739 slot = 0;
3740 else
3741 slot = PCI_SLOT(dev->devfn);
3742
3743 return (((pin - 1) + slot) % 4) + 1;
3744}
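
/*
 * Worked example for the formula above: a device in slot 3 that reports
 * INTB (pin 2) appears on the upstream side of the bridge as
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA.
 */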
3745
3746int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3747{
3748 u8 pin;
3749
3750 pin = dev->pin;
3751 if (!pin)
3752 return -1;
3753
3754 while (!pci_is_root_bus(dev->bus)) {
3755 pin = pci_swizzle_interrupt_pin(dev, pin);
3756 dev = dev->bus->self;
3757 }
3758 *bridge = dev;
3759 return pin;
3760}
3761
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3771{
3772 u8 pin = *pinp;
3773
3774 while (!pci_is_root_bus(dev->bus)) {
3775 pin = pci_swizzle_interrupt_pin(dev, pin);
3776 dev = dev->bus->self;
3777 }
3778 *pinp = pin;
3779 return PCI_SLOT(dev->devfn);
3780}
3781EXPORT_SYMBOL_GPL(pci_common_swizzle);
3782
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region().  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
3794{
3795 struct pci_devres *dr;
3796
3797 if (pci_resource_len(pdev, bar) == 0)
3798 return;
3799 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3800 release_region(pci_resource_start(pdev, bar),
3801 pci_resource_len(pdev, bar));
3802 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3803 release_mem_region(pci_resource_start(pdev, bar),
3804 pci_resource_len(pdev, bar));
3805
3806 dr = find_pci_dr(pdev);
3807 if (dr)
3808 dr->region_mask &= ~(1 << bar);
3809}
3810EXPORT_SYMBOL(pci_release_region);
3811
/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar,
				const char *res_name, int exclusive)
3833{
3834 struct pci_devres *dr;
3835
3836 if (pci_resource_len(pdev, bar) == 0)
3837 return 0;
3838
3839 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3840 if (!request_region(pci_resource_start(pdev, bar),
3841 pci_resource_len(pdev, bar), res_name))
3842 goto err_out;
3843 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3844 if (!__request_mem_region(pci_resource_start(pdev, bar),
3845 pci_resource_len(pdev, bar), res_name,
3846 exclusive))
3847 goto err_out;
3848 }
3849
3850 dr = find_pci_dr(pdev);
3851 if (dr)
3852 dr->region_mask |= 1 << bar;
3853
3854 return 0;
3855
3856err_out:
3857 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3858 &pdev->resource[bar]);
3859 return -EBUSY;
3860}
3861
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3877{
3878 return __pci_request_region(pdev, bar, res_name, 0);
3879}
3880EXPORT_SYMBOL(pci_request_region);
3881
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3891{
3892 int i;
3893
3894 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3895 if (bars & (1 << i))
3896 pci_release_region(pdev, i);
3897}
3898EXPORT_SYMBOL(pci_release_selected_regions);
3899
3900static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3901 const char *res_name, int excl)
3902{
3903 int i;
3904
3905 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3906 if (bars & (1 << i))
3907 if (__pci_request_region(pdev, i, res_name, excl))
3908 goto err_out;
3909 return 0;
3910
3911err_out:
3912 while (--i >= 0)
3913 if (bars & (1 << i))
3914 pci_release_region(pdev, i);
3915
3916 return -EBUSY;
3917}
3918
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
3928{
3929 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3930}
3931EXPORT_SYMBOL(pci_request_selected_regions);
3932
3933int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3934 const char *res_name)
3935{
3936 return __pci_request_selected_regions(pdev, bars, res_name,
3937 IORESOURCE_EXCLUSIVE);
3938}
3939EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3940
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_regions()
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions().  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
3952{
3953 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3954}
3955EXPORT_SYMBOL(pci_release_regions);
3956
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3971{
3972 return pci_request_selected_regions(pdev,
3973 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3974}
3975EXPORT_SYMBOL(pci_request_regions);
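
/*
 * Example (sketch): the canonical probe-time pairing with
 * pci_enable_device(); the function and driver names are hypothetical
 * and errors unwind in reverse order:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc = pci_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			pci_disable_device(pdev);
 *		return rc;
 *	}
 */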
3976
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name.  Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that /dev/mem
 * and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3993{
3994 return pci_request_selected_regions_exclusive(pdev,
3995 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
3996}
3997EXPORT_SYMBOL(pci_request_regions_exclusive);
3998
/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise
 */
int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4004 resource_size_t size)
4005{
4006 int ret = 0;
4007#ifdef PCI_IOBASE
4008 struct logic_pio_hwaddr *range;
4009
4010 if (!size || addr + size < addr)
4011 return -EINVAL;
4012
4013 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4014 if (!range)
4015 return -ENOMEM;
4016
4017 range->fwnode = fwnode;
4018 range->size = size;
4019 range->hw_start = addr;
4020 range->flags = LOGIC_PIO_CPU_MMIO;
4021
4022 ret = logic_pio_register_range(range);
4023 if (ret)
4024 kfree(range);
4025#endif
4026
4027 return ret;
4028}
4029
4030phys_addr_t pci_pio_to_address(unsigned long pio)
4031{
4032 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4033
4034#ifdef PCI_IOBASE
4035 if (pio >= MMIO_UPPER_LIMIT)
4036 return address;
4037
4038 address = logic_pio_to_hwaddr(pio);
4039#endif
4040
4041 return address;
4042}
4043
4044unsigned long __weak pci_address_to_pio(phys_addr_t address)
4045{
4046#ifdef PCI_IOBASE
4047 return logic_pio_trans_cpuaddr(address);
4048#else
4049 if (address > IO_SPACE_LIMIT)
4050 return (unsigned long)-1;
4051
4052 return (unsigned long) address;
4053#endif
4054}
4055
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4067{
4068#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4069 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4070
4071 if (!(res->flags & IORESOURCE_IO))
4072 return -EINVAL;
4073
4074 if (res->end > IO_SPACE_LIMIT)
4075 return -EINVAL;
4076
4077 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4078 pgprot_device(PAGE_KERNEL));
#else
	/*
	 * This architecture does not have memory mapped I/O space,
	 * so this function should never be called
	 */
	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4085 return -ENODEV;
4086#endif
4087}
4088EXPORT_SYMBOL(pci_remap_iospace);
4089
/**
 * pci_unmap_iospace - Unmap the memory mapped I/O space
 * @res: resource to be unmapped
 *
 * Unmap the CPU virtual address @res from virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
void pci_unmap_iospace(struct resource *res)
4099{
4100#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4101 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4102
4103 unmap_kernel_range(vaddr, resource_size(res));
4104#endif
4105}
4106EXPORT_SYMBOL(pci_unmap_iospace);
4107
4108static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4109{
4110 struct resource **res = ptr;
4111
4112 pci_unmap_iospace(*res);
4113}
4114
/**
 * devm_pci_remap_iospace - Managed pci_remap_iospace()
 * @dev: Generic device to remap IO address for
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
 * detach.
 */
int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4125 phys_addr_t phys_addr)
4126{
4127 const struct resource **ptr;
4128 int error;
4129
4130 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4131 if (!ptr)
4132 return -ENOMEM;
4133
4134 error = pci_remap_iospace(res, phys_addr);
4135 if (error) {
4136 devres_free(ptr);
4137 } else {
4138 *ptr = res;
4139 devres_add(dev, ptr);
4140 }
4141
4142 return error;
4143}
4144EXPORT_SYMBOL(devm_pci_remap_iospace);
4145
/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4156 resource_size_t offset,
4157 resource_size_t size)
4158{
4159 void __iomem **ptr, *addr;
4160
4161 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4162 if (!ptr)
4163 return NULL;
4164
4165 addr = pci_remap_cfgspace(offset, size);
4166 if (addr) {
4167 *ptr = addr;
4168 devres_add(dev, ptr);
4169 } else
4170 devres_free(ptr);
4171
4172 return addr;
4173}
4174EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4175
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
					  struct resource *res)
4197{
4198 resource_size_t size;
4199 const char *name;
4200 void __iomem *dest_ptr;
4201
4202 BUG_ON(!dev);
4203
4204 if (!res || resource_type(res) != IORESOURCE_MEM) {
4205 dev_err(dev, "invalid resource\n");
4206 return IOMEM_ERR_PTR(-EINVAL);
4207 }
4208
4209 size = resource_size(res);
4210
4211 if (res->name)
4212 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4213 res->name);
4214 else
4215 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4216 if (!name)
4217 return IOMEM_ERR_PTR(-ENOMEM);
4218
4219 if (!devm_request_mem_region(dev, res->start, size, name)) {
4220 dev_err(dev, "can't request region for resource %pR\n", res);
4221 return IOMEM_ERR_PTR(-EBUSY);
4222 }
4223
4224 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4225 if (!dest_ptr) {
4226 dev_err(dev, "ioremap failed for resource %pR\n", res);
4227 devm_release_mem_region(dev, res->start, size);
4228 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4229 }
4230
4231 return dest_ptr;
4232}
4233EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4234
4235static void __pci_set_master(struct pci_dev *dev, bool enable)
4236{
4237 u16 old_cmd, cmd;
4238
4239 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4240 if (enable)
4241 cmd = old_cmd | PCI_COMMAND_MASTER;
4242 else
4243 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4244 if (cmd != old_cmd) {
4245 pci_dbg(dev, "%s bus mastering\n",
4246 enable ? "enabling" : "disabling");
4247 pci_write_config_word(dev, PCI_COMMAND, cmd);
4248 }
4249 dev->is_busmaster = enable;
4250}
4251
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
char * __weak __init pcibios_setup(char *str)
4260{
4261 return str;
4262}
4263
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
	if (pci_is_pcie(dev))
		return;
4279
4280 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4281 if (lat < 16)
4282 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4283 else if (lat > pcibios_max_latency)
4284 lat = pcibios_max_latency;
4285 else
4286 return;
4287
4288 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4289}
4290
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
4299{
4300 __pci_set_master(dev, true);
4301 pcibios_set_master(dev);
4302}
4303EXPORT_SYMBOL(pci_set_master);
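
/*
 * Example (sketch): bus mastering is usually enabled right after the
 * DMA mask has been configured in probe; a 64-bit mask here is just an
 * illustration:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		return -ENODEV;
 *	pci_set_master(pdev);
 */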
4304
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
4310{
4311 __pci_set_master(dev, false);
4312}
4313EXPORT_SYMBOL(pci_clear_master);
4314
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	pci_dbg(dev, "cache line size of %d is not supported\n",
		pci_cache_line_size << 2);

	return -EINVAL;
}
4351EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4352
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_mwi(struct pci_dev *dev)
4362{
4363#ifdef PCI_DISABLE_MWI
4364 return 0;
4365#else
4366 int rc;
4367 u16 cmd;
4368
4369 rc = pci_set_cacheline_size(dev);
4370 if (rc)
4371 return rc;
4372
4373 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4374 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4375 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4376 cmd |= PCI_COMMAND_INVALIDATE;
4377 pci_write_config_word(dev, PCI_COMMAND, cmd);
4378 }
4379 return 0;
4380#endif
4381}
4382EXPORT_SYMBOL(pci_set_mwi);
4383
/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi().
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pcim_set_mwi(struct pci_dev *dev)
4393{
4394 struct pci_devres *dr;
4395
4396 dr = find_pci_dr(dev);
4397 if (!dr)
4398 return -ENOMEM;
4399
4400 dr->mwi = 1;
4401 return pci_set_mwi(dev);
4402}
4403EXPORT_SYMBOL(pcim_set_mwi);
4404
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
4415{
4416#ifdef PCI_DISABLE_MWI
4417 return 0;
4418#else
4419 return pci_set_mwi(dev);
4420#endif
4421}
4422EXPORT_SYMBOL(pci_try_set_mwi);
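
/*
 * Example (sketch): since MWI is only a performance hint, callers that
 * do not depend on it typically use the _try_ variant and ignore the
 * result:
 *
 *	pci_try_set_mwi(pdev);	// best effort; failure is harmless
 */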
4423
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void pci_clear_mwi(struct pci_dev *dev)
4431{
4432#ifndef PCI_DISABLE_MWI
4433 u16 cmd;
4434
4435 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4436 if (cmd & PCI_COMMAND_INVALIDATE) {
4437 cmd &= ~PCI_COMMAND_INVALIDATE;
4438 pci_write_config_word(dev, PCI_COMMAND, cmd);
4439 }
4440#endif
4441}
4442EXPORT_SYMBOL(pci_clear_mwi);
4443
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */
void pci_intx(struct pci_dev *pdev, int enable)
4452{
4453 u16 pci_command, new;
4454
4455 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4456
4457 if (enable)
4458 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4459 else
4460 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4461
4462 if (new != pci_command) {
4463 struct pci_devres *dr;
4464
4465 pci_write_config_word(pdev, PCI_COMMAND, new);
4466
4467 dr = find_pci_dr(pdev);
4468 if (dr && !dr->restore_intx) {
4469 dr->restore_intx = 1;
4470 dr->orig_intx = !enable;
4471 }
4472 }
4473}
4474EXPORT_SYMBOL_GPL(pci_intx);
4475
4476static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4477{
4478 struct pci_bus *bus = dev->bus;
4479 bool mask_updated = true;
4480 u32 cmd_status_dword;
4481 u16 origcmd, newcmd;
4482 unsigned long flags;
4483 bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4491
4492 raw_spin_lock_irqsave(&pci_lock, flags);
4493
4494 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4495
4496 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
4504 mask_updated = false;
4505 goto done;
4506 }
4507
4508 origcmd = cmd_status_dword;
4509 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4510 if (mask)
4511 newcmd |= PCI_COMMAND_INTX_DISABLE;
4512 if (newcmd != origcmd)
4513 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4514
4515done:
4516 raw_spin_unlock_irqrestore(&pci_lock, flags);
4517
4518 return mask_updated;
4519}
4520
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and return
 * true in that case. False is returned if no interrupt was pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
4529{
4530 return pci_check_and_set_intx_mask(dev, true);
4531}
4532EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4533
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not and
 * return true. False is returned and the mask remains active if there was
 * still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
4543{
4544 return pci_check_and_set_intx_mask(dev, false);
4545}
4546EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
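
/*
 * Example (sketch): a handler for shared INTx on a device without its
 * own interrupt-disable bit can use the pair above; the names are
 * hypothetical, and the deferred work would call
 * pci_check_and_unmask_intx() when done:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;	// not our interrupt
 *
 *		schedule_work(&priv->work);
 *		return IRQ_HANDLED;
 *	}
 */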
4547
/**
 * pci_wait_for_pending_transaction - wait for pending transaction
 * @dev: the PCI device to operate on
 *
 * Return 0 if transaction is pending 1 otherwise.
 */
int pci_wait_for_pending_transaction(struct pci_dev *dev)
4555{
4556 if (!pci_is_pcie(dev))
4557 return 1;
4558
4559 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4560 PCI_EXP_DEVSTA_TRPND);
4561}
4562EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4563
/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev: device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */
bool pcie_has_flr(struct pci_dev *dev)
4572{
4573 u32 cap;
4574
4575 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4576 return false;
4577
4578 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4579 return cap & PCI_EXP_DEVCAP_FLR;
4580}
4581EXPORT_SYMBOL_GPL(pcie_has_flr);
4582
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */
int pcie_flr(struct pci_dev *dev)
4592{
4593 if (!pci_wait_for_pending_transaction(dev))
4594 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4595
4596 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4597
4598 if (dev->imm_ready)
4599 return 0;

	/*
	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */
	msleep(100);
4607
4608 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4609}
4610EXPORT_SYMBOL_GPL(pcie_flr);
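
/*
 * Example (sketch): callers are expected to check for FLR support and
 * to save/restore config space around the reset themselves:
 *
 *	if (pcie_has_flr(pdev)) {
 *		pci_save_state(pdev);
 *		pcie_flr(pdev);
 *		pci_restore_state(pdev);
 *	}
 */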
4611
4612static int pci_af_flr(struct pci_dev *dev, int probe)
4613{
4614 int pos;
4615 u8 cap;
4616
4617 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4618 if (!pos)
4619 return -ENOTTY;
4620
4621 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4622 return -ENOTTY;
4623
4624 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4625 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4626 return -ENOTTY;
4627
4628 if (probe)
4629 return 0;

	/*
	 * Wait for Transaction Pending bit to clear.  A word-aligned test
	 * is used, so we use the control offset rather than status and shift
	 * the test bit to match.
	 */
	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
				  PCI_AF_STATUS_TP << 8))
4638 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4639
4640 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4641
4642 if (dev->imm_ready)
4643 return 0;

	/*
	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
	 * updated 27 July 2006; a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */
	msleep(100);
4652
4653 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4654}
4655
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If the device supports PCI PM and its PCI_PM_CTRL_STATE_MASK bits are
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be
 * reinitialized.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
4672{
4673 u16 csr;
4674
4675 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4676 return -ENOTTY;
4677
4678 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4679 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4680 return -ENOTTY;
4681
4682 if (probe)
4683 return 0;
4684
4685 if (dev->current_state != PCI_D0)
4686 return -EINVAL;
4687
4688 csr &= ~PCI_PM_CTRL_STATE_MASK;
4689 csr |= PCI_D3hot;
4690 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4691 pci_dev_d3_sleep(dev);
4692
4693 csr &= ~PCI_PM_CTRL_STATE_MASK;
4694 csr |= PCI_D0;
4695 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4696 pci_dev_d3_sleep(dev);
4697
4698 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4699}
4700
/**
 * pcie_wait_for_link_delay - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 * @delay: Delay to wait after link has become active (in ms)
 *
 * Use this to wait till link becomes active or inactive.
 */
static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
				     int delay)
{
4712 int timeout = 1000;
4713 bool ret;
4714 u16 lnk_status;
4715
	/*
	 * Some controllers might not implement link active reporting. In this
	 * case, we wait for 1000 ms + any delay requested by the caller.
	 */
	if (!pdev->link_active_reporting) {
4721 msleep(timeout + delay);
4722 return true;
4723 }
4724
	/*
	 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
	 * 20ms, after which we should expect the link to be active if the
	 * reset was successful.  If so, software must wait a minimum 100ms
	 * before sending configuration requests to devices downstream of
	 * this port.
	 *
	 * If the link fails to activate, either the device was physically
	 * removed or the link is permanently failed.
	 */
	if (active)
4735 msleep(20);
4736 for (;;) {
4737 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4738 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4739 if (ret == active)
4740 break;
4741 if (timeout <= 0)
4742 break;
4743 msleep(10);
4744 timeout -= 10;
4745 }
4746 if (active && ret)
4747 msleep(delay);
4748
4749 return ret == active;
4750}
4751
4752
4753
4754
4755
4756
4757
4758
4759bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4760{
4761 return pcie_wait_for_link_delay(pdev, active, 100);
4762}
4763
4764
4765
4766
4767
4768
4769
4770
4771static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4772{
4773 const struct pci_dev *pdev;
4774 int min_delay = 100;
4775 int max_delay = 0;
4776
4777 list_for_each_entry(pdev, &bus->devices, bus_list) {
4778 if (pdev->d3cold_delay < min_delay)
4779 min_delay = pdev->d3cold_delay;
4780 if (pdev->d3cold_delay > max_delay)
4781 max_delay = pdev->d3cold_delay;
4782 }
4783
4784 return max(min_delay, max_delay);
4785}
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4799{
4800 struct pci_dev *child;
4801 int delay;
4802
4803 if (pci_dev_is_disconnected(dev))
4804 return;
4805
4806 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4807 return;
4808
4809 down_read(&pci_bus_sem);
4810
4811
4812
4813
4814
4815
4816
4817 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4818 up_read(&pci_bus_sem);
4819 return;
4820 }
4821
4822
4823 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4824 if (!delay) {
4825 up_read(&pci_bus_sem);
4826 return;
4827 }
4828
4829 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4830 bus_list);
4831 up_read(&pci_bus_sem);
4832
4833
4834
4835
4836
4837
4838
4839 if (!pci_is_pcie(dev)) {
4840 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4841 msleep(1000 + delay);
4842 return;
4843 }
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862 if (!pcie_downstream_port(dev))
4863 return;
4864
4865 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4866 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4867 msleep(delay);
4868 } else {
4869 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4870 delay);
4871 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4872
4873 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4874 return;
4875 }
4876 }
4877
4878 if (!pci_device_is_present(child)) {
4879 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4880 msleep(delay);
4881 }
4882}
4883
4884void pci_reset_secondary_bus(struct pci_dev *dev)
4885{
4886 u16 ctrl;
4887
4888 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4889 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4890 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4891
4892
4893
4894
4895
4896 msleep(2);
4897
4898 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4899 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4900
4901
4902
4903
4904
4905
4906
4907
4908 ssleep(1);
4909}
4910
4911void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4912{
4913 pci_reset_secondary_bus(dev);
4914}
4915
4916
4917
4918
4919
4920
4921
4922
4923int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4924{
4925 pcibios_reset_secondary_bus(dev);
4926
4927 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4928}
4929EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4930
4931static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4932{
4933 struct pci_dev *pdev;
4934
4935 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4936 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4937 return -ENOTTY;
4938
4939 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4940 if (pdev != dev)
4941 return -ENOTTY;
4942
4943 if (probe)
4944 return 0;
4945
4946 return pci_bridge_secondary_bus_reset(dev->bus->self);
4947}
4948
4949static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4950{
4951 int rc = -ENOTTY;
4952
4953 if (!hotplug || !try_module_get(hotplug->owner))
4954 return rc;
4955
4956 if (hotplug->ops->reset_slot)
4957 rc = hotplug->ops->reset_slot(hotplug, probe);
4958
4959 module_put(hotplug->owner);
4960
4961 return rc;
4962}
4963
4964static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4965{
4966 if (dev->multifunction || dev->subordinate || !dev->slot ||
4967 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4968 return -ENOTTY;
4969
4970 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4971}
4972
4973static void pci_dev_lock(struct pci_dev *dev)
4974{
4975 pci_cfg_access_lock(dev);
4976
4977 device_lock(&dev->dev);
4978}
4979
4980
4981static int pci_dev_trylock(struct pci_dev *dev)
4982{
4983 if (pci_cfg_access_trylock(dev)) {
4984 if (device_trylock(&dev->dev))
4985 return 1;
4986 pci_cfg_access_unlock(dev);
4987 }
4988
4989 return 0;
4990}
4991
4992static void pci_dev_unlock(struct pci_dev *dev)
4993{
4994 device_unlock(&dev->dev);
4995 pci_cfg_access_unlock(dev);
4996}
4997
4998static void pci_dev_save_and_disable(struct pci_dev *dev)
4999{
5000 const struct pci_error_handlers *err_handler =
5001 dev->driver ? dev->driver->err_handler : NULL;
5002
5003
5004
5005
5006
5007
5008 if (err_handler && err_handler->reset_prepare)
5009 err_handler->reset_prepare(dev);
5010
5011
5012
5013
5014
5015
5016 pci_set_power_state(dev, PCI_D0);
5017
5018 pci_save_state(dev);
5019
5020
5021
5022
5023
5024
5025
5026 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5027}
5028
5029static void pci_dev_restore(struct pci_dev *dev)
5030{
5031 const struct pci_error_handlers *err_handler =
5032 dev->driver ? dev->driver->err_handler : NULL;
5033
5034 pci_restore_state(dev);
5035
5036
5037
5038
5039
5040
5041 if (err_handler && err_handler->reset_done)
5042 err_handler->reset_done(dev);
5043}
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065int __pci_reset_function_locked(struct pci_dev *dev)
5066{
5067 int rc;
5068
5069 might_sleep();
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079 rc = pci_dev_specific_reset(dev, 0);
5080 if (rc != -ENOTTY)
5081 return rc;
5082 if (pcie_has_flr(dev)) {
5083 rc = pcie_flr(dev);
5084 if (rc != -ENOTTY)
5085 return rc;
5086 }
5087 rc = pci_af_flr(dev, 0);
5088 if (rc != -ENOTTY)
5089 return rc;
5090 rc = pci_pm_reset(dev, 0);
5091 if (rc != -ENOTTY)
5092 return rc;
5093 rc = pci_dev_reset_slot_function(dev, 0);
5094 if (rc != -ENOTTY)
5095 return rc;
5096 return pci_parent_bus_reset(dev, 0);
5097}
5098EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111int pci_probe_reset_function(struct pci_dev *dev)
5112{
5113 int rc;
5114
5115 might_sleep();
5116
5117 rc = pci_dev_specific_reset(dev, 1);
5118 if (rc != -ENOTTY)
5119 return rc;
5120 if (pcie_has_flr(dev))
5121 return 0;
5122 rc = pci_af_flr(dev, 1);
5123 if (rc != -ENOTTY)
5124 return rc;
5125 rc = pci_pm_reset(dev, 1);
5126 if (rc != -ENOTTY)
5127 return rc;
5128 rc = pci_dev_reset_slot_function(dev, 1);
5129 if (rc != -ENOTTY)
5130 return rc;
5131
5132 return pci_parent_bus_reset(dev, 1);
5133}
5134
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device
 * state over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
5152{
5153 int rc;
5154
5155 if (!dev->reset_fn)
5156 return -ENOTTY;
5157
5158 pci_dev_lock(dev);
5159 pci_dev_save_and_disable(dev);
5160
5161 rc = __pci_reset_function_locked(dev);
5162
5163 pci_dev_restore(dev);
5164 pci_dev_unlock(dev);
5165
5166 return rc;
5167}
5168EXPORT_SYMBOL_GPL(pci_reset_function);
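
/*
 * Example (sketch): a driver that has quiesced its device can recover
 * it with a full function reset:
 *
 *	rc = pci_reset_function(pdev);
 *	if (rc)
 *		dev_warn(&pdev->dev, "reset failed: %d\n", rc);
 */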
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187int pci_reset_function_locked(struct pci_dev *dev)
5188{
5189 int rc;
5190
5191 if (!dev->reset_fn)
5192 return -ENOTTY;
5193
5194 pci_dev_save_and_disable(dev);
5195
5196 rc = __pci_reset_function_locked(dev);
5197
5198 pci_dev_restore(dev);
5199
5200 return rc;
5201}
5202EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5203
5204
5205
5206
5207
5208
5209
5210int pci_try_reset_function(struct pci_dev *dev)
5211{
5212 int rc;
5213
5214 if (!dev->reset_fn)
5215 return -ENOTTY;
5216
5217 if (!pci_dev_trylock(dev))
5218 return -EAGAIN;
5219
5220 pci_dev_save_and_disable(dev);
5221 rc = __pci_reset_function_locked(dev);
5222 pci_dev_restore(dev);
5223 pci_dev_unlock(dev);
5224
5225 return rc;
5226}
5227EXPORT_SYMBOL_GPL(pci_try_reset_function);
5228
5229
5230static bool pci_bus_resetable(struct pci_bus *bus)
5231{
5232 struct pci_dev *dev;
5233
5234
5235 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5236 return false;
5237
5238 list_for_each_entry(dev, &bus->devices, bus_list) {
5239 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5240 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5241 return false;
5242 }
5243
5244 return true;
5245}
5246
5247
5248static void pci_bus_lock(struct pci_bus *bus)
5249{
5250 struct pci_dev *dev;
5251
5252 list_for_each_entry(dev, &bus->devices, bus_list) {
5253 pci_dev_lock(dev);
5254 if (dev->subordinate)
5255 pci_bus_lock(dev->subordinate);
5256 }
5257}
5258
5259
5260static void pci_bus_unlock(struct pci_bus *bus)
5261{
5262 struct pci_dev *dev;
5263
5264 list_for_each_entry(dev, &bus->devices, bus_list) {
5265 if (dev->subordinate)
5266 pci_bus_unlock(dev->subordinate);
5267 pci_dev_unlock(dev);
5268 }
5269}
5270
5271
5272static int pci_bus_trylock(struct pci_bus *bus)
5273{
5274 struct pci_dev *dev;
5275
5276 list_for_each_entry(dev, &bus->devices, bus_list) {
5277 if (!pci_dev_trylock(dev))
5278 goto unlock;
5279 if (dev->subordinate) {
5280 if (!pci_bus_trylock(dev->subordinate)) {
5281 pci_dev_unlock(dev);
5282 goto unlock;
5283 }
5284 }
5285 }
5286 return 1;
5287
5288unlock:
5289 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5290 if (dev->subordinate)
5291 pci_bus_unlock(dev->subordinate);
5292 pci_dev_unlock(dev);
5293 }
5294 return 0;
5295}
5296
5297
5298static bool pci_slot_resetable(struct pci_slot *slot)
5299{
5300 struct pci_dev *dev;
5301
5302 if (slot->bus->self &&
5303 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5304 return false;
5305
5306 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5307 if (!dev->slot || dev->slot != slot)
5308 continue;
5309 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5310 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5311 return false;
5312 }
5313
5314 return true;
5315}
5316
5317
5318static void pci_slot_lock(struct pci_slot *slot)
5319{
5320 struct pci_dev *dev;
5321
5322 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5323 if (!dev->slot || dev->slot != slot)
5324 continue;
5325 pci_dev_lock(dev);
5326 if (dev->subordinate)
5327 pci_bus_lock(dev->subordinate);
5328 }
5329}
5330
5331
5332static void pci_slot_unlock(struct pci_slot *slot)
5333{
5334 struct pci_dev *dev;
5335
5336 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5337 if (!dev->slot || dev->slot != slot)
5338 continue;
5339 if (dev->subordinate)
5340 pci_bus_unlock(dev->subordinate);
5341 pci_dev_unlock(dev);
5342 }
5343}
5344
5345
5346static int pci_slot_trylock(struct pci_slot *slot)
5347{
5348 struct pci_dev *dev;
5349
5350 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5351 if (!dev->slot || dev->slot != slot)
5352 continue;
5353 if (!pci_dev_trylock(dev))
5354 goto unlock;
5355 if (dev->subordinate) {
5356 if (!pci_bus_trylock(dev->subordinate)) {
5357 pci_dev_unlock(dev);
5358 goto unlock;
5359 }
5360 }
5361 }
5362 return 1;
5363
5364unlock:
5365 list_for_each_entry_continue_reverse(dev,
5366 &slot->bus->devices, bus_list) {
5367 if (!dev->slot || dev->slot != slot)
5368 continue;
5369 if (dev->subordinate)
5370 pci_bus_unlock(dev->subordinate);
5371 pci_dev_unlock(dev);
5372 }
5373 return 0;
5374}
5375
5376
5377
5378
5379
5380static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5381{
5382 struct pci_dev *dev;
5383
5384 list_for_each_entry(dev, &bus->devices, bus_list) {
5385 pci_dev_save_and_disable(dev);
5386 if (dev->subordinate)
5387 pci_bus_save_and_disable_locked(dev->subordinate);
5388 }
5389}
5390
5391
5392
5393
5394
5395
5396static void pci_bus_restore_locked(struct pci_bus *bus)
5397{
5398 struct pci_dev *dev;
5399
5400 list_for_each_entry(dev, &bus->devices, bus_list) {
5401 pci_dev_restore(dev);
5402 if (dev->subordinate)
5403 pci_bus_restore_locked(dev->subordinate);
5404 }
5405}
5406
5407
5408
5409
5410
5411static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5412{
5413 struct pci_dev *dev;
5414
5415 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5416 if (!dev->slot || dev->slot != slot)
5417 continue;
5418 pci_dev_save_and_disable(dev);
5419 if (dev->subordinate)
5420 pci_bus_save_and_disable_locked(dev->subordinate);
5421 }
5422}
5423
5424
5425
5426
5427
5428
5429static void pci_slot_restore_locked(struct pci_slot *slot)
5430{
5431 struct pci_dev *dev;
5432
5433 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5434 if (!dev->slot || dev->slot != slot)
5435 continue;
5436 pci_dev_restore(dev);
5437 if (dev->subordinate)
5438 pci_bus_restore_locked(dev->subordinate);
5439 }
5440}
5441
5442static int pci_slot_reset(struct pci_slot *slot, int probe)
5443{
5444 int rc;
5445
5446 if (!slot || !pci_slot_resetable(slot))
5447 return -ENOTTY;
5448
5449 if (!probe)
5450 pci_slot_lock(slot);
5451
5452 might_sleep();
5453
5454 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5455
5456 if (!probe)
5457 pci_slot_unlock(slot);
5458
5459 return rc;
5460}
5461
5462
5463
5464
5465
5466
5467
5468int pci_probe_reset_slot(struct pci_slot *slot)
5469{
5470 return pci_slot_reset(slot, 1);
5471}
5472EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488
5489static int __pci_reset_slot(struct pci_slot *slot)
5490{
5491 int rc;
5492
5493 rc = pci_slot_reset(slot, 1);
5494 if (rc)
5495 return rc;
5496
5497 if (pci_slot_trylock(slot)) {
5498 pci_slot_save_and_disable_locked(slot);
5499 might_sleep();
5500 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5501 pci_slot_restore_locked(slot);
5502 pci_slot_unlock(slot);
5503 } else
5504 rc = -EAGAIN;
5505
5506 return rc;
5507}
5508
5509static int pci_bus_reset(struct pci_bus *bus, int probe)
5510{
5511 int ret;
5512
5513 if (!bus->self || !pci_bus_resetable(bus))
5514 return -ENOTTY;
5515
5516 if (probe)
5517 return 0;
5518
5519 pci_bus_lock(bus);
5520
5521 might_sleep();
5522
5523 ret = pci_bridge_secondary_bus_reset(bus->self);
5524
5525 pci_bus_unlock(bus);
5526
5527 return ret;
5528}
5529
5530
5531
5532
5533
5534
5535
5536
5537
5538int pci_bus_error_reset(struct pci_dev *bridge)
5539{
5540 struct pci_bus *bus = bridge->subordinate;
5541 struct pci_slot *slot;
5542
5543 if (!bus)
5544 return -ENOTTY;
5545
5546 mutex_lock(&pci_slot_mutex);
5547 if (list_empty(&bus->slots))
5548 goto bus_reset;
5549
5550 list_for_each_entry(slot, &bus->slots, list)
5551 if (pci_probe_reset_slot(slot))
5552 goto bus_reset;
5553
5554 list_for_each_entry(slot, &bus->slots, list)
5555 if (pci_slot_reset(slot, 0))
5556 goto bus_reset;
5557
5558 mutex_unlock(&pci_slot_mutex);
5559 return 0;
5560bus_reset:
5561 mutex_unlock(&pci_slot_mutex);
5562 return pci_bus_reset(bridge->subordinate, 0);
5563}
5564
5565
5566
5567
5568
5569
5570
5571int pci_probe_reset_bus(struct pci_bus *bus)
5572{
5573 return pci_bus_reset(bus, 1);
5574}
5575EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5576
5577
5578
5579
5580
5581
5582
5583static int __pci_reset_bus(struct pci_bus *bus)
5584{
5585 int rc;
5586
5587 rc = pci_bus_reset(bus, 1);
5588 if (rc)
5589 return rc;
5590
5591 if (pci_bus_trylock(bus)) {
5592 pci_bus_save_and_disable_locked(bus);
5593 might_sleep();
5594 rc = pci_bridge_secondary_bus_reset(bus->self);
5595 pci_bus_restore_locked(bus);
5596 pci_bus_unlock(bus);
5597 } else
5598 rc = -EAGAIN;
5599
5600 return rc;
5601}
5602
5603
5604
5605
5606
5607
5608
5609int pci_reset_bus(struct pci_dev *pdev)
5610{
5611 return (!pci_probe_reset_slot(pdev->slot)) ?
5612 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5613}
5614EXPORT_SYMBOL_GPL(pci_reset_bus);
5615
5616
5617
5618
5619
5620
5621
5622
5623int pcix_get_max_mmrbc(struct pci_dev *dev)
5624{
5625 int cap;
5626 u32 stat;
5627
5628 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5629 if (!cap)
5630 return -EINVAL;
5631
5632 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5633 return -EINVAL;
5634
5635 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5636}
5637EXPORT_SYMBOL(pcix_get_max_mmrbc);
5638
5639
5640
5641
5642
5643
5644
5645
5646int pcix_get_mmrbc(struct pci_dev *dev)
5647{
5648 int cap;
5649 u16 cmd;
5650
5651 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5652 if (!cap)
5653 return -EINVAL;
5654
5655 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5656 return -EINVAL;
5657
5658 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5659}
5660EXPORT_SYMBOL(pcix_get_mmrbc);
5661
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *	valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have errata
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5672{
5673 int cap;
5674 u32 stat, v, o;
5675 u16 cmd;
5676
5677 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5678 return -EINVAL;
5679
5680 v = ffs(mmrbc) - 10;
5681
5682 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5683 if (!cap)
5684 return -EINVAL;
5685
5686 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5687 return -EINVAL;
5688
5689 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5690 return -E2BIG;
5691
5692 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5693 return -EINVAL;
5694
5695 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5696 if (o != v) {
5697 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5698 return -EIO;
5699
5700 cmd &= ~PCI_X_CMD_MAX_READ;
5701 cmd |= v << 2;
5702 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5703 return -EIO;
5704 }
5705 return 0;
5706}
5707EXPORT_SYMBOL(pcix_set_mmrbc);
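
/*
 * Example (sketch): clamp a requested burst size to what the device
 * advertises before applying it; 2048 is an illustrative value:
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0)
 *		pcix_set_mmrbc(pdev, min(max, 2048));
 */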
5708
5709
5710
5711
5712
5713
5714
5715int pcie_get_readrq(struct pci_dev *dev)
5716{
5717 u16 ctl;
5718
5719 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5720
5721 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5722}
5723EXPORT_SYMBOL(pcie_get_readrq);
5724
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
5734{
5735 u16 v;
5736 int ret;
5737
5738 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5739 return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to keep the host bridge from
	 * generating requests larger than we can cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5747 int mps = pcie_get_mps(dev);
5748
5749 if (mps < rq)
5750 rq = mps;
5751 }
5752
5753 v = (ffs(rq) - 8) << 12;
5754
5755 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5756 PCI_EXP_DEVCTL_READRQ, v);
5757
5758 return pcibios_err_to_errno(ret);
5759}
5760EXPORT_SYMBOL(pcie_set_readrq);
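
/*
 * Example (sketch): a driver issuing many small DMA reads might lower
 * MRRS for fairness on a shared link; 256 bytes is an illustrative
 * value, not a recommendation:
 *
 *	if (pcie_get_readrq(pdev) > 256)
 *		pcie_set_readrq(pdev, 256);
 */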
5761
5762
5763
5764
5765
5766
5767
5768int pcie_get_mps(struct pci_dev *dev)
5769{
5770 u16 ctl;
5771
5772 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5773
5774 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5775}
5776EXPORT_SYMBOL(pcie_get_mps);
5777
5778
5779
5780
5781
5782
5783
5784
5785
5786int pcie_set_mps(struct pci_dev *dev, int mps)
5787{
5788 u16 v;
5789 int ret;
5790
5791 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5792 return -EINVAL;
5793
5794 v = ffs(mps) - 8;
5795 if (v > dev->pcie_mpss)
5796 return -EINVAL;
5797 v <<= 5;
5798
5799 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5800 PCI_EXP_DEVCTL_PAYLOAD, v);
5801
5802 return pcibios_err_to_errno(ret);
5803}
5804EXPORT_SYMBOL(pcie_set_mps);
5805
/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
5823{
5824 u16 lnksta;
5825 enum pci_bus_speed next_speed;
5826 enum pcie_link_width next_width;
5827 u32 bw, next_bw;
5828
5829 if (speed)
5830 *speed = PCI_SPEED_UNKNOWN;
5831 if (width)
5832 *width = PCIE_LNK_WIDTH_UNKNOWN;
5833
5834 bw = 0;
5835
5836 while (dev) {
5837 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5838
5839 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5840 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5841 PCI_EXP_LNKSTA_NLW_SHIFT;
5842
5843 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5844
5845
5846 if (!bw || next_bw <= bw) {
5847 bw = next_bw;
5848
5849 if (limiting_dev)
5850 *limiting_dev = dev;
5851 if (speed)
5852 *speed = next_speed;
5853 if (width)
5854 *width = next_width;
5855 }
5856
5857 dev = pci_upstream_bridge(dev);
5858 }
5859
5860 return bw;
5861}
5862EXPORT_SYMBOL(pcie_bandwidth_available);
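
/*
 * Worked example, assuming the PCIE_SPEED2MBS_ENC() definition in
 * drivers/pci/pci.h that deducts 128b/130b encoding overhead: a x4 link
 * at 8.0 GT/s yields 4 * (8000 * 128 / 130) = 4 * 7876 = 31504 Mb/s,
 * which __pcie_print_link_status() below reports as "31.504 Gb/s".
 */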
5863
5864
5865
5866
5867
5868
5869
5870
5871enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5872{
5873 u32 lnkcap2, lnkcap;
5874
5875
5876
5877
5878
5879
5880
5881
5882
5883
5884 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5885
5886
5887 if (lnkcap2)
5888 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5889
5890 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5891 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5892 return PCIE_SPEED_5_0GT;
5893 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5894 return PCIE_SPEED_2_5GT;
5895
5896 return PCI_SPEED_UNKNOWN;
5897}
5898EXPORT_SYMBOL(pcie_get_speed_cap);
5899
5900
5901
5902
5903
5904
5905
5906
5907enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5908{
5909 u32 lnkcap;
5910
5911 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5912 if (lnkcap)
5913 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5914
5915 return PCIE_LNK_WIDTH_UNKNOWN;
5916}
5917EXPORT_SYMBOL(pcie_get_width_cap);
5918
5919
5920
5921
5922
5923
5924
5925
5926
5927
5928
5929u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5930 enum pcie_link_width *width)
5931{
5932 *speed = pcie_get_speed_cap(dev);
5933 *width = pcie_get_width_cap(dev);
5934
5935 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5936 return 0;
5937
5938 return *width * PCIE_SPEED2MBS_ENC(*speed);
5939}
5940
5941
5942
5943
5944
5945
5946
5947
5948
5949
5950
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 pci_speed_string(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);
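/*
 * Example usage (a minimal sketch; "my_driver_probe" is hypothetical):
 * high-bandwidth device drivers commonly call this once from probe so
 * that a down-trained or narrow link shows up in the kernel log:
 *
 *	static int my_driver_probe(struct pci_dev *pdev,
 *				   const struct pci_device_id *id)
 *	{
 *		...
 *		pcie_print_link_status(pdev);
 *		return 0;
 *	}
 */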

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;
	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
EXPORT_SYMBOL(pci_select_bars);
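/*
 * Example usage (a minimal sketch; "pdev" and the "my_driver" string are
 * hypothetical): a driver can request only the memory BARs it actually
 * decodes:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, "my_driver");
 *
 *	if (err)
 *		return err;
 */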

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE
 *	   (traverse ancestors and change bridges)
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
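/*
 * Example usage (a minimal sketch modeled on the fixups in
 * drivers/pci/quirks.c; the vendor/device IDs are placeholders): a
 * header fixup can alias all DMA of a multi-function device to
 * function 0:
 *
 *	static void quirk_dma_func0_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 0)
 *			pci_add_dma_alias(dev,
 *					  PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_func0_alias);
 */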

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;
	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);
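/*
 * Example usage (a minimal sketch; "pdev" is hypothetical): hotplug-aware
 * drivers can use this to distinguish a wedged device from one that was
 * surprise-removed before touching it further:
 *
 *	if (!pci_device_is_present(pdev)) {
 *		pci_warn(pdev, "device was removed\n");
 *		return -ENODEV;
 *	}
 */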

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;

	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another device.  This is the default
 * implementation.  Architecture implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			if (align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = PAGE_SHIFT;
			}
		} else {
			align_order = PAGE_SHIFT;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
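/*
 * Example of the accepted format (a sketch; the device IDs are
 * placeholders): each entry is "[<order>@]<device>", where <device> is
 * either a "<domain>:<bus>:<dev>.<func>" address or a
 * "pci:<vendor>:<device>" ID as understood by pci_dev_str_match(), and
 * entries are separated by ';' or ','.  For instance, booting with
 *
 *	pci=resource_alignment=12@pci:8086:1234;00:01.0
 *
 * requests 2^12 = 4096-byte alignment for the matching vendor/device
 * and PAGE_SHIFT-order alignment for the device at 00:01.0.
 */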

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * devices and use the second.
	 */
	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There's no way to influence their alignment.
	 */
	if (dev->is_virtfn)
		return;

	/* Check if the specified device is a target of reassignment */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable the bridge's resource window so the kernel
	 * can reassign a new resource window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	/*
	 * When set by the command line, resource_alignment_param will not
	 * have a trailing line feed, which is ugly. So conditionally add
	 * it here.
	 */
	if (count >= 2 && buf[count - 2] != '\n' && count < PAGE_SIZE - 1) {
		buf[count - 1] = '\n';
		buf[count++] = 0;
	}

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param = kstrndup(buf, count, GFP_KERNEL);

	if (!param)
		return -ENOMEM;

	spin_lock(&resource_alignment_lock);
	kfree(resource_alignment_param);
	resource_alignment_param = param;
	spin_unlock(&resource_alignment_lock);
	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);
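/*
 * Example usage (a sketch; the ID is a placeholder): the same alignment
 * syntax accepted on the command line can be written to the sysfs
 * attribute created above, e.g. from a shell:
 *
 *	echo "12@pci:8086:1234" > /sys/bus/pci/resource_alignment
 *
 * Devices matched by the new value pick up the alignment the next time
 * their resources are (re)assigned, e.g. after a remove and rescan.
 */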

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If the DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If the DT domain property value is not valid (i.e. domain < 0),
	 * and we have not previously assigned a domain number from DT
	 * (use_dt_domains != 1), we should assign a domain number by
	 * using the:
	 *
	 *	pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of the
	 * method we are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}
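/*
 * Example of the DT property consulted above (a sketch; the node name,
 * compatible string, and value are placeholders).  Host bridges in a
 * system should either all carry the property or all omit it:
 *
 *	pcie@40000000 {
 *		compatible = "vendor,soc-pcie";
 *		...
 *		linux,pci-domain = <0>;
 *	};
 */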

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access PCI extended config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
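/*
 * Example (a sketch; the values are arbitrary): several of the options
 * parsed above can be combined in one "pci=" argument on the kernel
 * command line, separated by commas:
 *
 *	pci=pcie_bus_safe,hpmemsize=128M,resource_alignment=12@00:01.0
 *
 * This selects the PCIE_BUS_SAFE MPS policy, reserves 128M for each
 * hotplug bridge window (both non-prefetchable and prefetchable, since
 * hpmemsize sets both), and requests 4096-byte alignment for the device
 * at 00:01.0.
 */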

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete.  We can't allocate
 * memory in pci_setup() because some architectures do not have any memory
 * allocation service available during an early_param() call.  So we
 * allocate memory and copy the variables here before the init section is
 * freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);