// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3hot_delay;

	if (delay < pci_pm_d3hot_delay)
		delay = pci_pm_d3hot_delay;

	if (delay)
		msleep(delay);
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)

unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)

unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

unsigned int pcibios_max_latency = 255;

static bool pcie_ari_disabled;

static bool pcie_ats_disabled;

bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

static bool pci_bridge_d3_disable;

static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

#define PCIE_RESET_READY_POLL_MS 60000

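/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */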
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

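/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */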
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

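/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */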
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

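/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which may
 * change if new hardware is inserted or if the firmware changes.  If the
 * domain is left unspecified, it is taken to be 0.  The second format
 * matches devices using IDs in the configuration space which may match
 * multiple devices in the system; a value of 0 for any field matches all.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */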
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				   unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

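/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */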
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

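/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */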
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

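/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */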
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

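/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */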
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

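/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */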
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

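/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */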
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

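/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */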
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

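/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */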
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						     PCI_EXT_CAP_ID_VNDR))) {
		if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
					  &header) == PCIBIOS_SUCCESSFUL &&
		    PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);

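/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */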
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

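/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */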
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

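/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */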
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

static int pci_acs_enable;

void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			break;
		}

		if (*p != ';' && *p != ',')
			break;
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	ctrl |= (cap & PCI_ACS_SV);
	ctrl |= (cap & PCI_ACS_RR);
	ctrl |= (cap & PCI_ACS_CR);
	ctrl |= (cap & PCI_ACS_UF);

	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	pci_disable_acs_redir(dev);
}

static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->bridge_d3)
		return pci_platform_pm->bridge_d3(dev);
	return false;
}

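/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */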
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN:
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		fallthrough;
	default:
		pmcsr = 0;
		break;
	}

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

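/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * PCI ID of the device.
 */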
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap)
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	if (dev->runtime_d3cold)
		pci_resume_bus(dev->subordinate);

	return pci_raw_set_power_state(dev, PCI_D0);
}

static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

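/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */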
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		return 0;

	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);

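/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */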
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

int pci_save_state(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	pci_save_ptm_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);

		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = pci_rebar_bytes_to_size(resource_size(res));
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);
	pci_restore_ptm_state(dev);

	pci_aer_clear_status(dev);
	pci_restore_aer_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[];
};

struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	pci_update_current_state(dev, dev->current_state);

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);

struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

void __weak pcibios_release_device(struct pci_dev *dev) {}

void __weak pcibios_disable_device(struct pci_dev *dev) {}

void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

void pcie_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}

void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;

			if (bridge && bridge->current_state != PCI_D0)
				continue;

			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

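/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */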
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	if (!pci_power_manageable(dev))
		return 0;

	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			fallthrough;
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup && dev->pme_support) {
		pci_power_t state = target_state;

		while (state && !(dev->pme_support & (1 << state)))
			state--;

		if (state)
			return state;
		else if (dev->pme_support & 1)
			return PCI_D0;
	}

	return target_state;
}

int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		pci_disable_ptm(dev);

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		pci_restore_ptm_state(dev);
	}

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		pci_disable_ptm(dev);

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		pci_restore_ptm_state(dev);
		dev->runtime_d3cold = false;
	}

	return error;
}

bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	return target_state != pci_dev->current_state &&
		target_state != PCI_D3cold &&
		pci_dev->current_state != PCI_D3hot;
}

void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);

	pm_runtime_barrier(dev);

	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
#endif
	{ }
};

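/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */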
2903bool pci_bridge_d3_possible(struct pci_dev *bridge)
2904{
2905 if (!pci_is_pcie(bridge))
2906 return false;
2907
2908 switch (pci_pcie_type(bridge)) {
2909 case PCI_EXP_TYPE_ROOT_PORT:
2910 case PCI_EXP_TYPE_UPSTREAM:
2911 case PCI_EXP_TYPE_DOWNSTREAM:
2912 if (pci_bridge_d3_disable)
2913 return false;
2914
2915
2916
2917
2918
2919 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2920 return false;
2921
2922 if (pci_bridge_d3_force)
2923 return true;
2924
2925
2926 if (bridge->is_thunderbolt)
2927 return true;
2928
2929
2930 if (platform_pci_bridge_d3(bridge))
2931 return true;
2932
2933
2934
2935
2936
2937
2938 if (bridge->is_hotplug_bridge)
2939 return false;
2940
2941 if (dmi_check_system(bridge_d3_blacklist))
2942 return false;
2943
2944
2945
2946
2947
2948 if (dmi_get_bios_year() >= 2015)
2949 return true;
2950 break;
2951 }
2952
2953 return false;
2954}
2955
2956static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2957{
2958 bool *d3cold_ok = data;
2959
2960 if (
2961 dev->no_d3cold || !dev->d3cold_allowed ||
2962
2963
2964 (device_may_wakeup(&dev->dev) &&
2965 !pci_pme_capable(dev, PCI_D3cold)) ||
2966
2967
2968 !pci_power_manageable(dev))
2969
2970 *d3cold_ok = false;
2971
2972 return !*d3cold_ok;
2973}
2974
/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
2983void pci_bridge_d3_update(struct pci_dev *dev)
2984{
2985 bool remove = !device_is_registered(&dev->dev);
2986 struct pci_dev *bridge;
2987 bool d3cold_ok = true;
2988
2989 bridge = pci_upstream_bridge(dev);
2990 if (!bridge || !pci_bridge_d3_possible(bridge))
2991 return;
2992
 /*
 * If D3 is currently allowed for the bridge, removing one of its
 * children won't change that.
 */
2997 if (remove && bridge->bridge_d3)
2998 return;
2999
 /*
 * If D3 is currently allowed for the bridge and a child is added or
 * changed, disallowance of D3 can only be caused by that child, so
 * we only need to check that single device, not any of its siblings.
 *
 * If D3 is currently not allowed for the bridge, checking the device
 * first may allow us to skip checking its siblings.
 */
3008 if (!remove)
3009 pci_dev_check_d3cold(dev, &d3cold_ok);
3010
 /*
 * If D3 is currently not allowed for the bridge, this may be caused
 * either by the device being changed/removed or any of its siblings,
 * so we need to go through all children to find out if one of them
 * continues to block D3.
 */
3017 if (d3cold_ok && !bridge->bridge_d3)
3018 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3019 &d3cold_ok);
3020
3021 if (bridge->bridge_d3 != d3cold_ok) {
3022 bridge->bridge_d3 = d3cold_ok;
 /* Propagate change to upstream bridges */
3024 pci_bridge_d3_update(bridge);
3025 }
3026}
3027
/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
3036void pci_d3cold_enable(struct pci_dev *dev)
3037{
3038 if (dev->no_d3cold) {
3039 dev->no_d3cold = false;
3040 pci_bridge_d3_update(dev);
3041 }
3042}
3043EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3044
/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
3053void pci_d3cold_disable(struct pci_dev *dev)
3054{
3055 if (!dev->no_d3cold) {
3056 dev->no_d3cold = true;
3057 pci_bridge_d3_update(dev);
3058 }
3059}
3060EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3061
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
3066void pci_pm_init(struct pci_dev *dev)
3067{
3068 int pm;
3069 u16 status;
3070 u16 pmc;
3071
3072 pm_runtime_forbid(&dev->dev);
3073 pm_runtime_set_active(&dev->dev);
3074 pm_runtime_enable(&dev->dev);
3075 device_enable_async_suspend(&dev->dev);
3076 dev->wakeup_prepared = false;
3077
3078 dev->pm_cap = 0;
3079 dev->pme_support = 0;
3080
 /* find PCI PM capability in list */
3082 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3083 if (!pm)
3084 return;
3085
3086 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3087
3088 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3089 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3090 pmc & PCI_PM_CAP_VER_MASK);
3091 return;
3092 }
3093
3094 dev->pm_cap = pm;
3095 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3096 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3097 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3098 dev->d3cold_allowed = true;
3099
3100 dev->d1_support = false;
3101 dev->d2_support = false;
3102 if (!pci_no_d1d2(dev)) {
3103 if (pmc & PCI_PM_CAP_D1)
3104 dev->d1_support = true;
3105 if (pmc & PCI_PM_CAP_D2)
3106 dev->d2_support = true;
3107
3108 if (dev->d1_support || dev->d2_support)
3109 pci_info(dev, "supports%s%s\n",
3110 dev->d1_support ? " D1" : "",
3111 dev->d2_support ? " D2" : "");
3112 }
3113
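 /* Keep only the PME_Support field, one bit per power state (D0..D3cold) */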
3114 pmc &= PCI_PM_CAP_PME_MASK;
3115 if (pmc) {
3116 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3117 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3118 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3119 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3120 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3121 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3122 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3123 dev->pme_poll = true;
 /*
 * Make device's PM flags reflect the wake-up capability, but
 * let the user space enable it to wake up the system as needed.
 */
3128 device_set_wakeup_capable(&dev->dev, true);
 /* Disable the PME# generation functionality */
3130 pci_pme_active(dev, false);
3131 }
3132
3133 pci_read_config_word(dev, PCI_STATUS, &status);
3134 if (status & PCI_STATUS_IMM_READY)
3135 dev->imm_ready = 1;
3136}
3137
3138static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3139{
3140 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3141
3142 switch (prop) {
3143 case PCI_EA_P_MEM:
3144 case PCI_EA_P_VF_MEM:
3145 flags |= IORESOURCE_MEM;
3146 break;
3147 case PCI_EA_P_MEM_PREFETCH:
3148 case PCI_EA_P_VF_MEM_PREFETCH:
3149 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3150 break;
3151 case PCI_EA_P_IO:
3152 flags |= IORESOURCE_IO;
3153 break;
3154 default:
3155 return 0;
3156 }
3157
3158 return flags;
3159}
3160
3161static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3162 u8 prop)
3163{
3164 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3165 return &dev->resource[bei];
3166#ifdef CONFIG_PCI_IOV
3167 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3168 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3169 return &dev->resource[PCI_IOV_RESOURCES +
3170 bei - PCI_EA_BEI_VF_BAR0];
3171#endif
3172 else if (bei == PCI_EA_BEI_ROM)
3173 return &dev->resource[PCI_ROM_RESOURCE];
3174 else
3175 return NULL;
3176}
3177
/* Read an Enhanced Allocation (EA) entry */
3179static int pci_ea_read(struct pci_dev *dev, int offset)
3180{
3181 struct resource *res;
3182 int ent_size, ent_offset = offset;
3183 resource_size_t start, end;
3184 unsigned long flags;
3185 u32 dw0, bei, base, max_offset;
3186 u8 prop;
3187 bool support_64 = (sizeof(resource_size_t) >= 8);
3188
3189 pci_read_config_dword(dev, ent_offset, &dw0);
3190 ent_offset += 4;
3191
 /* Entry size field indicates DWORDs after 1st */
3193 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3194
3195 if (!(dw0 & PCI_EA_ENABLE))
3196 goto out;
3197
3198 bei = (dw0 & PCI_EA_BEI) >> 4;
3199 prop = (dw0 & PCI_EA_PP) >> 8;
3200
3201
 /*
 * If the Property is in the reserved range, try the Secondary
 * Property instead.
 */
3205 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3206 prop = (dw0 & PCI_EA_SP) >> 16;
3207 if (prop > PCI_EA_P_BRIDGE_IO)
3208 goto out;
3209
3210 res = pci_ea_get_resource(dev, bei, prop);
3211 if (!res) {
3212 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3213 goto out;
3214 }
3215
3216 flags = pci_ea_flags(dev, prop);
3217 if (!flags) {
3218 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3219 goto out;
3220 }
3221
 /* Read Base */
3223 pci_read_config_dword(dev, ent_offset, &base);
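 /* The low two bits of Base carry flags (e.g. PCI_EA_IS_64), not address bits */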
3224 start = (base & PCI_EA_FIELD_MASK);
3225 ent_offset += 4;
3226
 /* Read MaxOffset */
3228 pci_read_config_dword(dev, ent_offset, &max_offset);
3229 ent_offset += 4;
3230
 /* Read Base MSBs (if 64-bit entry) */
3232 if (base & PCI_EA_IS_64) {
3233 u32 base_upper;
3234
3235 pci_read_config_dword(dev, ent_offset, &base_upper);
3236 ent_offset += 4;
3237
3238 flags |= IORESOURCE_MEM_64;
3239
 /* entry starts above 32-bit boundary, can't use */
3241 if (!support_64 && base_upper)
3242 goto out;
3243
3244 if (support_64)
3245 start |= ((u64)base_upper << 32);
3246 }
3247
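 /* MaxOffset's low two flag bits are taken as ones so 'end' is the inclusive last byte */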
3248 end = start + (max_offset | 0x03);
3249
 /* Read MaxOffset MSBs (if 64-bit entry) */
3251 if (max_offset & PCI_EA_IS_64) {
3252 u32 max_offset_upper;
3253
3254 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3255 ent_offset += 4;
3256
3257 flags |= IORESOURCE_MEM_64;
3258
 /* entry too big, can't use */
3260 if (!support_64 && max_offset_upper)
3261 goto out;
3262
3263 if (support_64)
3264 end += ((u64)max_offset_upper << 32);
3265 }
3266
3267 if (end < start) {
3268 pci_err(dev, "EA Entry crosses address boundary\n");
3269 goto out;
3270 }
3271
3272 if (ent_size != ent_offset - offset) {
3273 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3274 ent_size, ent_offset - offset);
3275 goto out;
3276 }
3277
3278 res->name = pci_name(dev);
3279 res->start = start;
3280 res->end = end;
3281 res->flags = flags;
3282
3283 if (bei <= PCI_EA_BEI_BAR5)
3284 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3285 bei, res, prop);
3286 else if (bei == PCI_EA_BEI_ROM)
3287 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3288 res, prop);
3289 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3290 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3291 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3292 else
3293 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3294 bei, res, prop);
3295
3296out:
3297 return offset + ent_size;
3298}
3299
/* Enhanced Allocation Initialization */
3301void pci_ea_init(struct pci_dev *dev)
3302{
3303 int ea;
3304 u8 num_ent;
3305 int offset;
3306 int i;
3307
 /* find PCI EA capability in list */
3309 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3310 if (!ea)
3311 return;
3312
 /* determine the number of entries */
3314 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3315 &num_ent);
3316 num_ent &= PCI_EA_NUM_ENT_MASK;
3317
3318 offset = ea + PCI_EA_FIRST_ENT;
3319
 /* Skip DWORD 2 for type 1 functions */
3321 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3322 offset += 4;
3323
 /* parse each EA entry */
3325 for (i = 0; i < num_ent; ++i)
3326 offset = pci_ea_read(dev, offset);
3327}
3328
3329static void pci_add_saved_cap(struct pci_dev *pci_dev,
3330 struct pci_cap_saved_state *new_cap)
3331{
3332 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3333}
3334
/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *                            capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
3343static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3344 bool extended, unsigned int size)
3345{
3346 int pos;
3347 struct pci_cap_saved_state *save_state;
3348
3349 if (extended)
3350 pos = pci_find_ext_capability(dev, cap);
3351 else
3352 pos = pci_find_capability(dev, cap);
3353
3354 if (!pos)
3355 return 0;
3356
3357 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3358 if (!save_state)
3359 return -ENOMEM;
3360
3361 save_state->cap.cap_nr = cap;
3362 save_state->cap.cap_extended = extended;
3363 save_state->cap.size = size;
3364 pci_add_saved_cap(dev, save_state);
3365
3366 return 0;
3367}
3368
3369int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3370{
3371 return _pci_add_cap_save_buffer(dev, cap, false, size);
3372}
3373
3374int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3375{
3376 return _pci_add_cap_save_buffer(dev, cap, true, size);
3377}
3378
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving data
 * @dev: the PCI device
 */
3383void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3384{
3385 int error;
3386
3387 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3388 PCI_EXP_SAVE_REGS * sizeof(u16));
3389 if (error)
3390 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3391
3392 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3393 if (error)
3394 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3395
3396 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3397 2 * sizeof(u16));
3398 if (error)
3399 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3400
3401 pci_allocate_vc_save_buffers(dev);
3402}
3403
3404void pci_free_cap_save_buffers(struct pci_dev *dev)
3405{
3406 struct pci_cap_saved_state *tmp;
3407 struct hlist_node *n;
3408
3409 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3410 kfree(tmp);
3411}
3412
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
3420void pci_configure_ari(struct pci_dev *dev)
3421{
3422 u32 cap;
3423 struct pci_dev *bridge;
3424
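 /* Only function 0 of a PCIe device configures ARI on its upstream bridge */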
3425 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3426 return;
3427
3428 bridge = dev->bus->self;
3429 if (!bridge)
3430 return;
3431
3432 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3433 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3434 return;
3435
3436 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3437 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3438 PCI_EXP_DEVCTL2_ARI);
3439 bridge->ari_enabled = 1;
3440 } else {
3441 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3442 PCI_EXP_DEVCTL2_ARI);
3443 bridge->ari_enabled = 0;
3444 }
3445}
3446
3447static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3448{
3449 int pos;
3450 u16 cap, ctrl;
3451
3452 pos = pdev->acs_cap;
3453 if (!pos)
3454 return false;
3455
 /*
 * Except for egress control, capabilities are either required
 * or only required if controllable.  Features missing from the
 * capability field can therefore be assumed as hard-wired enabled.
 */
3461 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3462 acs_flags &= (cap | PCI_ACS_EC);
3463
3464 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
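 /* All remaining requested flags must be enabled in the ACS Control register */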
3465 return (ctrl & acs_flags) == acs_flags;
3466}
3467
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */
3484bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3485{
3486 int ret;
3487
3488 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3489 if (ret >= 0)
3490 return ret > 0;
3491
 /*
 * Conventional PCI and PCI-X devices never support ACS, either
 * effectively or actually.  The shared bus topology implies that
 * any device on the bus can receive or snoop DMA.
 */
3497 if (!pci_is_pcie(pdev))
3498 return false;
3499
3500 switch (pci_pcie_type(pdev)) {
 /*
 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
 * but since their primary interface is PCI/X, we conservatively
 * handle them as we would a non-PCIe device.
 */
3506 case PCI_EXP_TYPE_PCIE_BRIDGE:
 /*
 * PCIe 3.0, 6.12.1 excludes ACS on these devices ("ACS is never
 * applicable... must never implement an ACS Extended Capability"),
 * so ACS is treated as unavailable for them.
 */
3513 case PCI_EXP_TYPE_PCI_BRIDGE:
3514 case PCI_EXP_TYPE_RC_EC:
3515 return false;
3516
 /*
 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
 * implement ACS in order to indicate their peer-to-peer capabilities,
 * regardless of whether they are single- or multi-function devices.
 */
3521 case PCI_EXP_TYPE_DOWNSTREAM:
3522 case PCI_EXP_TYPE_ROOT_PORT:
3523 return pci_acs_flags_enabled(pdev, acs_flags);
3524
 /*
 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
 * implemented by the remaining endpoint types to indicate
 * peer-to-peer capabilities, but only when they are part of a
 * multifunction device.
 */
3531 case PCI_EXP_TYPE_ENDPOINT:
3532 case PCI_EXP_TYPE_UPSTREAM:
3533 case PCI_EXP_TYPE_LEG_END:
3534 case PCI_EXP_TYPE_RC_END:
3535 if (!pdev->multifunction)
3536 break;
3537
3538 return pci_acs_flags_enabled(pdev, acs_flags);
3539 }
3540
 /*
 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
 * to single function devices with the exception of downstream ports.
 */
3545 return true;
3546}
3547
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
3557bool pci_acs_path_enabled(struct pci_dev *start,
3558 struct pci_dev *end, u16 acs_flags)
3559{
3560 struct pci_dev *pdev, *parent = start;
3561
3562 do {
3563 pdev = parent;
3564
3565 if (!pci_acs_enabled(pdev, acs_flags))
3566 return false;
3567
3568 if (pci_is_root_bus(pdev->bus))
3569 return (end == NULL);
3570
3571 parent = pdev->bus->self;
3572 } while (pdev != end);
3573
3574 return true;
3575}
3576
/**
 * pci_acs_init - Initialize ACS if hardware supports it
 * @dev: the PCI device
 */
3581void pci_acs_init(struct pci_dev *dev)
3582{
3583 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3584
 /*
 * Attempt to enable ACS regardless of capability because some Root
 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
 * the standard ACS capability but can still have ACS-equivalent
 * isolation enabled via a device-specific quirk.
 */
3591 pci_enable_acs(dev);
3592}
3593
/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all.
 * Returns -ENOENT if no ctrl register for the BAR could be found.
 */
3603static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3604{
3605 unsigned int pos, nbars, i;
3606 u32 ctrl;
3607
3608 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3609 if (!pos)
3610 return -ENOTSUPP;
3611
3612 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3613 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3614 PCI_REBAR_CTRL_NBAR_SHIFT;
3615
3616 for (i = 0; i < nbars; i++, pos += 8) {
3617 int bar_idx;
3618
3619 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3620 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3621 if (bar_idx == bar)
3622 return pos;
3623 }
3624
3625 return -ENOENT;
3626}
3627
/**
 * pci_rebar_get_possible_sizes - get possible sizes for BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
 */
3636u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3637{
3638 int pos;
3639 u32 cap;
3640
3641 pos = pci_rebar_find_pos(pdev, bar);
3642 if (pos < 0)
3643 return 0;
3644
3645 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3646 cap &= PCI_REBAR_CAP_SIZES;
3647
 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3649 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3650 bar == 0 && cap == 0x7000)
3651 cap = 0x3f000;
3652
3653 return cap >> 4;
3654}
3655EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3656
/**
 * pci_rebar_get_current_size - get the current size of a BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Read the size of a BAR from the resizable BAR config.
 * Returns size if found or negative error code.
 */
3665int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3666{
3667 int pos;
3668 u32 ctrl;
3669
3670 pos = pci_rebar_find_pos(pdev, bar);
3671 if (pos < 0)
3672 return pos;
3673
3674 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3675 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3676}
3677
3678
/**
 * pci_rebar_set_size - set a new size for a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 * @size: new size as defined in the spec (0=1MB, 19=512GB)
 *
 * Set the new size of a BAR as defined in the spec.
 * Returns zero if resizing was successful, error code otherwise.
 */
3687int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3688{
3689 int pos;
3690 u32 ctrl;
3691
3692 pos = pci_rebar_find_pos(pdev, bar);
3693 if (pos < 0)
3694 return pos;
3695
3696 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3697 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3698 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3699 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3700 return 0;
3701}
3702
3703
/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *    PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *    PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *    PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities, or negative otherwise.
 */
3716int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3717{
3718 struct pci_bus *bus = dev->bus;
3719 struct pci_dev *bridge;
3720 u32 cap, ctl2;
3721
3722 if (!pci_is_pcie(dev))
3723 return -EINVAL;
3724
 /*
 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
 * AtomicOp requesters.  For now, we only support endpoints as
 * requesters and root ports as completers.  No endpoints as
 * completers, and no peer-to-peer.
 */
3732 switch (pci_pcie_type(dev)) {
3733 case PCI_EXP_TYPE_ENDPOINT:
3734 case PCI_EXP_TYPE_LEG_END:
3735 case PCI_EXP_TYPE_RC_END:
3736 break;
3737 default:
3738 return -EINVAL;
3739 }
3740
3741 while (bus->parent) {
3742 bridge = bus->self;
3743
3744 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3745
3746 switch (pci_pcie_type(bridge)) {
 /* Ensure switch ports support AtomicOp routing */
3748 case PCI_EXP_TYPE_UPSTREAM:
3749 case PCI_EXP_TYPE_DOWNSTREAM:
3750 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3751 return -EINVAL;
3752 break;
3753
 /* Ensure root port supports all the sizes we care about */
3755 case PCI_EXP_TYPE_ROOT_PORT:
3756 if ((cap & cap_mask) != cap_mask)
3757 return -EINVAL;
3758 break;
3759 }
3760
 /* Ensure upstream ports don't block AtomicOps on egress */
3762 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3763 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3764 &ctl2);
3765 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3766 return -EINVAL;
3767 }
3768
3769 bus = bus->parent;
3770 }
3771
3772 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3773 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3774 return 0;
3775}
3776EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3777
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0.
 */
3789u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3790{
3791 int slot;
3792
3793 if (pci_ari_enabled(dev->bus))
3794 slot = 0;
3795 else
3796 slot = PCI_SLOT(dev->devfn);
3797
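 /* Each level of bridge rotates INTA..INTD by the upstream slot number */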
3798 return (((pin - 1) + slot) % 4) + 1;
3799}
3800
3801int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3802{
3803 u8 pin;
3804
3805 pin = dev->pin;
3806 if (!pin)
3807 return -1;
3808
3809 while (!pci_is_root_bus(dev->bus)) {
3810 pin = pci_swizzle_interrupt_pin(dev, pin);
3811 dev = dev->bus->self;
3812 }
3813 *bridge = dev;
3814 return pin;
3815}
3816
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
3825u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3826{
3827 u8 pin = *pinp;
3828
3829 while (!pci_is_root_bus(dev->bus)) {
3830 pin = pci_swizzle_interrupt_pin(dev, pin);
3831 dev = dev->bus->self;
3832 }
3833 *pinp = pin;
3834 return PCI_SLOT(dev->devfn);
3835}
3836EXPORT_SYMBOL_GPL(pci_common_swizzle);
3837
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by
 *        pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region().  Call this function only
 * after all use of the PCI regions has ceased.
 */
3848void pci_release_region(struct pci_dev *pdev, int bar)
3849{
3850 struct pci_devres *dr;
3851
3852 if (pci_resource_len(pdev, bar) == 0)
3853 return;
3854 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3855 release_region(pci_resource_start(pdev, bar),
3856 pci_resource_len(pdev, bar));
3857 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3858 release_mem_region(pci_resource_start(pdev, bar),
3859 pci_resource_len(pdev, bar));
3860
3861 dr = find_pci_dr(pdev);
3862 if (dr)
3863 dr->region_mask &= ~(1 << bar);
3864}
3865EXPORT_SYMBOL(pci_release_region);
3866
/**
 * __pci_request_region - Reserved PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3886static int __pci_request_region(struct pci_dev *pdev, int bar,
3887 const char *res_name, int exclusive)
3888{
3889 struct pci_devres *dr;
3890
3891 if (pci_resource_len(pdev, bar) == 0)
3892 return 0;
3893
3894 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3895 if (!request_region(pci_resource_start(pdev, bar),
3896 pci_resource_len(pdev, bar), res_name))
3897 goto err_out;
3898 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3899 if (!__request_mem_region(pci_resource_start(pdev, bar),
3900 pci_resource_len(pdev, bar), res_name,
3901 exclusive))
3902 goto err_out;
3903 }
3904
3905 dr = find_pci_dr(pdev);
3906 if (dr)
3907 dr->region_mask |= 1 << bar;
3908
3909 return 0;
3910
3911err_out:
3912 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3913 &pdev->resource[bar]);
3914 return -EBUSY;
3915}
3916
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3931int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3932{
3933 return __pci_request_region(pdev, bar, res_name, 0);
3934}
3935EXPORT_SYMBOL(pci_request_region);
3936
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
3945void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3946{
3947 int i;
3948
3949 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3950 if (bars & (1 << i))
3951 pci_release_region(pdev, i);
3952}
3953EXPORT_SYMBOL(pci_release_selected_regions);
3954
3955static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3956 const char *res_name, int excl)
3957{
3958 int i;
3959
3960 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3961 if (bars & (1 << i))
3962 if (__pci_request_region(pdev, i, res_name, excl))
3963 goto err_out;
3964 return 0;
3965
3966err_out:
3967 while (--i >= 0)
3968 if (bars & (1 << i))
3969 pci_release_region(pdev, i);
3970
3971 return -EBUSY;
3972}
3973
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Returns 0 on success, or %EBUSY on error.
 */
3981int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3982 const char *res_name)
3983{
3984 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3985}
3986EXPORT_SYMBOL(pci_request_selected_regions);
3987
3988int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3989 const char *res_name)
3990{
3991 return __pci_request_selected_regions(pdev, bars, res_name,
3992 IORESOURCE_EXCLUSIVE);
3993}
3994EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3995
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by
 *        pci_request_regions()
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions().  Call this function only
 * after all use of the PCI regions has ceased.
 */
4006void pci_release_regions(struct pci_dev *pdev)
4007{
4008 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4009}
4010EXPORT_SYMBOL(pci_release_regions);
4011
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
4025int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4026{
4027 return pci_request_selected_regions(pdev,
4028 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4029}
4030EXPORT_SYMBOL(pci_request_regions);
4031
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name.  Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that /dev/mem
 * and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */
4047int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4048{
4049 return pci_request_selected_regions_exclusive(pdev,
4050 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4051}
4052EXPORT_SYMBOL(pci_request_regions_exclusive);
4053
/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise
 */
4058int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4059 resource_size_t size)
4060{
4061 int ret = 0;
4062#ifdef PCI_IOBASE
4063 struct logic_pio_hwaddr *range;
4064
4065 if (!size || addr + size < addr)
4066 return -EINVAL;
4067
4068 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4069 if (!range)
4070 return -ENOMEM;
4071
4072 range->fwnode = fwnode;
4073 range->size = size;
4074 range->hw_start = addr;
4075 range->flags = LOGIC_PIO_CPU_MMIO;
4076
4077 ret = logic_pio_register_range(range);
4078 if (ret)
4079 kfree(range);
4080
 /* Ignore duplicates due to deferred probing */
4082 if (ret == -EEXIST)
4083 ret = 0;
4084#endif
4085
4086 return ret;
4087}
4088
4089phys_addr_t pci_pio_to_address(unsigned long pio)
4090{
4091 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4092
4093#ifdef PCI_IOBASE
4094 if (pio >= MMIO_UPPER_LIMIT)
4095 return address;
4096
4097 address = logic_pio_to_hwaddr(pio);
4098#endif
4099
4100 return address;
4101}
4102EXPORT_SYMBOL_GPL(pci_pio_to_address);
4103
4104unsigned long __weak pci_address_to_pio(phys_addr_t address)
4105{
4106#ifdef PCI_IOBASE
4107 return logic_pio_trans_cpuaddr(address);
4108#else
4109 if (address > IO_SPACE_LIMIT)
4110 return (unsigned long)-1;
4111
4112 return (unsigned long) address;
4113#endif
4114}
4115
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
4126int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4127{
4128#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4129 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4130
4131 if (!(res->flags & IORESOURCE_IO))
4132 return -EINVAL;
4133
4134 if (res->end > IO_SPACE_LIMIT)
4135 return -EINVAL;
4136
4137 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4138 pgprot_device(PAGE_KERNEL));
4139#else
 /*
 * This architecture does not have memory mapped I/O space,
 * so this function should never be called
 */
4144 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4145 return -ENODEV;
4146#endif
4147}
4148EXPORT_SYMBOL(pci_remap_iospace);
4149
/**
 * pci_unmap_iospace - Unmap the memory mapped I/O space
 * @res: resource to be unmapped
 *
 * Unmap the CPU virtual address @res from virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
4158void pci_unmap_iospace(struct resource *res)
4159{
4160#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4161 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4162
4163 vunmap_range(vaddr, vaddr + resource_size(res));
4164#endif
4165}
4166EXPORT_SYMBOL(pci_unmap_iospace);
4167
4168static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4169{
4170 struct resource **res = ptr;
4171
4172 pci_unmap_iospace(*res);
4173}
4174
/**
 * devm_pci_remap_iospace - Managed pci_remap_iospace()
 * @dev: Generic device to remap IO address for
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
 * detach.
 */
4184int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4185 phys_addr_t phys_addr)
4186{
4187 const struct resource **ptr;
4188 int error;
4189
4190 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4191 if (!ptr)
4192 return -ENOMEM;
4193
4194 error = pci_remap_iospace(res, phys_addr);
4195 if (error) {
4196 devres_free(ptr);
4197 } else {
4198 *ptr = res;
4199 devres_add(dev, ptr);
4200 }
4201
4202 return error;
4203}
4204EXPORT_SYMBOL(devm_pci_remap_iospace);
4205
/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
 * detach.
 */
4215void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4216 resource_size_t offset,
4217 resource_size_t size)
4218{
4219 void __iomem **ptr, *addr;
4220
4221 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4222 if (!ptr)
4223 return NULL;
4224
4225 addr = pci_remap_cfgspace(offset, size);
4226 if (addr) {
4227 *ptr = addr;
4228 devres_add(dev, ptr);
4229 } else
4230 devres_free(ptr);
4231
4232 return addr;
4233}
4234EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4235
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
4255void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4256 struct resource *res)
4257{
4258 resource_size_t size;
4259 const char *name;
4260 void __iomem *dest_ptr;
4261
4262 BUG_ON(!dev);
4263
4264 if (!res || resource_type(res) != IORESOURCE_MEM) {
4265 dev_err(dev, "invalid resource\n");
4266 return IOMEM_ERR_PTR(-EINVAL);
4267 }
4268
4269 size = resource_size(res);
4270
4271 if (res->name)
4272 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4273 res->name);
4274 else
4275 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4276 if (!name)
4277 return IOMEM_ERR_PTR(-ENOMEM);
4278
4279 if (!devm_request_mem_region(dev, res->start, size, name)) {
4280 dev_err(dev, "can't request region for resource %pR\n", res);
4281 return IOMEM_ERR_PTR(-EBUSY);
4282 }
4283
4284 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4285 if (!dest_ptr) {
4286 dev_err(dev, "ioremap failed for resource %pR\n", res);
4287 devm_release_mem_region(dev, res->start, size);
4288 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4289 }
4290
4291 return dest_ptr;
4292}
4293EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4294
4295static void __pci_set_master(struct pci_dev *dev, bool enable)
4296{
4297 u16 old_cmd, cmd;
4298
4299 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4300 if (enable)
4301 cmd = old_cmd | PCI_COMMAND_MASTER;
4302 else
4303 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4304 if (cmd != old_cmd) {
4305 pci_dbg(dev, "%s bus mastering\n",
4306 enable ? "enabling" : "disabling");
4307 pci_write_config_word(dev, PCI_COMMAND, cmd);
4308 }
4309 dev->is_busmaster = enable;
4310}
4311
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
4319char * __weak __init pcibios_setup(char *str)
4320{
4321 return str;
4322}
4323
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
4332void __weak pcibios_set_master(struct pci_dev *dev)
4333{
4334 u8 lat;
4335
 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4337 if (pci_is_pcie(dev))
4338 return;
4339
4340 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
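 /* Raise too-small timer values to 64 and clamp large ones to pcibios_max_latency */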
4341 if (lat < 16)
4342 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4343 else if (lat > pcibios_max_latency)
4344 lat = pcibios_max_latency;
4345 else
4346 return;
4347
4348 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4349}
4350
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
4358void pci_set_master(struct pci_dev *dev)
4359{
4360 __pci_set_master(dev, true);
4361 pcibios_set_master(dev);
4362}
4363EXPORT_SYMBOL(pci_set_master);
4364
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
4369void pci_clear_master(struct pci_dev *dev)
4370{
4371 __pci_set_master(dev, false);
4372}
4373EXPORT_SYMBOL(pci_clear_master);
4374
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4385int pci_set_cacheline_size(struct pci_dev *dev)
4386{
4387 u8 cacheline_size;
4388
4389 if (!pci_cache_line_size)
4390 return -EINVAL;
4391
 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
    equal to or multiple of the right value. */
4394 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4395 if (cacheline_size >= pci_cache_line_size &&
4396 (cacheline_size % pci_cache_line_size) == 0)
4397 return 0;
4398
 /* Write the correct value. */
4400 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
 /* Read it back. */
4402 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4403 if (cacheline_size == pci_cache_line_size)
4404 return 0;
4405
4406 pci_dbg(dev, "cache line size of %d is not supported\n",
4407 pci_cache_line_size << 2);
4408
4409 return -EINVAL;
4410}
4411EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4412
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4421int pci_set_mwi(struct pci_dev *dev)
4422{
4423#ifdef PCI_DISABLE_MWI
4424 return 0;
4425#else
4426 int rc;
4427 u16 cmd;
4428
4429 rc = pci_set_cacheline_size(dev);
4430 if (rc)
4431 return rc;
4432
4433 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4434 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4435 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4436 cmd |= PCI_COMMAND_INVALIDATE;
4437 pci_write_config_word(dev, PCI_COMMAND, cmd);
4438 }
4439 return 0;
4440#endif
4441}
4442EXPORT_SYMBOL(pci_set_mwi);
4443
/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi().
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4452int pcim_set_mwi(struct pci_dev *dev)
4453{
4454 struct pci_devres *dr;
4455
4456 dr = find_pci_dr(dev);
4457 if (!dr)
4458 return -ENOMEM;
4459
4460 dr->mwi = 1;
4461 return pci_set_mwi(dev);
4462}
4463EXPORT_SYMBOL(pcim_set_mwi);
4464
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4474int pci_try_set_mwi(struct pci_dev *dev)
4475{
4476#ifdef PCI_DISABLE_MWI
4477 return 0;
4478#else
4479 return pci_set_mwi(dev);
4480#endif
4481}
4482EXPORT_SYMBOL(pci_try_set_mwi);
4483
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
4490void pci_clear_mwi(struct pci_dev *dev)
4491{
4492#ifndef PCI_DISABLE_MWI
4493 u16 cmd;
4494
4495 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4496 if (cmd & PCI_COMMAND_INVALIDATE) {
4497 cmd &= ~PCI_COMMAND_INVALIDATE;
4498 pci_write_config_word(dev, PCI_COMMAND, cmd);
4499 }
4500#endif
4501}
4502EXPORT_SYMBOL(pci_clear_mwi);
4503
/**
 * pci_disable_parity - disable parity checking for device
 * @dev: the PCI device to operate on
 *
 * Disable parity checking for device @dev
 */
4510void pci_disable_parity(struct pci_dev *dev)
4511{
4512 u16 cmd;
4513
4514 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4515 if (cmd & PCI_COMMAND_PARITY) {
4516 cmd &= ~PCI_COMMAND_PARITY;
4517 pci_write_config_word(dev, PCI_COMMAND, cmd);
4518 }
4519}
4520
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */
4528void pci_intx(struct pci_dev *pdev, int enable)
4529{
4530 u16 pci_command, new;
4531
4532 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4533
4534 if (enable)
4535 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4536 else
4537 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4538
4539 if (new != pci_command) {
4540 struct pci_devres *dr;
4541
4542 pci_write_config_word(pdev, PCI_COMMAND, new);
4543
4544 dr = find_pci_dr(pdev);
4545 if (dr && !dr->restore_intx) {
4546 dr->restore_intx = 1;
4547 dr->orig_intx = !enable;
4548 }
4549 }
4550}
4551EXPORT_SYMBOL_GPL(pci_intx);
4552
4553static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4554{
4555 struct pci_bus *bus = dev->bus;
4556 bool mask_updated = true;
4557 u32 cmd_status_dword;
4558 u16 origcmd, newcmd;
4559 unsigned long flags;
4560 bool irq_pending;
4561

 /*
 * We do a single dword read to retrieve both command and status.
 * Document assumptions that make this possible.
 */
4566 BUILD_BUG_ON(PCI_COMMAND % 4);
4567 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4568
4569 raw_spin_lock_irqsave(&pci_lock, flags);
4570
4571 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4572
4573 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4574
 /*
 * Check interrupt status register to see whether our device
 * triggered the interrupt (when masking) or the next IRQ is
 * already pending (when unmasking).
 */
4580 if (mask != irq_pending) {
4581 mask_updated = false;
4582 goto done;
4583 }
4584
4585 origcmd = cmd_status_dword;
4586 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4587 if (mask)
4588 newcmd |= PCI_COMMAND_INTX_DISABLE;
4589 if (newcmd != origcmd)
4590 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4591
4592done:
4593 raw_spin_unlock_irqrestore(&pci_lock, flags);
4594
4595 return mask_updated;
4596}
4597
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and return
 * true in that case. False is returned if no interrupt was pending.
 */
4605bool pci_check_and_mask_intx(struct pci_dev *dev)
4606{
4607 return pci_check_and_set_intx_mask(dev, true);
4608}
4609EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4610
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not and
 * return true. False is returned and the mask remains active if there was
 * still an interrupt pending.
 */
4619bool pci_check_and_unmask_intx(struct pci_dev *dev)
4620{
4621 return pci_check_and_set_intx_mask(dev, false);
4622}
4623EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4624
/**
 * pci_wait_for_pending_transaction - wait for pending transaction
 * @dev: the PCI device to operate on
 *
 * Return 0 if transaction is pending 1 otherwise.
 */
4631int pci_wait_for_pending_transaction(struct pci_dev *dev)
4632{
4633 if (!pci_is_pcie(dev))
4634 return 1;
4635
4636 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4637 PCI_EXP_DEVSTA_TRPND);
4638}
4639EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4640
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset unconditionally on @dev without
 * checking any flags and DEVCAP
 */
4648int pcie_flr(struct pci_dev *dev)
4649{
4650 if (!pci_wait_for_pending_transaction(dev))
4651 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4652
4653 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4654
4655 if (dev->imm_ready)
4656 return 0;
4657
 /*
 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
 * 100ms, but may silently discard requests while the FLR is in
 * progress.  Wait 100ms before trying to access the device.
 */
4663 msleep(100);
4664
4665 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4666}
4667EXPORT_SYMBOL_GPL(pcie_flr);
4668
/**
 * pcie_reset_flr - initiate a PCIe function level reset
 * @dev: device to reset
 * @probe: if true, return 0 if device can be reset this way
 *
 * Initiate a function level reset on @dev.
 */
4676int pcie_reset_flr(struct pci_dev *dev, bool probe)
4677{
4678 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4679 return -ENOTTY;
4680
4681 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4682 return -ENOTTY;
4683
4684 if (probe)
4685 return 0;
4686
4687 return pcie_flr(dev);
4688}
4689EXPORT_SYMBOL_GPL(pcie_reset_flr);
4690
4691static int pci_af_flr(struct pci_dev *dev, bool probe)
4692{
4693 int pos;
4694 u8 cap;
4695
4696 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4697 if (!pos)
4698 return -ENOTTY;
4699
4700 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4701 return -ENOTTY;
4702
4703 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4704 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4705 return -ENOTTY;
4706
4707 if (probe)
4708 return 0;
4709
 /*
 * Wait for Transaction Pending bit to clear.  A word-aligned test
 * is used, so we use the control offset rather than status and shift
 * the test bit to match.
 */
4715 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4716 PCI_AF_STATUS_TP << 8))
4717 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4718
4719 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4720
4721 if (dev->imm_ready)
4722 return 0;
4723
 /*
 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
 * updated 27 July 2006; a device must complete an FLR within
 * 100ms, but may silently discard requests while the FLR is in
 * progress.  Wait 100ms before trying to access the device.
 */
4730 msleep(100);
4731
4732 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4733}
4734
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: if true, return 0 if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in the D0 power state
 * already, -EINVAL is returned.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
4750static int pci_pm_reset(struct pci_dev *dev, bool probe)
4751{
4752 u16 csr;
4753
4754 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4755 return -ENOTTY;
4756
4757 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4758 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4759 return -ENOTTY;
4760
4761 if (probe)
4762 return 0;
4763
4764 if (dev->current_state != PCI_D0)
4765 return -EINVAL;
4766
4767 csr &= ~PCI_PM_CTRL_STATE_MASK;
4768 csr |= PCI_D3hot;
4769 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4770 pci_dev_d3_sleep(dev);
4771
4772 csr &= ~PCI_PM_CTRL_STATE_MASK;
4773 csr |= PCI_D0;
4774 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4775 pci_dev_d3_sleep(dev);
4776
4777 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4778}
4779
/**
 * pcie_wait_for_link_delay - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 * @delay: Delay to wait after link has become active (in ms)
 *
 * Use this to wait till link becomes active or inactive.
 */
4788static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4789 int delay)
4790{
4791 int timeout = 1000;
4792 bool ret;
4793 u16 lnk_status;
4794
 /*
 * Some controllers might not implement link active reporting. In this
 * case, we wait for 1000 ms + any delay requested by the caller.
 */
4799 if (!pdev->link_active_reporting) {
4800 msleep(timeout + delay);
4801 return true;
4802 }
4803
 /*
 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
 * after which we should expect an link active if the reset was
 * successful. If so, software must wait a minimum 100ms before sending
 * configuration requests to devices downstream this port.
 *
 * If the link fails to activate, either the device was physically
 * removed or the link is permanently failed.
 */
4813 if (active)
4814 msleep(20);
4815 for (;;) {
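 /* Poll the Data Link Layer Link Active bit until it matches the requested state */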
4816 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4817 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4818 if (ret == active)
4819 break;
4820 if (timeout <= 0)
4821 break;
4822 msleep(10);
4823 timeout -= 10;
4824 }
4825 if (active && ret)
4826 msleep(delay);
4827
4828 return ret == active;
4829}
4830
/**
 * pcie_wait_for_link - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
 *
 * Use this to wait till link becomes active or inactive.
 */
4838bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4839{
4840 return pcie_wait_for_link_delay(pdev, active, 100);
4841}
4842
/*
 * Find maximum D3cold delay required by all the devices on the bus.  The
 * spec says 100 ms, but firmware can lower it and we allow drivers to
 * increase it as well.
 *
 * Called with @pci_bus_sem locked for reading.
 */
4850static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4851{
4852 const struct pci_dev *pdev;
4853 int min_delay = 100;
4854 int max_delay = 0;
4855
4856 list_for_each_entry(pdev, &bus->devices, bus_list) {
4857 if (pdev->d3cold_delay < min_delay)
4858 min_delay = pdev->d3cold_delay;
4859 if (pdev->d3cold_delay > max_delay)
4860 max_delay = pdev->d3cold_delay;
4861 }
4862
4863 return max(min_delay, max_delay);
4864}
4865
/**
 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
 * @dev: PCI bridge
 *
 * Handle necessary delays before access to the devices on the secondary
 * side of the bridge are permitted after D3cold to D0 transition.
 *
 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
 * 4.3.2.
 */
4877void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4878{
4879 struct pci_dev *child;
4880 int delay;
4881
4882 if (pci_dev_is_disconnected(dev))
4883 return;
4884
4885 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4886 return;
4887
4888 down_read(&pci_bus_sem);
4889
 /*
 * We only deal with devices that are present currently on the bus.
 * For any hot-added devices the access delay is handled in pciehp
 * board_added(). In case of ACPI hotplug the firmware is expected
 * to configure the devices before OS is notified.
 */
4896 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4897 up_read(&pci_bus_sem);
4898 return;
4899 }
4900
 /* Take d3cold_delay requirements into account */
4902 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4903 if (!delay) {
4904 up_read(&pci_bus_sem);
4905 return;
4906 }
4907
4908 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4909 bus_list);
4910 up_read(&pci_bus_sem);
4911
 /*
 * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
 * accessing the device after reset (that is 1000 ms + 100 ms). In
 * practice this should not be needed because we don't do power
 * management for them (see pci_bridge_d3_possible()).
 */
4918 if (!pci_is_pcie(dev)) {
4919 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4920 msleep(1000 + delay);
4921 return;
4922 }
4923
 /*
 * For PCIe downstream and root ports that do not support speeds
 * greater than 5 GT/s need to wait minimum 100 ms. For higher
 * speeds (gen3) we need to wait first for the data link layer to
 * become active.
 *
 * However, 100 ms is the minimum and the PCIe spec says the
 * software must allow at least 1s before it can determine that the
 * device that did not respond is a broken device. There is
 * evidence that 100 ms is not always enough, for example certain
 * Titan Ridge xHCI controller does not always respond to
 * configuration requests if we only wait for 100 ms.
 *
 * Therefore we wait for 100 ms and check for the device presence.
 * If it is still not present give it an additional 100 ms.
 */
4941 if (!pcie_downstream_port(dev))
4942 return;
4943
4944 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4945 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4946 msleep(delay);
4947 } else {
4948 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4949 delay);
4950 if (!pcie_wait_for_link_delay(dev, true, delay)) {
 /* Did not train, no need to wait any further */
4952 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4953 return;
4954 }
4955 }
4956
4957 if (!pci_device_is_present(child)) {
4958 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4959 msleep(delay);
4960 }
4961}
4962
4963void pci_reset_secondary_bus(struct pci_dev *dev)
4964{
4965 u16 ctrl;
4966
4967 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4968 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4969 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4970
 /*
 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
 * this to 2ms to ensure that we meet the minimum requirement.
 */
4975 msleep(2);
4976
4977 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4978 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4979
 /*
 * Trhfa for conventional PCI is 2^25 clock cycles.
 * Assuming a minimum 33MHz clock this results in a 1s
 * delay before we can consider subordinate devices to
 * be re-initialized.  PCIe has some ways to shorten this,
 * but we don't make use of them yet.
 */
4987 ssleep(1);
4988}
4989
4990void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4991{
4992 pci_reset_secondary_bus(dev);
4993}
4994
/**
 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
5002int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5003{
5004 pcibios_reset_secondary_bus(dev);
5005
5006 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
5007}
5008EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5009
5010static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5011{
5012 struct pci_dev *pdev;
5013
5014 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5015 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5016 return -ENOTTY;
5017
5018 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5019 if (pdev != dev)
5020 return -ENOTTY;
5021
5022 if (probe)
5023 return 0;
5024
5025 return pci_bridge_secondary_bus_reset(dev->bus->self);
5026}
5027
5028static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5029{
5030 int rc = -ENOTTY;
5031
5032 if (!hotplug || !try_module_get(hotplug->owner))
5033 return rc;
5034
5035 if (hotplug->ops->reset_slot)
5036 rc = hotplug->ops->reset_slot(hotplug, probe);
5037
5038 module_put(hotplug->owner);
5039
5040 return rc;
5041}
5042
5043static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5044{
5045 if (dev->multifunction || dev->subordinate || !dev->slot ||
5046 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5047 return -ENOTTY;
5048
5049 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5050}
5051
5052static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5053{
5054 int rc;
5055
5056 rc = pci_dev_reset_slot_function(dev, probe);
5057 if (rc != -ENOTTY)
5058 return rc;
5059 return pci_parent_bus_reset(dev, probe);
5060}
5061
5062static void pci_dev_lock(struct pci_dev *dev)
5063{
5064 pci_cfg_access_lock(dev);
5065
5066 device_lock(&dev->dev);
5067}
5068
/* Return 1 on successful lock, 0 on contention */
5070int pci_dev_trylock(struct pci_dev *dev)
5071{
5072 if (pci_cfg_access_trylock(dev)) {
5073 if (device_trylock(&dev->dev))
5074 return 1;
5075 pci_cfg_access_unlock(dev);
5076 }
5077
5078 return 0;
5079}
5080EXPORT_SYMBOL_GPL(pci_dev_trylock);
5081
5082void pci_dev_unlock(struct pci_dev *dev)
5083{
5084 device_unlock(&dev->dev);
5085 pci_cfg_access_unlock(dev);
5086}
5087EXPORT_SYMBOL_GPL(pci_dev_unlock);
5088
5089static void pci_dev_save_and_disable(struct pci_dev *dev)
5090{
5091 const struct pci_error_handlers *err_handler =
5092 dev->driver ? dev->driver->err_handler : NULL;
5093
 /*
 * dev->driver->err_handler->reset_prepare() is protected against
 * races with ->remove() by the device lock, which must be held by
 * the caller.
 */
5099 if (err_handler && err_handler->reset_prepare)
5100 err_handler->reset_prepare(dev);
5101
 /*
 * Wake-up device prior to save.  PM registers default to D0 after
 * reset and a simple register restore doesn't reliably return
 * to a non-D0 state anyway.
 */
5107 pci_set_power_state(dev, PCI_D0);
5108
5109 pci_save_state(dev);
5110
 /*
 * Disable the device by clearing the Command register, except for
 * INTx-disable which is set.  This not only disables MMIO and I/O port
 * BARs, but also prevents the device from being Bus Master, preventing
 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
 * devices, INTx-disable prevents legacy interrupts.
 */
5117 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5118}
5119
5120static void pci_dev_restore(struct pci_dev *dev)
5121{
5122 const struct pci_error_handlers *err_handler =
5123 dev->driver ? dev->driver->err_handler : NULL;
5124
5125 pci_restore_state(dev);
5126
 /*
 * dev->driver->err_handler->reset_done() is protected against
 * races with ->remove() by the device lock, which must be held by
 * the caller.
 */
5132 if (err_handler && err_handler->reset_done)
5133 err_handler->reset_done(dev);
5134}
5135
/* dev->reset_methods[] is a 0-terminated list of indices into this array */
5137static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5138 { },
5139 { pci_dev_specific_reset, .name = "device_specific" },
5140 { pci_dev_acpi_reset, .name = "acpi" },
5141 { pcie_reset_flr, .name = "flr" },
5142 { pci_af_flr, .name = "af_flr" },
5143 { pci_pm_reset, .name = "pm" },
5144 { pci_reset_bus_function, .name = "bus" },
5145};
5146
5147static ssize_t reset_method_show(struct device *dev,
5148 struct device_attribute *attr, char *buf)
5149{
5150 struct pci_dev *pdev = to_pci_dev(dev);
5151 ssize_t len = 0;
5152 int i, m;
5153
5154 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5155 m = pdev->reset_methods[i];
5156 if (!m)
5157 break;
5158
5159 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5160 pci_reset_fn_methods[m].name);
5161 }
5162
5163 if (len)
5164 len += sysfs_emit_at(buf, len, "\n");
5165
5166 return len;
5167}
5168
5169static int reset_method_lookup(const char *name)
5170{
5171 int m;
5172
5173 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5174 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5175 return m;
5176 }
5177
5178 return 0;
5179}
5180
5181static ssize_t reset_method_store(struct device *dev,
5182 struct device_attribute *attr,
5183 const char *buf, size_t count)
5184{
5185 struct pci_dev *pdev = to_pci_dev(dev);
5186 char *options, *name;
5187 int m, n;
5188 u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5189
5190 if (sysfs_streq(buf, "")) {
5191 pdev->reset_methods[0] = 0;
5192 pci_warn(pdev, "All device reset methods disabled by user");
5193 return count;
5194 }
5195
5196 if (sysfs_streq(buf, "default")) {
5197 pci_init_reset_methods(pdev);
5198 return count;
5199 }
5200
5201 options = kstrndup(buf, count, GFP_KERNEL);
5202 if (!options)
5203 return -ENOMEM;
5204
5205 n = 0;
5206 while ((name = strsep(&options, " ")) != NULL) {
5207 if (sysfs_streq(name, ""))
5208 continue;
5209
5210 name = strim(name);
5211
5212 m = reset_method_lookup(name);
5213 if (!m) {
5214 pci_err(pdev, "Invalid reset method '%s'", name);
5215 goto error;
5216 }
5217
5218 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5219 pci_err(pdev, "Unsupported reset method '%s'", name);
5220 goto error;
5221 }
5222
5223 if (n == PCI_NUM_RESET_METHODS - 1) {
5224 pci_err(pdev, "Too many reset methods\n");
5225 goto error;
5226 }
5227
5228 reset_methods[n++] = m;
5229 }
5230
5231 reset_methods[n] = 0;
5232
 /* Warn if dev-specific supported but not highest priority */
5234 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5235 reset_methods[0] != 1)
5236 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5237 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5238 kfree(options);
5239 return count;
5240
5241error:
5242
5243 kfree(options);
5244 return -EINVAL;
5245}
5246static DEVICE_ATTR_RW(reset_method);
5247
5248static struct attribute *pci_dev_reset_method_attrs[] = {
5249 &dev_attr_reset_method.attr,
5250 NULL,
5251};
5252
5253static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5254 struct attribute *a, int n)
5255{
5256 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5257
5258 if (!pci_reset_supported(pdev))
5259 return 0;
5260
5261 return a->mode;
5262}
5263
5264const struct attribute_group pci_dev_reset_method_attr_group = {
5265 .attrs = pci_dev_reset_method_attrs,
5266 .is_visible = pci_dev_reset_method_attr_is_visible,
5267};
5268
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
5289int __pci_reset_function_locked(struct pci_dev *dev)
5290{
5291 int i, m, rc = -ENOTTY;
5292
5293 might_sleep();
5294
 /*
 * A reset method returns -ENOTTY if it doesn't support this device and
 * we should try the next method.
 *
 * If it returns 0 (success), we're finished.  If it returns any other
 * error, we're done trying, and the caller knows that we weren't able
 * to reset the device.
 */
5303 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5304 m = dev->reset_methods[i];
5305 if (!m)
5306 return -ENOTTY;
5307
5308 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5309 if (!rc)
5310 return 0;
5311 if (rc != -ENOTTY)
5312 return rc;
5313 }
5314
5315 return -ENOTTY;
5316}
5317EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5318
/**
 * pci_init_reset_methods - check whether device can be safely reset
 * and store supported reset mechanisms.
 * @dev: PCI device to check for reset mechanisms
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be in D0-D3hot
 * state.
 *
 * Stores reset mechanisms supported by device in reset_methods byte array
 * which is a member of struct pci_dev.
 */
5331void pci_init_reset_methods(struct pci_dev *dev)
5332{
5333 int m, i, rc;
5334
5335 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5336
5337 might_sleep();
5338
5339 i = 0;
5340 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5341 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5342 if (!rc)
5343 dev->reset_methods[i++] = m;
5344 else if (rc != -ENOTTY)
5345 break;
5346 }
5347
5348 dev->reset_methods[i] = 0;
5349}
5350
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device state
 * over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
5367int pci_reset_function(struct pci_dev *dev)
5368{
5369 int rc;
5370
5371 if (!pci_reset_supported(dev))
5372 return -ENOTTY;
5373
5374 pci_dev_lock(dev);
5375 pci_dev_save_and_disable(dev);
5376
5377 rc = __pci_reset_function_locked(dev);
5378
5379 pci_dev_restore(dev);
5380 pci_dev_unlock(dev);
5381
5382 return rc;
5383}
5384EXPORT_SYMBOL_GPL(pci_reset_function);
5385
/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device state
 * over the reset.  It also differs from pci_reset_function() in that it
 * requires the PCI device lock to be held.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
5403int pci_reset_function_locked(struct pci_dev *dev)
5404{
5405 int rc;
5406
5407 if (!pci_reset_supported(dev))
5408 return -ENOTTY;
5409
5410 pci_dev_save_and_disable(dev);
5411
5412 rc = __pci_reset_function_locked(dev);
5413
5414 pci_dev_restore(dev);
5415
5416 return rc;
5417}
5418EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5419
/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
5426int pci_try_reset_function(struct pci_dev *dev)
5427{
5428 int rc;
5429
5430 if (!pci_reset_supported(dev))
5431 return -ENOTTY;
5432
5433 if (!pci_dev_trylock(dev))
5434 return -EAGAIN;
5435
5436 pci_dev_save_and_disable(dev);
5437 rc = __pci_reset_function_locked(dev);
5438 pci_dev_restore(dev);
5439 pci_dev_unlock(dev);
5440
5441 return rc;
5442}
5443EXPORT_SYMBOL_GPL(pci_try_reset_function);
5444
/* Do any devices on or below this bus prevent a bus reset? */
5446static bool pci_bus_resetable(struct pci_bus *bus)
5447{
5448 struct pci_dev *dev;
5449
5450
5451 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5452 return false;
5453
5454 list_for_each_entry(dev, &bus->devices, bus_list) {
5455 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5456 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5457 return false;
5458 }
5459
5460 return true;
5461}
5462
/* Lock devices from the top of the tree down */
5464static void pci_bus_lock(struct pci_bus *bus)
5465{
5466 struct pci_dev *dev;
5467
5468 list_for_each_entry(dev, &bus->devices, bus_list) {
5469 pci_dev_lock(dev);
5470 if (dev->subordinate)
5471 pci_bus_lock(dev->subordinate);
5472 }
5473}
5474
/* Unlock devices from the bottom of the tree up */
5476static void pci_bus_unlock(struct pci_bus *bus)
5477{
5478 struct pci_dev *dev;
5479
5480 list_for_each_entry(dev, &bus->devices, bus_list) {
5481 if (dev->subordinate)
5482 pci_bus_unlock(dev->subordinate);
5483 pci_dev_unlock(dev);
5484 }
5485}
5486
/* Return 1 on successful lock, 0 on contention */
5488static int pci_bus_trylock(struct pci_bus *bus)
5489{
5490 struct pci_dev *dev;
5491
5492 list_for_each_entry(dev, &bus->devices, bus_list) {
5493 if (!pci_dev_trylock(dev))
5494 goto unlock;
5495 if (dev->subordinate) {
5496 if (!pci_bus_trylock(dev->subordinate)) {
5497 pci_dev_unlock(dev);
5498 goto unlock;
5499 }
5500 }
5501 }
5502 return 1;
5503
5504unlock:
5505 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5506 if (dev->subordinate)
5507 pci_bus_unlock(dev->subordinate);
5508 pci_dev_unlock(dev);
5509 }
5510 return 0;
5511}
5512
/* Do any devices on or below this slot prevent a bus reset? */
5514static bool pci_slot_resetable(struct pci_slot *slot)
5515{
5516 struct pci_dev *dev;
5517
5518 if (slot->bus->self &&
5519 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5520 return false;
5521
5522 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5523 if (!dev->slot || dev->slot != slot)
5524 continue;
5525 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5526 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5527 return false;
5528 }
5529
5530 return true;
5531}
5532
/* Lock devices from the top of the tree down */
5534static void pci_slot_lock(struct pci_slot *slot)
5535{
5536 struct pci_dev *dev;
5537
5538 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5539 if (!dev->slot || dev->slot != slot)
5540 continue;
5541 pci_dev_lock(dev);
5542 if (dev->subordinate)
5543 pci_bus_lock(dev->subordinate);
5544 }
5545}
5546
/* Unlock devices from the bottom of the tree up */
5548static void pci_slot_unlock(struct pci_slot *slot)
5549{
5550 struct pci_dev *dev;
5551
5552 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5553 if (!dev->slot || dev->slot != slot)
5554 continue;
5555 if (dev->subordinate)
5556 pci_bus_unlock(dev->subordinate);
5557 pci_dev_unlock(dev);
5558 }
5559}
5560
/* Return 1 on successful lock, 0 on contention */
5562static int pci_slot_trylock(struct pci_slot *slot)
5563{
5564 struct pci_dev *dev;
5565
5566 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5567 if (!dev->slot || dev->slot != slot)
5568 continue;
5569 if (!pci_dev_trylock(dev))
5570 goto unlock;
5571 if (dev->subordinate) {
5572 if (!pci_bus_trylock(dev->subordinate)) {
5573 pci_dev_unlock(dev);
5574 goto unlock;
5575 }
5576 }
5577 }
5578 return 1;
5579
5580unlock:
5581 list_for_each_entry_continue_reverse(dev,
5582 &slot->bus->devices, bus_list) {
5583 if (!dev->slot || dev->slot != slot)
5584 continue;
5585 if (dev->subordinate)
5586 pci_bus_unlock(dev->subordinate);
5587 pci_dev_unlock(dev);
5588 }
5589 return 0;
5590}
5591
/*
 * Save and disable devices from the top of the tree down while holding
 * the @dev mutex lock for the entire tree.
 */
5596static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5597{
5598 struct pci_dev *dev;
5599
5600 list_for_each_entry(dev, &bus->devices, bus_list) {
5601 pci_dev_save_and_disable(dev);
5602 if (dev->subordinate)
5603 pci_bus_save_and_disable_locked(dev->subordinate);
5604 }
5605}
5606
/*
 * Restore devices from top of the tree down while holding @dev mutex lock
 * for the entire tree.  Parent bridges need to be restored before we can
 * get to subordinate devices.
 */
5612static void pci_bus_restore_locked(struct pci_bus *bus)
5613{
5614 struct pci_dev *dev;
5615
5616 list_for_each_entry(dev, &bus->devices, bus_list) {
5617 pci_dev_restore(dev);
5618 if (dev->subordinate)
5619 pci_bus_restore_locked(dev->subordinate);
5620 }
5621}
5622
/*
 * Save and disable devices from the top of the tree down while holding
 * the @dev mutex lock for the entire tree.
 */
5627static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5628{
5629 struct pci_dev *dev;
5630
5631 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5632 if (!dev->slot || dev->slot != slot)
5633 continue;
5634 pci_dev_save_and_disable(dev);
5635 if (dev->subordinate)
5636 pci_bus_save_and_disable_locked(dev->subordinate);
5637 }
5638}
5639
/*
 * Restore devices from top of the tree down while holding @dev mutex lock
 * for the entire tree.  Parent bridges need to be restored before we can
 * get to subordinate devices.
 */
5645static void pci_slot_restore_locked(struct pci_slot *slot)
5646{
5647 struct pci_dev *dev;
5648
5649 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5650 if (!dev->slot || dev->slot != slot)
5651 continue;
5652 pci_dev_restore(dev);
5653 if (dev->subordinate)
5654 pci_bus_restore_locked(dev->subordinate);
5655 }
5656}
5657
5658static int pci_slot_reset(struct pci_slot *slot, bool probe)
5659{
5660 int rc;
5661
5662 if (!slot || !pci_slot_resetable(slot))
5663 return -ENOTTY;
5664
5665 if (!probe)
5666 pci_slot_lock(slot);
5667
5668 might_sleep();
5669
5670 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5671
5672 if (!probe)
5673 pci_slot_unlock(slot);
5674
5675 return rc;
5676}
5677
/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
5684int pci_probe_reset_slot(struct pci_slot *slot)
5685{
5686 return pci_slot_reset(slot, PCI_RESET_PROBE);
5687}
5688EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5689
/**
 * __pci_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots.  For instance, some slots may support slot power
 * control.  In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * function of the slot and any subordinate buses behind the slot are reset
 * through this function.  PCI config space of all devices in the slot and
 * behind the slot is saved before and restored after reset.
 *
 * Same as above except return -EAGAIN if the slot cannot be locked
 */
5705static int __pci_reset_slot(struct pci_slot *slot)
5706{
5707 int rc;
5708
5709 rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5710 if (rc)
5711 return rc;
5712
5713 if (pci_slot_trylock(slot)) {
5714 pci_slot_save_and_disable_locked(slot);
5715 might_sleep();
5716 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5717 pci_slot_restore_locked(slot);
5718 pci_slot_unlock(slot);
5719 } else
5720 rc = -EAGAIN;
5721
5722 return rc;
5723}
5724
5725static int pci_bus_reset(struct pci_bus *bus, bool probe)
5726{
5727 int ret;
5728
5729 if (!bus->self || !pci_bus_resetable(bus))
5730 return -ENOTTY;
5731
5732 if (probe)
5733 return 0;
5734
5735 pci_bus_lock(bus);
5736
5737 might_sleep();
5738
5739 ret = pci_bridge_secondary_bus_reset(bus->self);
5740
5741 pci_bus_unlock(bus);
5742
5743 return ret;
5744}
5745
/**
 * pci_bus_error_reset - reset the bridge's subordinate bus
 * @bridge: The parent device that connects to the bus to reset
 *
 * This function will first try to reset the slots on this bus if the method is
 * available. If slot reset fails or is not available, this will fall back to a
 * secondary bus reset.
 */
5754int pci_bus_error_reset(struct pci_dev *bridge)
5755{
5756 struct pci_bus *bus = bridge->subordinate;
5757 struct pci_slot *slot;
5758
5759 if (!bus)
5760 return -ENOTTY;
5761
5762 mutex_lock(&pci_slot_mutex);
5763 if (list_empty(&bus->slots))
5764 goto bus_reset;
5765
5766 list_for_each_entry(slot, &bus->slots, list)
5767 if (pci_probe_reset_slot(slot))
5768 goto bus_reset;
5769
5770 list_for_each_entry(slot, &bus->slots, list)
5771 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5772 goto bus_reset;
5773
5774 mutex_unlock(&pci_slot_mutex);
5775 return 0;
5776bus_reset:
5777 mutex_unlock(&pci_slot_mutex);
5778 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5779}
5780
/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
5787int pci_probe_reset_bus(struct pci_bus *bus)
5788{
5789 return pci_bus_reset(bus, PCI_RESET_PROBE);
5790}
5791EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5792
/**
 * __pci_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
5799static int __pci_reset_bus(struct pci_bus *bus)
5800{
5801 int rc;
5802
5803 rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5804 if (rc)
5805 return rc;
5806
5807 if (pci_bus_trylock(bus)) {
5808 pci_bus_save_and_disable_locked(bus);
5809 might_sleep();
5810 rc = pci_bridge_secondary_bus_reset(bus->self);
5811 pci_bus_restore_locked(bus);
5812 pci_bus_unlock(bus);
5813 } else
5814 rc = -EAGAIN;
5815
5816 return rc;
5817}
5818
/**
 * pci_reset_bus - Try to reset a PCI bus
 * @pdev: top level PCI device to reset via slot/bus
 *
 * Same as above except return -EAGAIN if the bus cannot be locked
 */
5825int pci_reset_bus(struct pci_dev *pdev)
5826{
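 /* Prefer a slot reset when the topology supports one, else fall back to a bus reset */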
5827 return (!pci_probe_reset_slot(pdev->slot)) ?
5828 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5829}
5830EXPORT_SYMBOL_GPL(pci_reset_bus);
5831
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes or
 * appropriate error value.
 */
5839int pcix_get_max_mmrbc(struct pci_dev *dev)
5840{
5841 int cap;
5842 u32 stat;
5843
5844 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5845 if (!cap)
5846 return -EINVAL;
5847
5848 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5849 return -EINVAL;
5850
5851 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5852}
5853EXPORT_SYMBOL(pcix_get_max_mmrbc);
5854
/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes or
 * appropriate error value.
 */
5862int pcix_get_mmrbc(struct pci_dev *dev)
5863{
5864 int cap;
5865 u16 cmd;
5866
5867 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5868 if (!cap)
5869 return -EINVAL;
5870
5871 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5872 return -EINVAL;
5873
5874 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5875}
5876EXPORT_SYMBOL(pcix_get_mmrbc);
5877
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have
 * errata that prevent this.
 */
5887int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5888{
5889 int cap;
5890 u32 stat, v, o;
5891 u16 cmd;
5892
5893 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5894 return -EINVAL;
5895
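 /* Encode byte count for the MMRBC field: 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3 */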
5896 v = ffs(mmrbc) - 10;
5897
5898 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5899 if (!cap)
5900 return -EINVAL;
5901
5902 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5903 return -EINVAL;
5904
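 /* The requested size must not exceed the device's designed maximum read byte count */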
5905 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5906 return -E2BIG;
5907
5908 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5909 return -EINVAL;
5910
5911 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5912 if (o != v) {
5913 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5914 return -EIO;
5915
5916 cmd &= ~PCI_X_CMD_MAX_READ;
5917 cmd |= v << 2;
5918 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5919 return -EIO;
5920 }
5921 return 0;
5922}
5923EXPORT_SYMBOL(pcix_set_mmrbc);
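
/*
 * Illustrative sketch: clamping a requested PCI-X read byte count to the
 * device's designed maximum before applying it.  "wanted" is a hypothetical
 * driver parameter assumed to be a power of two between 512 and 4096;
 * pcix_get_max_mmrbc() and pcix_set_mmrbc() are the real APIs.
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0 && pcix_set_mmrbc(pdev, min(wanted, max)))
 *		pci_warn(pdev, "failed to set PCI-X MMRBC\n");
 */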

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);

/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;
	int ret;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * to the max packet size to keep the host bridge from generating
	 * requests larger than we can cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_READRQ, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_readrq);
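
/*
 * Illustrative sketch: a driver lowering its Max Read Request Size, for
 * example to bound read completion latency.  256 is an arbitrary example
 * value; pcie_set_readrq() rejects anything that is not a power of two
 * between 128 and 4096.
 *
 *	int ret = pcie_set_readrq(pdev, 256);
 *
 *	if (ret)
 *		pci_warn(pdev, "could not set readrq: %d\n", ret);
 */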

/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
EXPORT_SYMBOL(pcie_get_mps);

/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;
	int ret;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						 PCI_EXP_DEVCTL_PAYLOAD, v);

	return pcibios_err_to_errno(ret);
}
EXPORT_SYMBOL(pcie_set_mps);
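
/*
 * Illustrative arithmetic for the MPS encoding used above: the DEVCTL
 * payload field stores log2(mps) - 7, so 128 encodes as 0, 256 as 1, and
 * so on, and pcie_get_mps() decodes it with 128 << field.  For example:
 *
 *	pcie_set_mps(pdev, 256);	encodes v = ffs(256) - 8 = 1
 *	pcie_get_mps(pdev);		returns 128 << 1 = 256
 *
 * Note that MPS is normally managed bus-wide by the PCI core according to
 * pcie_bus_config; drivers rarely need to call pcie_set_mps() directly.
 */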

/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
			     enum pci_bus_speed *speed,
			     enum pcie_link_width *width)
{
	u16 lnksta;
	enum pci_bus_speed next_speed;
	enum pcie_link_width next_width;
	u32 bw, next_bw;

	if (speed)
		*speed = PCI_SPEED_UNKNOWN;
	if (width)
		*width = PCIE_LNK_WIDTH_UNKNOWN;

	bw = 0;

	while (dev) {
		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);

		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			PCI_EXP_LNKSTA_NLW_SHIFT;

		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

		/* Check if current device limits the total bandwidth */
		if (!bw || next_bw <= bw) {
			bw = next_bw;

			if (limiting_dev)
				*limiting_dev = dev;
			if (speed)
				*speed = next_speed;
			if (width)
				*width = next_width;
		}

		dev = pci_upstream_bridge(dev);
	}

	return bw;
}
EXPORT_SYMBOL(pcie_bandwidth_available);
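
/*
 * Illustrative sketch: querying the bandwidth actually available to a
 * device and the upstream link that limits it.  All calls below are real
 * APIs from this file; the surrounding driver context is hypothetical.
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	struct pci_dev *limit = NULL;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 *	if (limit && limit != pdev)
 *		pci_info(pdev, "limited to %u Mb/s by %s\n",
 *			 bw, pci_name(limit));
 */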

/**
 * pcie_get_speed_cap - query for the PCI device's link speed capability
 * @dev: PCI device to query
 *
 * Query the PCI device speed capability.  Return the maximum link speed
 * supported by the device.
 */
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
{
	u32 lnkcap2, lnkcap;

	/*
	 * Link Capabilities 2 was added in PCIe r3.0.  PCIe r4.0, sec
	 * 7.5.3.18, recommends using the Supported Link Speeds Vector in
	 * Link Capabilities 2 when it is implemented.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * should use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);

	/* PCIe r3.0-compliant */
	if (lnkcap2)
		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
		return PCIE_SPEED_5_0GT;
	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
		return PCIE_SPEED_2_5GT;

	return PCI_SPEED_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_speed_cap);

/**
 * pcie_get_width_cap - query for the PCI device's link width capability
 * @dev: PCI device to query
 *
 * Query the PCI device width capability.  Return the maximum link width
 * supported by the device.
 */
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
{
	u32 lnkcap;

	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
	if (lnkcap)
		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;

	return PCIE_LNK_WIDTH_UNKNOWN;
}
EXPORT_SYMBOL(pcie_get_width_cap);

/**
 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
 * @dev: PCI device
 * @speed: storage for link speed
 * @width: storage for link width
 *
 * Calculate a PCI device's link bandwidth by querying for its link speed
 * and width, multiplying them, and applying encoding overhead.  The result
 * is in Mb/s, i.e., megabits/second of raw bandwidth.
 */
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
			   enum pcie_link_width *width)
{
	*speed = pcie_get_speed_cap(dev);
	*width = pcie_get_width_cap(dev);

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return 0;

	return *width * PCIE_SPEED2MBS_ENC(*speed);
}

/**
 * __pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 * @verbose: Print info even when enough bandwidth is available
 *
 * If the available bandwidth at the device is less than the device is
 * capable of, report the device's maximum possible bandwidth and the
 * upstream link that limits its performance.  If @verbose, always print
 * the available bandwidth, even if the device isn't constrained.
 */
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	struct pci_dev *limiting_dev = NULL;
	u32 bw_avail, bw_cap;

	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);

	if (bw_avail >= bw_cap && verbose)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
	else if (bw_avail < bw_cap)
		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
			 bw_avail / 1000, bw_avail % 1000,
			 pci_speed_string(speed), width,
			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
			 bw_cap / 1000, bw_cap % 1000,
			 pci_speed_string(speed_cap), width_cap);
}

/**
 * pcie_print_link_status - Report the PCI device's link speed and width
 * @dev: PCI device to query
 *
 * Report the available bandwidth at the device.
 */
void pcie_print_link_status(struct pci_dev *dev)
{
	__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);

/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;

	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);

	return bars;
}
EXPORT_SYMBOL(pci_select_bars);
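
/*
 * Illustrative sketch: pci_select_bars() is typically paired with
 * pci_request_selected_regions() so a driver claims only the BAR types it
 * actually uses, e.g. all MMIO BARs while leaving I/O port BARs alone:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	if (pci_request_selected_regions(pdev, bars, "mydrv"))
 *		return -EBUSY;
 *
 * ("mydrv" is a placeholder driver name.)
 */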

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
				  unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
					  flags);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE,
 *	selecting whether to change the device's own decodes and whether
 *	to traverse ancestors and change bridges
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
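
/*
 * Illustrative sketch: the VGA arbiter uses this interface to route the
 * legacy VGA ranges to one GPU at a time.  Enabling I/O and memory decode
 * for a device and the bridges above it might look like:
 *
 *	pci_set_vga_state(pdev, true,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */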

#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return false;

	adev = ACPI_COMPANION(&pdev->dev);
	if (!adev)
		return false;

	return adev->power.flags.power_resources &&
		acpi_has_method(adev->handle, "_PR3");
}
EXPORT_SYMBOL_GPL(pci_pr3_present);
#endif

/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn_from: alias slot and function
 * @nr_devfns: number of subsequent devfns to alias
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
		       unsigned int nr_devfns)
{
	int devfn_to;

	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
	devfn_to = devfn_from + nr_devfns - 1;

	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
	if (!dev->dma_alias_mask) {
		pci_warn(dev, "Unable to allocate DMA alias mask\n");
		return;
	}

	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);

	if (nr_devfns == 1)
		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
	else if (nr_devfns > 1)
		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
}
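
/*
 * Illustrative sketch: DMA aliases are added from header fixup quirks so
 * they take effect before IOMMU groups are built.  A device that issues
 * DMA as function 1 of its slot might get a quirk along these lines
 * (vendor/device IDs are placeholders):
 *
 *	static void quirk_dma_func1_alias(struct pci_dev *dev)
 *	{
 *		if (PCI_FUNC(dev->devfn) != 1)
 *			pci_add_dma_alias(dev,
 *				PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_dma_func1_alias);
 */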

bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
{
	return (dev1->dma_alias_mask &&
		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
	       (dev2->dma_alias_mask &&
		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
	       pci_real_dma_dev(dev1) == dev2 ||
	       pci_real_dma_dev(dev2) == dev1;
}

bool pci_device_is_present(struct pci_dev *pdev)
{
	u32 v;

	if (pci_dev_is_disconnected(pdev))
		return false;

	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;
	/* Propagate the "ignore hotplug" setting to the parent bridge */
	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

/**
 * pci_real_dma_dev - Get PCI DMA device for PCI device
 * @dev: the PCI device that may have a PCI DMA alias
 *
 * Permits the platform to provide architecture-specific functionality to
 * devices needing to alias DMA to another device.  This is the default
 * implementation.  Architecture implementations can override this.
 */
struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
{
	return dev;
}

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}

static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			if (align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = PAGE_SHIFT;
			}
		} else {
			align_order = PAGE_SHIFT;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
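
/*
 * The parameter parsed above follows the documented form
 * [<order of align>@]<device spec>[;<next entry>...], where the device
 * spec is either [<domain>:]<bus>:<slot>.<func> or
 * pci:<vendor>:<device>[:<subvendor>:<subdevice>].  For example
 * (illustrative IDs):
 *
 *	pci=resource_alignment=20@pci:8086:9c22
 *
 * requests 2^20 (1 MiB) alignment for all matching devices; when no
 * explicit order is given, PAGE_SHIFT is used.
 */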

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * resources and we use the second method.
	 */
	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by the kernel's "pci=resource_alignment=xxx"
 * parameter.  It also rounds up size to the specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There's no way to influence their alignment.
	 */
	if (dev->is_virtfn)
		return;

	/* Check whether alignment was requested for this device */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Also release the bridge's resource windows so the kernel can
	 * reassign them along with the realigned BARs later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param, *old, *end;

	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	param = kstrndup(buf, count, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	end = strchr(param, '\n');
	if (end)
		*end = '\0';

	spin_lock(&resource_alignment_lock);
	old = resource_alignment_param;
	if (strlen(param)) {
		resource_alignment_param = param;
	} else {
		kfree(param);
		resource_alignment_param = NULL;
	}
	spin_unlock(&resource_alignment_lock);

	kfree(old);

	return count;
}

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by using
	 * pci_get_new_domain_nr() and update the use_dt_domains value to
	 * keep track of the method we are using to assign domain numbers
	 * (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling; it is prevented by
	 * warning and leaving the domain number unassigned (-1).
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}
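
/*
 * The DT property consumed above is "linux,pci-domain" on the host bridge
 * node; an illustrative device tree fragment (node name and address are
 * placeholders):
 *
 *	pcie@fd000000 {
 *		...
 *		linux,pci-domain = <0>;
 *	};
 *
 * Either every host bridge carries the property or none does; mixing the
 * two numbering schemes is rejected above.
 */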

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmmiosize=", 11)) {
				pci_hotplug_mmio_size = memparse(str + 11, &str);
			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mmio_size = memparse(str + 10, &str);
				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
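
/*
 * A few illustrative command-line uses of the options parsed above
 * (values are examples, not recommendations):
 *
 *	pci=noats,pcie_bus_safe
 *	pci=realloc=on,hpmemsize=128M
 *	pci=resource_alignment=12@pci:8086:9c22
 *
 * Options are comma-separated; per the parsing above, hpmemsize= sets
 * both the MMIO and the prefetchable MMIO hotplug window sizes, and
 * unknown options are reported via pr_err().
 */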

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point into the __initdata copy of the kernel
 * command line.  Duplicate them here with kstrdup() so the pointers remain
 * valid after the init sections are freed; kstrdup(NULL) returns NULL, so
 * unset parameters stay unset.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);