// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused in kernel parameters. If the domain is
 * left unspecified, it is taken to be 0.  In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device.  The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
			    subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
			    subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
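
/*
 * Illustrative usage sketch (not part of the original file): a hypothetical
 * driver locating its Power Management capability before touching PMCSR.
 * The variable names are invented for the example.
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmcsr;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */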

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
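
/*
 * Illustrative usage sketch (invented for this example, not taken from a
 * real caller): locating the AER extended capability before reading one of
 * its status registers.
 *
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	u32 status;
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS,
 *				      &status);
 */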

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);
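
/*
 * Illustrative usage sketch (invented for this example): walking up to the
 * Root Port to read a link register there.
 *
 *	struct pci_dev *rp = pci_find_pcie_root_port(pdev);
 *	u16 lnksta;
 *
 *	if (rp)
 *		pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
 */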

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through - force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
				     dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen will set up both the device and
	 * the bridge to which it is attached as if the device were
	 * coming out of a full power-on reset, so the device's standard
	 * configuration registers (its BARs in particular) may need to
	 * be restored for it to work again.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * PCI config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by corresponding bridge,
		 * because have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			if (dev->d3cold_delay && !dev->imm_ready)
				msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Power off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	__pci_start_power_transition(dev, PCI_D0);
	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* Fall through: handle freeze/prethaw like a full suspend */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
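
/*
 * Illustrative sketch of the canonical legacy suspend pattern that
 * pci_choose_state() serves; the "foo" names are invented for the example
 * and do not refer to a real driver.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *		return 0;
 *	}
 */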

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = ilog2(resource_size(res)) - 20;
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
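
/*
 * Illustrative resume-side counterpart to the suspend sketch above (names
 * invented for the example): power the device back up, then restore the
 * previously saved configuration space.
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */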

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
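
/*
 * Illustrative sketch of the opaque saved-state API, e.g. around an
 * operation that clobbers config space (invented caller, not taken from a
 * real driver):
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... perform the disruptive operation ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */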

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory and I/O resources. Wake up the device if it was
 * suspended. Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
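
/*
 * Illustrative probe-path sketch (invented "foo" names): enable the device,
 * claim its regions, and undo the enable on failure.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			pci_disable_device(pdev);
 *		return rc;
 *	}
 */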

/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
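
/*
 * Illustrative managed counterpart to the probe sketch above (invented
 * names): with pcim_enable_device() the disable is performed automatically
 * by devres on driver detach, so no explicit unwind is needed.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		return pcim_enable_device(pdev);
 *	}
 */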

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing
 *			    device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Set the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			/*
			 * If the device is in D3cold it should not be
			 * polled either.
			 */
			if (pme_dev->dev->current_state == PCI_D3cold)
				continue;

			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				pci_warn(dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges that are not power-manageable directly only signal
	 * wakeup on behalf of subordinate devices which is set up
	 * elsewhere, so skip them. However, bridges that are
	 * power-manageable may signal wakeup for themselves (for example,
	 * on a hotplug event) and they need to be covered here.
	 */
	if (!pci_power_manageable(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  From now on we can think of the first phase as "native",
	 * and the second one as "ACPI".
	 */
	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}

/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @pci_dev: Target device
 * @state: PCI state from which device will issue wakeup events
 * @enable: Whether or not to enable event generation
 *
 * If @enable is set, check device_may_wakeup() for the device before calling
 * __pci_enable_wake() for it.
 */
int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
{
	if (enable && !device_may_wakeup(&pci_dev->dev))
		return -EINVAL;

	return __pci_enable_wake(pci_dev, state, enable);
}
EXPORT_SYMBOL(pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not allowed to wake
 * up the system from sleep or it is not capable of generating PME# from both
 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);
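
/*
 * Illustrative sketch (invented for this example): a driver arming
 * Wake-on-LAN at suspend time before entering D3.
 *
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */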

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to find the target state for the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* else, fall through */
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * The device is in D3cold already, so keep D3cold as the target
	 * state; its config space is not accessible for anything shallower
	 * to be programmed anyway.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
 *			  into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device
 * supports PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
		return false;

	if (device_can_wakeup(&dev->dev))
		return true;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_need_resume - Check if it is necessary to resume the device.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is not runtime-suspended or it has to be
 * reconfigured due to wakeup settings difference between system and runtime
 * suspend, or the current power state of it is not suitable for the upcoming
 * (system-wide) transition.
 */
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	pci_power_t target_state;

	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
		return true;

	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));

	/*
	 * If the earlier platform check has not triggered, D3cold is just
	 * power removal on top of D3hot, so no need to resume the device in
	 * that case.
	 */
	return target_state != pci_dev->current_state &&
	       target_state != PCI_D3cold &&
	       pci_dev->current_state != PCI_D3hot;
}

/**
 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
 * @pci_dev: Device to handle.
 *
 * If the device is suspended and it is not configured for system wakeup,
 * disable PME for it to prevent it from waking up the system unnecessarily.
 *
 * Note that if the device's power state is D3cold and the platform check in
 * pci_dev_need_resume() has not triggered, the device's configuration need not
 * be changed.
 */
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
	    pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the system suspend sequence.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

static const struct dmi_system_id bridge_d3_blacklist[] = {
#ifdef CONFIG_X86
	{
		/*
		 * Gigabyte X299 root port is not marked as hotplug capable
		 * which allows Linux to power manage it.  However, this
		 * confuses the BIOS SMI handler so don't power manage root
		 * ports on that system.
		 */
		.ident = "X299 DESIGNARE EX-CF",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
		},
	},
#endif
	{ }
};

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug ports handled by firmware in System Management
		 * Mode may not be put into D3 by the OS (Thunderbolt on
		 * non-Macs).
		 */
		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
			return false;

		if (pci_bridge_d3_force)
			return true;

		/* Even the oldest 2010 Thunderbolt controller supports D3. */
		if (bridge->is_thunderbolt)
			return true;

		/* Platform might know better if the bridge supports D3 */
		if (platform_pci_bridge_d3(bridge))
			return true;

		/*
		 * Hotplug ports handled natively by the OS were not validated
		 * by vendors for runtime D3 at least until 2018 because there
		 * was no OS support.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (dmi_check_system(bridge_d3_blacklist))
			return false;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_bios_year() >= 2015)
			return true;
		break;
	}

	return false;
}

static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 status;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		pci_err(dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			pci_info(dev, "supports%s%s\n",
				 dev->d1_support ? " D1" : "",
				 dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}

	pci_read_config_word(dev, PCI_STATUS, &status);
	if (status & PCI_STATUS_IMM_READY)
		dev->imm_ready = 1;
}

static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}
2895
2896
2897static int pci_ea_read(struct pci_dev *dev, int offset)
2898{
2899 struct resource *res;
2900 int ent_size, ent_offset = offset;
2901 resource_size_t start, end;
2902 unsigned long flags;
2903 u32 dw0, bei, base, max_offset;
2904 u8 prop;
2905 bool support_64 = (sizeof(resource_size_t) >= 8);
2906
2907 pci_read_config_dword(dev, ent_offset, &dw0);
2908 ent_offset += 4;
2909
2910
2911 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2912
2913 if (!(dw0 & PCI_EA_ENABLE))
2914 goto out;
2915
2916 bei = (dw0 & PCI_EA_BEI) >> 4;
2917 prop = (dw0 & PCI_EA_PP) >> 8;
2918
2919
2920
2921
2922
2923 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2924 prop = (dw0 & PCI_EA_SP) >> 16;
2925 if (prop > PCI_EA_P_BRIDGE_IO)
2926 goto out;
2927
2928 res = pci_ea_get_resource(dev, bei, prop);
2929 if (!res) {
2930 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2931 goto out;
2932 }
2933
2934 flags = pci_ea_flags(dev, prop);
2935 if (!flags) {
2936 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2937 goto out;
2938 }
2939
2940
2941 pci_read_config_dword(dev, ent_offset, &base);
2942 start = (base & PCI_EA_FIELD_MASK);
2943 ent_offset += 4;
2944
2945
2946 pci_read_config_dword(dev, ent_offset, &max_offset);
2947 ent_offset += 4;
2948
2949
2950 if (base & PCI_EA_IS_64) {
2951 u32 base_upper;
2952
2953 pci_read_config_dword(dev, ent_offset, &base_upper);
2954 ent_offset += 4;
2955
2956 flags |= IORESOURCE_MEM_64;
2957
2958
2959 if (!support_64 && base_upper)
2960 goto out;
2961
2962 if (support_64)
2963 start |= ((u64)base_upper << 32);
2964 }
2965
2966 end = start + (max_offset | 0x03);
2967
2968
2969 if (max_offset & PCI_EA_IS_64) {
2970 u32 max_offset_upper;
2971
2972 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2973 ent_offset += 4;
2974
2975 flags |= IORESOURCE_MEM_64;
2976
2977
2978 if (!support_64 && max_offset_upper)
2979 goto out;
2980
2981 if (support_64)
2982 end += ((u64)max_offset_upper << 32);
2983 }
2984
2985 if (end < start) {
2986 pci_err(dev, "EA Entry crosses address boundary\n");
2987 goto out;
2988 }
2989
2990 if (ent_size != ent_offset - offset) {
2991 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
2992 ent_size, ent_offset - offset);
2993 goto out;
2994 }
2995
2996 res->name = pci_name(dev);
2997 res->start = start;
2998 res->end = end;
2999 res->flags = flags;
3000
3001 if (bei <= PCI_EA_BEI_BAR5)
3002 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3003 bei, res, prop);
3004 else if (bei == PCI_EA_BEI_ROM)
3005 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3006 res, prop);
3007 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3008 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3009 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3010 else
3011 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3012 bei, res, prop);
3013
3014out:
3015 return offset + ent_size;
3016}
3017
3018
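/* Enhanced Allocation Initialization */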
3019void pci_ea_init(struct pci_dev *dev)
3020{
	int ea;
	u8 num_ent;
	int offset;
	int i;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
	if (!ea)
		return;

	/* determine the number of entries */
	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
				 &num_ent);
	num_ent &= PCI_EA_NUM_ENT_MASK;

	offset = ea + PCI_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		offset += 4;

	/* parse each EA entry */
	for (i = 0; i < num_ent; ++i)
		offset = pci_ea_read(dev, offset);
3045}
3046
3047static void pci_add_saved_cap(struct pci_dev *pci_dev,
3048 struct pci_cap_saved_state *new_cap)
3049{
3050 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3051}
3052
/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *			      capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
3061static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3062 bool extended, unsigned int size)
3063{
3064 int pos;
3065 struct pci_cap_saved_state *save_state;
3066
3067 if (extended)
3068 pos = pci_find_ext_capability(dev, cap);
3069 else
3070 pos = pci_find_capability(dev, cap);
3071
3072 if (!pos)
3073 return 0;
3074
3075 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3076 if (!save_state)
3077 return -ENOMEM;
3078
3079 save_state->cap.cap_nr = cap;
3080 save_state->cap.cap_extended = extended;
3081 save_state->cap.size = size;
3082 pci_add_saved_cap(dev, save_state);
3083
3084 return 0;
3085}
3086
3087int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3088{
3089 return _pci_add_cap_save_buffer(dev, cap, false, size);
3090}
3091
3092int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3093{
3094 return _pci_add_cap_save_buffer(dev, cap, true, size);
3095}
3096
3097
3098
3099
3100
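/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */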
3101void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3102{
3103 int error;
3104
3105 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3106 PCI_EXP_SAVE_REGS * sizeof(u16));
3107 if (error)
3108 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3109
3110 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3111 if (error)
3112 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3113
3114 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3115 2 * sizeof(u16));
3116 if (error)
3117 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3118
3119 pci_allocate_vc_save_buffers(dev);
3120}
3121
3122void pci_free_cap_save_buffers(struct pci_dev *dev)
3123{
3124 struct pci_cap_saved_state *tmp;
3125 struct hlist_node *n;
3126
3127 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3128 kfree(tmp);
3129}
3130
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
3138void pci_configure_ari(struct pci_dev *dev)
3139{
3140 u32 cap;
3141 struct pci_dev *bridge;
3142
3143 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3144 return;
3145
3146 bridge = dev->bus->self;
3147 if (!bridge)
3148 return;
3149
3150 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3151 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3152 return;
3153
3154 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3155 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3156 PCI_EXP_DEVCTL2_ARI);
3157 bridge->ari_enabled = 1;
3158 } else {
3159 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3160 PCI_EXP_DEVCTL2_ARI);
3161 bridge->ari_enabled = 0;
3162 }
3163}
3164
3165static int pci_acs_enable;
3166
3167
3168
3169
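/*
 * pci_request_acs - ask for ACS to be enabled
 *
 * Request that PCI Access Control Services be enabled on devices as they
 * are enumerated; must be called before enumeration to take effect.
 */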
3170void pci_request_acs(void)
3171{
3172 pci_acs_enable = 1;
3173}
3174
3175static const char *disable_acs_redir_param;
3176
/*
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
3183static void pci_disable_acs_redir(struct pci_dev *dev)
3184{
3185 int ret = 0;
3186 const char *p;
3187 int pos;
3188 u16 ctrl;
3189
3190 if (!disable_acs_redir_param)
3191 return;
3192
3193 p = disable_acs_redir_param;
3194 while (*p) {
3195 ret = pci_dev_str_match(dev, p, &p);
3196 if (ret < 0) {
3197 pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
3198 disable_acs_redir_param);
3199
3200 break;
3201 } else if (ret == 1) {
3202
3203 break;
3204 }
3205
3206 if (*p != ';' && *p != ',') {
3207
3208 break;
3209 }
3210 p++;
3211 }
3212
3213 if (ret != 1)
3214 return;
3215
3216 if (!pci_dev_specific_disable_acs_redir(dev))
3217 return;
3218
3219 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3220 if (!pos) {
3221 pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
3222 return;
3223 }
3224
3225 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3226
3227
3228 ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
3229
3230 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3231
3232 pci_info(dev, "disabled ACS redirect\n");
3233}
3234
3235
3236
3237
3238
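/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */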
3239static void pci_std_enable_acs(struct pci_dev *dev)
3240{
3241 int pos;
3242 u16 cap;
3243 u16 ctrl;
3244
3245 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3246 if (!pos)
3247 return;
3248
	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3265}
3266
3267
3268
3269
3270
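/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */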
3271void pci_enable_acs(struct pci_dev *dev)
3272{
3273 if (!pci_acs_enable)
3274 goto disable_acs_redir;
3275
3276 if (!pci_dev_specific_enable_acs(dev))
3277 goto disable_acs_redir;
3278
3279 pci_std_enable_acs(dev);
3280
3281disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware.  So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
3289 pci_disable_acs_redir(dev);
3290}
3291
3292static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3293{
3294 int pos;
3295 u16 cap, ctrl;
3296
3297 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
3298 if (!pos)
3299 return false;
3300
3301
3302
3303
3304
3305
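	/*
	 * Except for egress control, capabilities are either required
	 * or only required if controllable.  Features missing from the
	 * capability field can therefore be assumed as hard-wired enabled.
	 */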
3306 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3307 acs_flags &= (cap | PCI_ACS_EC);
3308
3309 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3310 return (ctrl & acs_flags) == acs_flags;
3311}
3312
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */
3329bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3330{
3331 int ret;
3332
3333 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3334 if (ret >= 0)
3335 return ret > 0;
3336
3337
3338
3339
3340
3341
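	/*
	 * Conventional PCI and PCI-X devices never support ACS, either
	 * effectively or actually.  The shared bus topology implies that
	 * any device on the bus can receive or snoop DMA.
	 */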
3342 if (!pci_is_pcie(pdev))
3343 return false;
3344
3345 switch (pci_pcie_type(pdev)) {
3346
3347
3348
3349
3350
3351 case PCI_EXP_TYPE_PCIE_BRIDGE:
3352
3353
3354
3355
3356
3357
3358 case PCI_EXP_TYPE_PCI_BRIDGE:
3359 case PCI_EXP_TYPE_RC_EC:
3360 return false;
3361
3362
3363
3364
3365
3366 case PCI_EXP_TYPE_DOWNSTREAM:
3367 case PCI_EXP_TYPE_ROOT_PORT:
3368 return pci_acs_flags_enabled(pdev, acs_flags);
3369
3370
3371
3372
3373
3374
3375
3376 case PCI_EXP_TYPE_ENDPOINT:
3377 case PCI_EXP_TYPE_UPSTREAM:
3378 case PCI_EXP_TYPE_LEG_END:
3379 case PCI_EXP_TYPE_RC_END:
3380 if (!pdev->multifunction)
3381 break;
3382
3383 return pci_acs_flags_enabled(pdev, acs_flags);
3384 }
3385
3386
3387
3388
3389
3390 return true;
3391}
3392
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
3402bool pci_acs_path_enabled(struct pci_dev *start,
3403 struct pci_dev *end, u16 acs_flags)
3404{
3405 struct pci_dev *pdev, *parent = start;
3406
3407 do {
3408 pdev = parent;
3409
3410 if (!pci_acs_enabled(pdev, acs_flags))
3411 return false;
3412
3413 if (pci_is_root_bus(pdev->bus))
3414 return (end == NULL);
3415
3416 parent = pdev->bus->self;
3417 } while (pdev != end);
3418
3419 return true;
3420}
3421
/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all.
 * Returns -ENOENT if no ctrl register for the BAR could be found.
 */
3431static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3432{
3433 unsigned int pos, nbars, i;
3434 u32 ctrl;
3435
3436 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3437 if (!pos)
3438 return -ENOTSUPP;
3439
3440 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3441 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3442 PCI_REBAR_CTRL_NBAR_SHIFT;
3443
3444 for (i = 0; i < nbars; i++, pos += 8) {
3445 int bar_idx;
3446
3447 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3448 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3449 if (bar_idx == bar)
3450 return pos;
3451 }
3452
3453 return -ENOENT;
3454}
3455
/**
 * pci_rebar_get_possible_sizes - get possible sizes for BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
 */
3464u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3465{
3466 int pos;
3467 u32 cap;
3468
3469 pos = pci_rebar_find_pos(pdev, bar);
3470 if (pos < 0)
3471 return 0;
3472
3473 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3474 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3475}
3476
/**
 * pci_rebar_get_current_size - get the current size of a BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Read the size of a BAR from the resizable BAR config.
 * Returns size if found or negative error code.
 */
3485int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3486{
3487 int pos;
3488 u32 ctrl;
3489
3490 pos = pci_rebar_find_pos(pdev, bar);
3491 if (pos < 0)
3492 return pos;
3493
3494 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3495 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3496}
3497
/**
 * pci_rebar_set_size - set a new size for a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 * @size: new size as defined in the spec (0=1MB, 19=512GB)
 *
 * Set the new size of a BAR as defined in the spec.
 * Returns zero if resizing was successful, error code otherwise.
 */
3507int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3508{
3509 int pos;
3510 u32 ctrl;
3511
3512 pos = pci_rebar_find_pos(pdev, bar);
3513 if (pos < 0)
3514 return pos;
3515
3516 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3517 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3518 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3519 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3520 return 0;
3521}
3522
/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */
3536int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3537{
3538 struct pci_bus *bus = dev->bus;
3539 struct pci_dev *bridge;
3540 u32 cap, ctl2;
3541
3542 if (!pci_is_pcie(dev))
3543 return -EINVAL;
3544
	/*
	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
	 * AtomicOp requesters.  For now, we only support endpoints as
	 * requesters and root ports as completers.  No endpoints as
	 * completers, and no peer-to-peer.
	 */
3552 switch (pci_pcie_type(dev)) {
3553 case PCI_EXP_TYPE_ENDPOINT:
3554 case PCI_EXP_TYPE_LEG_END:
3555 case PCI_EXP_TYPE_RC_END:
3556 break;
3557 default:
3558 return -EINVAL;
3559 }
3560
3561 while (bus->parent) {
3562 bridge = bus->self;
3563
3564 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3565
3566 switch (pci_pcie_type(bridge)) {
3567
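		/* Ensure switch ports support AtomicOp routing */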
3568 case PCI_EXP_TYPE_UPSTREAM:
3569 case PCI_EXP_TYPE_DOWNSTREAM:
3570 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3571 return -EINVAL;
3572 break;
3573
3574
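		/* Ensure root port supports all the sizes we care about */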
3575 case PCI_EXP_TYPE_ROOT_PORT:
3576 if ((cap & cap_mask) != cap_mask)
3577 return -EINVAL;
3578 break;
3579 }
3580
3581
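		/* Ensure upstream ports don't block AtomicOps on egress */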
3582 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3583 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3584 &ctl2);
3585 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3586 return -EINVAL;
3587 }
3588
3589 bus = bus->parent;
3590 }
3591
3592 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3593 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3594 return 0;
3595}
3596EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3597
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INT pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0.
 */
3609u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3610{
3611 int slot;
3612
3613 if (pci_ari_enabled(dev->bus))
3614 slot = 0;
3615 else
3616 slot = PCI_SLOT(dev->devfn);
3617
3618 return (((pin - 1) + slot) % 4) + 1;
3619}
3620
3621int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3622{
3623 u8 pin;
3624
3625 pin = dev->pin;
3626 if (!pin)
3627 return -1;
3628
3629 while (!pci_is_root_bus(dev->bus)) {
3630 pin = pci_swizzle_interrupt_pin(dev, pin);
3631 dev = dev->bus->self;
3632 }
3633 *bridge = dev;
3634 return pin;
3635}
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3646{
3647 u8 pin = *pinp;
3648
3649 while (!pci_is_root_bus(dev->bus)) {
3650 pin = pci_swizzle_interrupt_pin(dev, pin);
3651 dev = dev->bus->self;
3652 }
3653 *pinp = pin;
3654 return PCI_SLOT(dev->devfn);
3655}
3656EXPORT_SYMBOL_GPL(pci_common_swizzle);
3657
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region().  Call this function only
 * after all use of the PCI regions has ceased.
 */
3668void pci_release_region(struct pci_dev *pdev, int bar)
3669{
3670 struct pci_devres *dr;
3671
3672 if (pci_resource_len(pdev, bar) == 0)
3673 return;
3674 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3675 release_region(pci_resource_start(pdev, bar),
3676 pci_resource_len(pdev, bar));
3677 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3678 release_mem_region(pci_resource_start(pdev, bar),
3679 pci_resource_len(pdev, bar));
3680
3681 dr = find_pci_dr(pdev);
3682 if (dr)
3683 dr->region_mask &= ~(1 << bar);
3684}
3685EXPORT_SYMBOL(pci_release_region);
3686
/**
 * __pci_request_region - Reserved PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3706static int __pci_request_region(struct pci_dev *pdev, int bar,
3707 const char *res_name, int exclusive)
3708{
3709 struct pci_devres *dr;
3710
3711 if (pci_resource_len(pdev, bar) == 0)
3712 return 0;
3713
3714 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3715 if (!request_region(pci_resource_start(pdev, bar),
3716 pci_resource_len(pdev, bar), res_name))
3717 goto err_out;
3718 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3719 if (!__request_mem_region(pci_resource_start(pdev, bar),
3720 pci_resource_len(pdev, bar), res_name,
3721 exclusive))
3722 goto err_out;
3723 }
3724
3725 dr = find_pci_dr(pdev);
3726 if (dr)
3727 dr->region_mask |= 1 << bar;
3728
3729 return 0;
3730
3731err_out:
3732 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3733 &pdev->resource[bar]);
3734 return -EBUSY;
3735}
3736
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3751int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3752{
3753 return __pci_request_region(pdev, bar, res_name, 0);
3754}
3755EXPORT_SYMBOL(pci_request_region);
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3766{
3767 int i;
3768
3769 for (i = 0; i < 6; i++)
3770 if (bars & (1 << i))
3771 pci_release_region(pdev, i);
3772}
3773EXPORT_SYMBOL(pci_release_selected_regions);
3774
3775static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3776 const char *res_name, int excl)
3777{
3778 int i;
3779
3780 for (i = 0; i < 6; i++)
3781 if (bars & (1 << i))
3782 if (__pci_request_region(pdev, i, res_name, excl))
3783 goto err_out;
3784 return 0;
3785
3786err_out:
3787 while (--i >= 0)
3788 if (bars & (1 << i))
3789 pci_release_region(pdev, i);
3790
3791 return -EBUSY;
3792}
3793
3794
3795
3796
3797
3798
3799
3800
3801int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3802 const char *res_name)
3803{
3804 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3805}
3806EXPORT_SYMBOL(pci_request_selected_regions);
3807
3808int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3809 const char *res_name)
3810{
3811 return __pci_request_selected_regions(pdev, bars, res_name,
3812 IORESOURCE_EXCLUSIVE);
3813}
3814EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826void pci_release_regions(struct pci_dev *pdev)
3827{
3828 pci_release_selected_regions(pdev, (1 << 6) - 1);
3829}
3830EXPORT_SYMBOL(pci_release_regions);
3831
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3845int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3846{
3847 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3848}
3849EXPORT_SYMBOL(pci_request_regions);
3850
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name.  Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that /dev/mem
 * and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */
3866int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3867{
3868 return pci_request_selected_regions_exclusive(pdev,
3869 ((1 << 6) - 1), res_name);
3870}
3871EXPORT_SYMBOL(pci_request_regions_exclusive);
3872
3873
3874
3875
3876
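/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise
 */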
3877int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3878 resource_size_t size)
3879{
3880 int ret = 0;
3881#ifdef PCI_IOBASE
3882 struct logic_pio_hwaddr *range;
3883
3884 if (!size || addr + size < addr)
3885 return -EINVAL;
3886
3887 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3888 if (!range)
3889 return -ENOMEM;
3890
3891 range->fwnode = fwnode;
3892 range->size = size;
3893 range->hw_start = addr;
3894 range->flags = LOGIC_PIO_CPU_MMIO;
3895
3896 ret = logic_pio_register_range(range);
3897 if (ret)
3898 kfree(range);
3899#endif
3900
3901 return ret;
3902}
3903
3904phys_addr_t pci_pio_to_address(unsigned long pio)
3905{
3906 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3907
3908#ifdef PCI_IOBASE
3909 if (pio >= MMIO_UPPER_LIMIT)
3910 return address;
3911
3912 address = logic_pio_to_hwaddr(pio);
3913#endif
3914
3915 return address;
3916}
3917
3918unsigned long __weak pci_address_to_pio(phys_addr_t address)
3919{
3920#ifdef PCI_IOBASE
3921 return logic_pio_trans_cpuaddr(address);
3922#else
3923 if (address > IO_SPACE_LIMIT)
3924 return (unsigned long)-1;
3925
3926 return (unsigned long) address;
3927#endif
3928}
3929
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res and the CPU
 * physical address @phys_addr into virtual address space.  Only
 * architectures that have memory mapped IO functions defined (and the
 * PCI_IOBASE value defined) should call this function.
 */
3940int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3941{
3942#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3943 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3944
3945 if (!(res->flags & IORESOURCE_IO))
3946 return -EINVAL;
3947
3948 if (res->end > IO_SPACE_LIMIT)
3949 return -EINVAL;
3950
3951 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3952 pgprot_device(PAGE_KERNEL));
3953#else
3954
3955
3956
3957
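	/*
	 * This architecture does not have memory mapped I/O space,
	 * so this function should never be called.
	 */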
3958 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3959 return -ENODEV;
3960#endif
3961}
3962EXPORT_SYMBOL(pci_remap_iospace);
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972void pci_unmap_iospace(struct resource *res)
3973{
3974#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3975 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3976
3977 unmap_kernel_range(vaddr, resource_size(res));
3978#endif
3979}
3980EXPORT_SYMBOL(pci_unmap_iospace);
3981
3982static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
3983{
3984 struct resource **res = ptr;
3985
3986 pci_unmap_iospace(*res);
3987}
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
3999 phys_addr_t phys_addr)
4000{
4001 const struct resource **ptr;
4002 int error;
4003
4004 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4005 if (!ptr)
4006 return -ENOMEM;
4007
4008 error = pci_remap_iospace(res, phys_addr);
4009 if (error) {
4010 devres_free(ptr);
4011 } else {
4012 *ptr = res;
4013 devres_add(dev, ptr);
4014 }
4015
4016 return error;
4017}
4018EXPORT_SYMBOL(devm_pci_remap_iospace);
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4030 resource_size_t offset,
4031 resource_size_t size)
4032{
4033 void __iomem **ptr, *addr;
4034
4035 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4036 if (!ptr)
4037 return NULL;
4038
4039 addr = pci_remap_cfgspace(offset, size);
4040 if (addr) {
4041 *ptr = addr;
4042 devres_add(dev, ptr);
4043 } else
4044 devres_free(ptr);
4045
4046 return addr;
4047}
4048EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4049
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
 * code on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
4069void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4070 struct resource *res)
4071{
4072 resource_size_t size;
4073 const char *name;
4074 void __iomem *dest_ptr;
4075
4076 BUG_ON(!dev);
4077
4078 if (!res || resource_type(res) != IORESOURCE_MEM) {
4079 dev_err(dev, "invalid resource\n");
4080 return IOMEM_ERR_PTR(-EINVAL);
4081 }
4082
4083 size = resource_size(res);
4084 name = res->name ?: dev_name(dev);
4085
4086 if (!devm_request_mem_region(dev, res->start, size, name)) {
4087 dev_err(dev, "can't request region for resource %pR\n", res);
4088 return IOMEM_ERR_PTR(-EBUSY);
4089 }
4090
4091 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4092 if (!dest_ptr) {
4093 dev_err(dev, "ioremap failed for resource %pR\n", res);
4094 devm_release_mem_region(dev, res->start, size);
4095 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4096 }
4097
4098 return dest_ptr;
4099}
4100EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4101
4102static void __pci_set_master(struct pci_dev *dev, bool enable)
4103{
4104 u16 old_cmd, cmd;
4105
4106 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4107 if (enable)
4108 cmd = old_cmd | PCI_COMMAND_MASTER;
4109 else
4110 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4111 if (cmd != old_cmd) {
4112 pci_dbg(dev, "%s bus mastering\n",
4113 enable ? "enabling" : "disabling");
4114 pci_write_config_word(dev, PCI_COMMAND, cmd);
4115 }
4116 dev->is_busmaster = enable;
4117}
4118
4119
4120
4121
4122
4123
4124
4125
4126char * __weak __init pcibios_setup(char *str)
4127{
4128 return str;
4129}
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139void __weak pcibios_set_master(struct pci_dev *dev)
4140{
4141 u8 lat;
4142
4143
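	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */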
4144 if (pci_is_pcie(dev))
4145 return;
4146
4147 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4148 if (lat < 16)
4149 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4150 else if (lat > pcibios_max_latency)
4151 lat = pcibios_max_latency;
4152 else
4153 return;
4154
4155 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4156}
4157
4158
4159
4160
4161
4162
4163
4164
4165void pci_set_master(struct pci_dev *dev)
4166{
4167 __pci_set_master(dev, true);
4168 pcibios_set_master(dev);
4169}
4170EXPORT_SYMBOL(pci_set_master);
4171
4172
4173
4174
4175
4176void pci_clear_master(struct pci_dev *dev)
4177{
4178 __pci_set_master(dev, false);
4179}
4180EXPORT_SYMBOL(pci_clear_master);
4181
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4192int pci_set_cacheline_size(struct pci_dev *dev)
4193{
4194 u8 cacheline_size;
4195
4196 if (!pci_cache_line_size)
4197 return -EINVAL;
4198
4199
4200
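	/*
	 * Validate current setting: the PCI_CACHE_LINE_SIZE must be
	 * equal to or multiple of the right value.
	 */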
4201 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4202 if (cacheline_size >= pci_cache_line_size &&
4203 (cacheline_size % pci_cache_line_size) == 0)
4204 return 0;
4205
4206
4207 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4208
4209 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4210 if (cacheline_size == pci_cache_line_size)
4211 return 0;
4212
4213 pci_info(dev, "cache line size of %d is not supported\n",
4214 pci_cache_line_size << 2);
4215
4216 return -EINVAL;
4217}
4218EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4219
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
4228int pci_set_mwi(struct pci_dev *dev)
4229{
4230#ifdef PCI_DISABLE_MWI
4231 return 0;
4232#else
4233 int rc;
4234 u16 cmd;
4235
4236 rc = pci_set_cacheline_size(dev);
4237 if (rc)
4238 return rc;
4239
4240 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4241 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4242 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4243 cmd |= PCI_COMMAND_INVALIDATE;
4244 pci_write_config_word(dev, PCI_COMMAND, cmd);
4245 }
4246 return 0;
4247#endif
4248}
4249EXPORT_SYMBOL(pci_set_mwi);
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259int pcim_set_mwi(struct pci_dev *dev)
4260{
4261 struct pci_devres *dr;
4262
4263 dr = find_pci_dr(dev);
4264 if (!dr)
4265 return -ENOMEM;
4266
4267 dr->mwi = 1;
4268 return pci_set_mwi(dev);
4269}
4270EXPORT_SYMBOL(pcim_set_mwi);
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281int pci_try_set_mwi(struct pci_dev *dev)
4282{
4283#ifdef PCI_DISABLE_MWI
4284 return 0;
4285#else
4286 return pci_set_mwi(dev);
4287#endif
4288}
4289EXPORT_SYMBOL(pci_try_set_mwi);
4290
4291
4292
4293
4294
4295
4296
4297void pci_clear_mwi(struct pci_dev *dev)
4298{
4299#ifndef PCI_DISABLE_MWI
4300 u16 cmd;
4301
4302 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4303 if (cmd & PCI_COMMAND_INVALIDATE) {
4304 cmd &= ~PCI_COMMAND_INVALIDATE;
4305 pci_write_config_word(dev, PCI_COMMAND, cmd);
4306 }
4307#endif
4308}
4309EXPORT_SYMBOL(pci_clear_mwi);
4310
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */
4318void pci_intx(struct pci_dev *pdev, int enable)
4319{
4320 u16 pci_command, new;
4321
4322 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4323
4324 if (enable)
4325 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4326 else
4327 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4328
4329 if (new != pci_command) {
4330 struct pci_devres *dr;
4331
4332 pci_write_config_word(pdev, PCI_COMMAND, new);
4333
4334 dr = find_pci_dr(pdev);
4335 if (dr && !dr->restore_intx) {
4336 dr->restore_intx = 1;
4337 dr->orig_intx = !enable;
4338 }
4339 }
4340}
4341EXPORT_SYMBOL_GPL(pci_intx);
4342
4343static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4344{
4345 struct pci_bus *bus = dev->bus;
4346 bool mask_updated = true;
4347 u32 cmd_status_dword;
4348 u16 origcmd, newcmd;
4349 unsigned long flags;
4350 bool irq_pending;
4351
4352
4353
4354
4355
4356 BUILD_BUG_ON(PCI_COMMAND % 4);
4357 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4358
4359 raw_spin_lock_irqsave(&pci_lock, flags);
4360
4361 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4362
4363 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4364
4365
4366
4367
4368
4369
4370 if (mask != irq_pending) {
4371 mask_updated = false;
4372 goto done;
4373 }
4374
4375 origcmd = cmd_status_dword;
4376 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4377 if (mask)
4378 newcmd |= PCI_COMMAND_INTX_DISABLE;
4379 if (newcmd != origcmd)
4380 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4381
4382done:
4383 raw_spin_unlock_irqrestore(&pci_lock, flags);
4384
4385 return mask_updated;
4386}
4387
4388
4389
4390
4391
4392
4393
4394
4395bool pci_check_and_mask_intx(struct pci_dev *dev)
4396{
4397 return pci_check_and_set_intx_mask(dev, true);
4398}
4399EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409bool pci_check_and_unmask_intx(struct pci_dev *dev)
4410{
4411 return pci_check_and_set_intx_mask(dev, false);
4412}
4413EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
4414
4415
4416
4417
4418
4419
4420
4421int pci_wait_for_pending_transaction(struct pci_dev *dev)
4422{
4423 if (!pci_is_pcie(dev))
4424 return 1;
4425
4426 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4427 PCI_EXP_DEVSTA_TRPND);
4428}
4429EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4430
4431static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
4432{
4433 int delay = 1;
4434 u32 id;
4435
	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
4448 pci_read_config_dword(dev, PCI_COMMAND, &id);
4449 while (id == ~0) {
4450 if (delay > timeout) {
4451 pci_warn(dev, "not ready %dms after %s; giving up\n",
4452 delay - 1, reset_type);
4453 return -ENOTTY;
4454 }
4455
4456 if (delay > 1000)
4457 pci_info(dev, "not ready %dms after %s; waiting\n",
4458 delay - 1, reset_type);
4459
4460 msleep(delay);
4461 delay *= 2;
4462 pci_read_config_dword(dev, PCI_COMMAND, &id);
4463 }
4464
4465 if (delay > 1000)
4466 pci_info(dev, "ready %dms after %s\n", delay - 1,
4467 reset_type);
4468
4469 return 0;
4470}
4471
/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev: device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */
4479bool pcie_has_flr(struct pci_dev *dev)
4480{
4481 u32 cap;
4482
4483 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4484 return false;
4485
4486 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4487 return cap & PCI_EXP_DEVCAP_FLR;
4488}
4489EXPORT_SYMBOL_GPL(pcie_has_flr);
4490
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */
4499int pcie_flr(struct pci_dev *dev)
4500{
4501 if (!pci_wait_for_pending_transaction(dev))
4502 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4503
4504 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4505
4506 if (dev->imm_ready)
4507 return 0;
4508
4509
4510
4511
4512
4513
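	/*
	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */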
4514 msleep(100);
4515
4516 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4517}
4518EXPORT_SYMBOL_GPL(pcie_flr);
4519
4520static int pci_af_flr(struct pci_dev *dev, int probe)
4521{
4522 int pos;
4523 u8 cap;
4524
4525 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4526 if (!pos)
4527 return -ENOTTY;
4528
4529 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4530 return -ENOTTY;
4531
4532 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4533 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4534 return -ENOTTY;
4535
4536 if (probe)
4537 return 0;
4538
4539
4540
4541
4542
4543
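	/*
	 * Wait for Transaction Pending bit to clear.  A word-aligned test
	 * is used, so we use the control offset rather than status and shift
	 * the test bit to match.
	 */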
4544 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4545 PCI_AF_STATUS_TP << 8))
4546 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4547
4548 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4549
4550 if (dev->imm_ready)
4551 return 0;
4552
4553
4554
4555
4556
4557
4558
4559 msleep(100);
4560
4561 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4562}
4563
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in the D3cold power
 * state, it can only be reset this way.
 *
 * If it doesn't support PCI PM, PM reset may still be used, but the device
 * doesn't appear to be reset in that case.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field is different from that).
 */
4579static int pci_pm_reset(struct pci_dev *dev, int probe)
4580{
4581 u16 csr;
4582
4583 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4584 return -ENOTTY;
4585
4586 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4587 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4588 return -ENOTTY;
4589
4590 if (probe)
4591 return 0;
4592
4593 if (dev->current_state != PCI_D0)
4594 return -EINVAL;
4595
4596 csr &= ~PCI_PM_CTRL_STATE_MASK;
4597 csr |= PCI_D3hot;
4598 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4599 pci_dev_d3_sleep(dev);
4600
4601 csr &= ~PCI_PM_CTRL_STATE_MASK;
4602 csr |= PCI_D0;
4603 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4604 pci_dev_d3_sleep(dev);
4605
4606 return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
4607}
4608
4609
4610
4611
4612
4613
4614
4615bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4616{
4617 int timeout = 1000;
4618 bool ret;
4619 u16 lnk_status;
4620
4621
4622
4623
4624
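	/*
	 * Some controllers might not implement link active reporting.  In
	 * this case, we wait for 1000 ms + any delay requested by the caller.
	 */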
4625 if (!pdev->link_active_reporting) {
4626 msleep(1100);
4627 return true;
4628 }
4629
	/*
	 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
	 * after which we should expect a link active if the reset was
	 * successful.  If so, software must wait a minimum 100ms before sending
	 * configuration requests to devices downstream this port.
	 *
	 * If the link fails to activate, either the device was physically
	 * removed or the link is permanently failed.
	 */
4639 if (active)
4640 msleep(20);
4641 for (;;) {
4642 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4643 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4644 if (ret == active)
4645 break;
4646 if (timeout <= 0)
4647 break;
4648 msleep(10);
4649 timeout -= 10;
4650 }
4651 if (active && ret)
4652 msleep(100);
4653 else if (ret != active)
4654 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4655 active ? "set" : "cleared");
4656 return ret == active;
4657}
4658
4659void pci_reset_secondary_bus(struct pci_dev *dev)
4660{
4661 u16 ctrl;
4662
4663 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4664 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4665 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4666
4667
4668
4669
4670
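	/*
	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
	 * this to 2ms to ensure that we meet the minimum requirement.
	 */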
4671 msleep(2);
4672
4673 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4674 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4675
	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.
	 * Assuming a minimum 33MHz clock this results in a 1s
	 * delay before we can consider subordinate devices to
	 * be re-initialized.  PCIe has some ways to shorten this,
	 * but we don't make use of them yet.
	 */
4683 ssleep(1);
4684}
4685
4686void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4687{
4688 pci_reset_secondary_bus(dev);
4689}
4690
4691
4692
4693
4694
4695
4696
4697
4698int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4699{
4700 pcibios_reset_secondary_bus(dev);
4701
4702 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4703}
4704EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4705
4706static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4707{
4708 struct pci_dev *pdev;
4709
4710 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4711 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4712 return -ENOTTY;
4713
4714 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4715 if (pdev != dev)
4716 return -ENOTTY;
4717
4718 if (probe)
4719 return 0;
4720
4721 return pci_bridge_secondary_bus_reset(dev->bus->self);
4722}
4723
4724static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4725{
4726 int rc = -ENOTTY;
4727
4728 if (!hotplug || !try_module_get(hotplug->owner))
4729 return rc;
4730
4731 if (hotplug->ops->reset_slot)
4732 rc = hotplug->ops->reset_slot(hotplug, probe);
4733
4734 module_put(hotplug->owner);
4735
4736 return rc;
4737}
4738
4739static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4740{
4741 struct pci_dev *pdev;
4742
4743 if (dev->subordinate || !dev->slot ||
4744 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4745 return -ENOTTY;
4746
4747 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4748 if (pdev != dev && pdev->slot == dev->slot)
4749 return -ENOTTY;
4750
4751 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4752}
4753
4754static void pci_dev_lock(struct pci_dev *dev)
4755{
4756 pci_cfg_access_lock(dev);
4757
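	/* block PM suspend, driver probe, etc. */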
4758 device_lock(&dev->dev);
4759}
4760
4761
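/* Return 1 on successful lock, 0 on contention */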
4762static int pci_dev_trylock(struct pci_dev *dev)
4763{
4764 if (pci_cfg_access_trylock(dev)) {
4765 if (device_trylock(&dev->dev))
4766 return 1;
4767 pci_cfg_access_unlock(dev);
4768 }
4769
4770 return 0;
4771}
4772
4773static void pci_dev_unlock(struct pci_dev *dev)
4774{
4775 device_unlock(&dev->dev);
4776 pci_cfg_access_unlock(dev);
4777}
4778
4779static void pci_dev_save_and_disable(struct pci_dev *dev)
4780{
4781 const struct pci_error_handlers *err_handler =
4782 dev->driver ? dev->driver->err_handler : NULL;
4783
4784
4785
4786
4787
4788
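	/*
	 * dev->driver->err_handler->reset_prepare() is protected against
	 * races with ->remove() by the device lock, which must be held by
	 * the caller.
	 */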
4789 if (err_handler && err_handler->reset_prepare)
4790 err_handler->reset_prepare(dev);
4791
4792
4793
4794
4795
4796
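	/*
	 * Wake-up device prior to save.  PM registers default to D0 after
	 * reset and a simple register restore doesn't reliably return
	 * to a non-D0 state anyway.
	 */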
4797 pci_set_power_state(dev, PCI_D0);
4798
4799 pci_save_state(dev);
4800
	/*
	 * Disable the device by clearing the Command register, except for
	 * INTx-disable which is set.  This not only disables MMIO and I/O port
	 * BARs, but also prevents the device from being Bus Master, preventing
	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
	 * devices, INTx-disable prevents legacy interrupts.
	 */
4807 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4808}
4809
4810static void pci_dev_restore(struct pci_dev *dev)
4811{
4812 const struct pci_error_handlers *err_handler =
4813 dev->driver ? dev->driver->err_handler : NULL;
4814
4815 pci_restore_state(dev);
4816
4817
4818
4819
4820
4821
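	/*
	 * dev->driver->err_handler->reset_done() is protected against
	 * races with ->remove() by the device lock, which must be held by
	 * the caller.
	 */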
4822 if (err_handler && err_handler->reset_done)
4823 err_handler->reset_done(dev);
4824}
4825
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4846int __pci_reset_function_locked(struct pci_dev *dev)
4847{
4848 int rc;
4849
4850 might_sleep();
4851
	/*
	 * A reset method returns -ENOTTY if it doesn't support this device
	 * and we should try the next method.
	 *
	 * If it returns 0 (success), we're finished.  If it returns any
	 * other error, we're also finished: this indicates that further
	 * reset mechanisms might be broken on the device.
	 */
4860 rc = pci_dev_specific_reset(dev, 0);
4861 if (rc != -ENOTTY)
4862 return rc;
4863 if (pcie_has_flr(dev)) {
4864 rc = pcie_flr(dev);
4865 if (rc != -ENOTTY)
4866 return rc;
4867 }
4868 rc = pci_af_flr(dev, 0);
4869 if (rc != -ENOTTY)
4870 return rc;
4871 rc = pci_pm_reset(dev, 0);
4872 if (rc != -ENOTTY)
4873 return rc;
4874 rc = pci_dev_reset_slot_function(dev, 0);
4875 if (rc != -ENOTTY)
4876 return rc;
4877 return pci_parent_bus_reset(dev, 0);
4878}
4879EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4880
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
4892int pci_probe_reset_function(struct pci_dev *dev)
4893{
4894 int rc;
4895
4896 might_sleep();
4897
4898 rc = pci_dev_specific_reset(dev, 1);
4899 if (rc != -ENOTTY)
4900 return rc;
4901 if (pcie_has_flr(dev))
4902 return 0;
4903 rc = pci_af_flr(dev, 1);
4904 if (rc != -ENOTTY)
4905 return rc;
4906 rc = pci_pm_reset(dev, 1);
4907 if (rc != -ENOTTY)
4908 return rc;
4909 rc = pci_dev_reset_slot_function(dev, 1);
4910 if (rc != -ENOTTY)
4911 return rc;
4912
4913 return pci_parent_bus_reset(dev, 1);
4914}
4915
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device
 * state over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4932int pci_reset_function(struct pci_dev *dev)
4933{
4934 int rc;
4935
4936 if (!dev->reset_fn)
4937 return -ENOTTY;
4938
4939 pci_dev_lock(dev);
4940 pci_dev_save_and_disable(dev);
4941
4942 rc = __pci_reset_function_locked(dev);
4943
4944 pci_dev_restore(dev);
4945 pci_dev_unlock(dev);
4946
4947 return rc;
4948}
4949EXPORT_SYMBOL_GPL(pci_reset_function);
4950
/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function_locked() in that it saves and restores device
 * state over the reset.  It also differs from pci_reset_function() in that
 * it requires the PCI device lock to be held.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4968int pci_reset_function_locked(struct pci_dev *dev)
4969{
4970 int rc;
4971
4972 if (!dev->reset_fn)
4973 return -ENOTTY;
4974
4975 pci_dev_save_and_disable(dev);
4976
4977 rc = __pci_reset_function_locked(dev);
4978
4979 pci_dev_restore(dev);
4980
4981 return rc;
4982}
4983EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4984
4985
4986
4987
4988
4989
4990
4991int pci_try_reset_function(struct pci_dev *dev)
4992{
4993 int rc;
4994
4995 if (!dev->reset_fn)
4996 return -ENOTTY;
4997
4998 if (!pci_dev_trylock(dev))
4999 return -EAGAIN;
5000
5001 pci_dev_save_and_disable(dev);
5002 rc = __pci_reset_function_locked(dev);
5003 pci_dev_restore(dev);
5004 pci_dev_unlock(dev);
5005
5006 return rc;
5007}
5008EXPORT_SYMBOL_GPL(pci_try_reset_function);
5009
5010
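/* Do any devices on or below this bus prevent a bus reset? */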
5011static bool pci_bus_resetable(struct pci_bus *bus)
5012{
5013 struct pci_dev *dev;
5014
5015
5016 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5017 return false;
5018
5019 list_for_each_entry(dev, &bus->devices, bus_list) {
5020 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5021 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5022 return false;
5023 }
5024
5025 return true;
5026}
5027
5028
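/* Lock devices from the top of the tree down */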
5029static void pci_bus_lock(struct pci_bus *bus)
5030{
5031 struct pci_dev *dev;
5032
5033 list_for_each_entry(dev, &bus->devices, bus_list) {
5034 pci_dev_lock(dev);
5035 if (dev->subordinate)
5036 pci_bus_lock(dev->subordinate);
5037 }
5038}
5039
5040
5041static void pci_bus_unlock(struct pci_bus *bus)
5042{
5043 struct pci_dev *dev;
5044
5045 list_for_each_entry(dev, &bus->devices, bus_list) {
5046 if (dev->subordinate)
5047 pci_bus_unlock(dev->subordinate);
5048 pci_dev_unlock(dev);
5049 }
5050}
5051
5052
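/* Return 1 on successful lock, 0 on contention */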
5053static int pci_bus_trylock(struct pci_bus *bus)
5054{
5055 struct pci_dev *dev;
5056
5057 list_for_each_entry(dev, &bus->devices, bus_list) {
5058 if (!pci_dev_trylock(dev))
5059 goto unlock;
5060 if (dev->subordinate) {
5061 if (!pci_bus_trylock(dev->subordinate)) {
5062 pci_dev_unlock(dev);
5063 goto unlock;
5064 }
5065 }
5066 }
5067 return 1;
5068
5069unlock:
5070 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5071 if (dev->subordinate)
5072 pci_bus_unlock(dev->subordinate);
5073 pci_dev_unlock(dev);
5074 }
5075 return 0;
5076}
5077
5078
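/* Do any devices on or below this slot prevent a bus reset? */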
5079static bool pci_slot_resetable(struct pci_slot *slot)
5080{
5081 struct pci_dev *dev;
5082
5083 if (slot->bus->self &&
5084 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5085 return false;
5086
5087 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5088 if (!dev->slot || dev->slot != slot)
5089 continue;
5090 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5091 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5092 return false;
5093 }
5094
5095 return true;
5096}
5097
5098
5099static void pci_slot_lock(struct pci_slot *slot)
5100{
5101 struct pci_dev *dev;
5102
5103 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5104 if (!dev->slot || dev->slot != slot)
5105 continue;
5106 pci_dev_lock(dev);
5107 if (dev->subordinate)
5108 pci_bus_lock(dev->subordinate);
5109 }
5110}
5111
5112
5113static void pci_slot_unlock(struct pci_slot *slot)
5114{
5115 struct pci_dev *dev;
5116
5117 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5118 if (!dev->slot || dev->slot != slot)
5119 continue;
5120 if (dev->subordinate)
5121 pci_bus_unlock(dev->subordinate);
5122 pci_dev_unlock(dev);
5123 }
5124}
5125
5126
5127static int pci_slot_trylock(struct pci_slot *slot)
5128{
5129 struct pci_dev *dev;
5130
5131 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5132 if (!dev->slot || dev->slot != slot)
5133 continue;
5134 if (!pci_dev_trylock(dev))
5135 goto unlock;
5136 if (dev->subordinate) {
5137 if (!pci_bus_trylock(dev->subordinate)) {
5138 pci_dev_unlock(dev);
5139 goto unlock;
5140 }
5141 }
5142 }
5143 return 1;
5144
5145unlock:
5146 list_for_each_entry_continue_reverse(dev,
5147 &slot->bus->devices, bus_list) {
5148 if (!dev->slot || dev->slot != slot)
5149 continue;
5150 if (dev->subordinate)
5151 pci_bus_unlock(dev->subordinate);
5152 pci_dev_unlock(dev);
5153 }
5154 return 0;
5155}
5156
5157
5158
5159
5160
5161static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5162{
5163 struct pci_dev *dev;
5164
5165 list_for_each_entry(dev, &bus->devices, bus_list) {
5166 pci_dev_save_and_disable(dev);
5167 if (dev->subordinate)
5168 pci_bus_save_and_disable_locked(dev->subordinate);
5169 }
5170}
5171
5172
5173
5174
5175
5176
5177static void pci_bus_restore_locked(struct pci_bus *bus)
5178{
5179 struct pci_dev *dev;
5180
5181 list_for_each_entry(dev, &bus->devices, bus_list) {
5182 pci_dev_restore(dev);
5183 if (dev->subordinate)
5184 pci_bus_restore_locked(dev->subordinate);
5185 }
5186}
5187
5188
5189
5190
5191
5192static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5193{
5194 struct pci_dev *dev;
5195
5196 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5197 if (!dev->slot || dev->slot != slot)
5198 continue;
5199 pci_dev_save_and_disable(dev);
5200 if (dev->subordinate)
5201 pci_bus_save_and_disable_locked(dev->subordinate);
5202 }
5203}
5204
5205
5206
5207
5208
5209
5210static void pci_slot_restore_locked(struct pci_slot *slot)
5211{
5212 struct pci_dev *dev;
5213
5214 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5215 if (!dev->slot || dev->slot != slot)
5216 continue;
5217 pci_dev_restore(dev);
5218 if (dev->subordinate)
5219 pci_bus_restore_locked(dev->subordinate);
5220 }
5221}
5222
5223static int pci_slot_reset(struct pci_slot *slot, int probe)
5224{
5225 int rc;
5226
5227 if (!slot || !pci_slot_resetable(slot))
5228 return -ENOTTY;
5229
5230 if (!probe)
5231 pci_slot_lock(slot);
5232
5233 might_sleep();
5234
5235 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5236
5237 if (!probe)
5238 pci_slot_unlock(slot);
5239
5240 return rc;
5241}
5242
5243
5244
5245
5246
5247
5248
5249int pci_probe_reset_slot(struct pci_slot *slot)
5250{
5251 return pci_slot_reset(slot, 1);
5252}
5253EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5254
/**
 * __pci_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots.  For instance, some slots may support slot power
 * control.  In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * function of the slot and any subordinate buses behind the slot are reset
 * through this function.  PCI config space of all devices in the slot and
 * behind the slot is saved before and restored after reset.
 *
 * Same as above except return -EAGAIN if the slot cannot be locked
 */
5270static int __pci_reset_slot(struct pci_slot *slot)
5271{
5272 int rc;
5273
5274 rc = pci_slot_reset(slot, 1);
5275 if (rc)
5276 return rc;
5277
5278 if (pci_slot_trylock(slot)) {
5279 pci_slot_save_and_disable_locked(slot);
5280 might_sleep();
5281 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5282 pci_slot_restore_locked(slot);
5283 pci_slot_unlock(slot);
5284 } else
5285 rc = -EAGAIN;
5286
5287 return rc;
5288}
5289
5290static int pci_bus_reset(struct pci_bus *bus, int probe)
5291{
5292 int ret;
5293
5294 if (!bus->self || !pci_bus_resetable(bus))
5295 return -ENOTTY;
5296
5297 if (probe)
5298 return 0;
5299
5300 pci_bus_lock(bus);
5301
5302 might_sleep();
5303
5304 ret = pci_bridge_secondary_bus_reset(bus->self);
5305
5306 pci_bus_unlock(bus);
5307
5308 return ret;
5309}
5310
/**
 * pci_bus_error_reset - reset the bridge's subordinate bus
 * @bridge: The parent device that connects to the bus to reset
 *
 * This function will first try to reset the slots on this bus if the method is
 * available.  If slot reset fails or is not available, this will fall back to
 * a secondary bus reset.
 */
5319int pci_bus_error_reset(struct pci_dev *bridge)
5320{
5321 struct pci_bus *bus = bridge->subordinate;
5322 struct pci_slot *slot;
5323
5324 if (!bus)
5325 return -ENOTTY;
5326
5327 mutex_lock(&pci_slot_mutex);
5328 if (list_empty(&bus->slots))
5329 goto bus_reset;
5330
5331 list_for_each_entry(slot, &bus->slots, list)
5332 if (pci_probe_reset_slot(slot))
5333 goto bus_reset;
5334
5335 list_for_each_entry(slot, &bus->slots, list)
5336 if (pci_slot_reset(slot, 0))
5337 goto bus_reset;
5338
5339 mutex_unlock(&pci_slot_mutex);
5340 return 0;
5341bus_reset:
5342 mutex_unlock(&pci_slot_mutex);
5343 return pci_bus_reset(bridge->subordinate, 0);
5344}
5345
5346
5347
5348
5349
5350
5351
5352int pci_probe_reset_bus(struct pci_bus *bus)
5353{
5354 return pci_bus_reset(bus, 1);
5355}
5356EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5357
5358
5359
5360
5361
5362
5363
5364static int __pci_reset_bus(struct pci_bus *bus)
5365{
5366 int rc;
5367
5368 rc = pci_bus_reset(bus, 1);
5369 if (rc)
5370 return rc;
5371
5372 if (pci_bus_trylock(bus)) {
5373 pci_bus_save_and_disable_locked(bus);
5374 might_sleep();
5375 rc = pci_bridge_secondary_bus_reset(bus->self);
5376 pci_bus_restore_locked(bus);
5377 pci_bus_unlock(bus);
5378 } else
5379 rc = -EAGAIN;
5380
5381 return rc;
5382}
5383
5384
5385
5386
5387
5388
5389
5390int pci_reset_bus(struct pci_dev *pdev)
5391{
5392 return (!pci_probe_reset_slot(pdev->slot)) ?
5393 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5394}
5395EXPORT_SYMBOL_GPL(pci_reset_bus);
5396
5397
5398
5399
5400
5401
5402
5403
5404int pcix_get_max_mmrbc(struct pci_dev *dev)
5405{
5406 int cap;
5407 u32 stat;
5408
5409 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5410 if (!cap)
5411 return -EINVAL;
5412
5413 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5414 return -EINVAL;
5415
5416 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5417}
5418EXPORT_SYMBOL(pcix_get_max_mmrbc);
5419
5420
5421
5422
5423
5424
5425
5426
5427int pcix_get_mmrbc(struct pci_dev *dev)
5428{
5429 int cap;
5430 u16 cmd;
5431
5432 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5433 if (!cap)
5434 return -EINVAL;
5435
5436 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5437 return -EINVAL;
5438
5439 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5440}
5441EXPORT_SYMBOL(pcix_get_mmrbc);
5442
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have
 * errata that prevent this.
 */
5452int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5453{
5454 int cap;
5455 u32 stat, v, o;
5456 u16 cmd;
5457
5458 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5459 return -EINVAL;
5460
5461 v = ffs(mmrbc) - 10;
5462
5463 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5464 if (!cap)
5465 return -EINVAL;
5466
5467 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5468 return -EINVAL;
5469
5470 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5471 return -E2BIG;
5472
5473 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5474 return -EINVAL;
5475
5476 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5477 if (o != v) {
5478 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5479 return -EIO;
5480
5481 cmd &= ~PCI_X_CMD_MAX_READ;
5482 cmd |= v << 2;
5483 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5484 return -EIO;
5485 }
5486 return 0;
5487}
5488EXPORT_SYMBOL(pcix_set_mmrbc);
5489
5490
5491
5492
5493
5494
5495
5496int pcie_get_readrq(struct pci_dev *dev)
5497{
5498 u16 ctl;
5499
5500 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5501
5502 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5503}
5504EXPORT_SYMBOL(pcie_get_readrq);
5505
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
5514int pcie_set_readrq(struct pci_dev *dev, int rq)
5515{
5516 u16 v;
5517
5518 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5519 return -EINVAL;
5520
5521
5522
5523
5524
5525
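	/*
	 * If using the "performance" PCIe config, we clamp the read rq
	 * size to the max packet size to prevent the host bridge from
	 * generating requests larger than we can cope with.
	 */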
5526 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5527 int mps = pcie_get_mps(dev);
5528
5529 if (mps < rq)
5530 rq = mps;
5531 }
5532
5533 v = (ffs(rq) - 8) << 12;
5534
5535 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5536 PCI_EXP_DEVCTL_READRQ, v);
5537}
5538EXPORT_SYMBOL(pcie_set_readrq);
5539
5540
5541
5542
5543
5544
5545
5546int pcie_get_mps(struct pci_dev *dev)
5547{
5548 u16 ctl;
5549
5550 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5551
5552 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5553}
5554EXPORT_SYMBOL(pcie_get_mps);
5555
5556
5557
5558
5559
5560
5561
5562
5563
5564int pcie_set_mps(struct pci_dev *dev, int mps)
5565{
5566 u16 v;
5567
5568 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5569 return -EINVAL;
5570
5571 v = ffs(mps) - 8;
5572 if (v > dev->pcie_mpss)
5573 return -EINVAL;
5574 v <<= 5;
5575
5576 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5577 PCI_EXP_DEVCTL_PAYLOAD, v);
5578}
5579EXPORT_SYMBOL(pcie_set_mps);
5580
/**
 * pcie_bandwidth_available - determine minimum link settings of a PCIe
 *			      device and its bandwidth limitation
 * @dev: PCI device to query
 * @limiting_dev: storage for device causing the bandwidth limitation
 * @speed: storage for speed of limiting device
 * @width: storage for width of limiting device
 *
 * Walk up the PCI device chain and find the point where the minimum
 * bandwidth is available.  Return the bandwidth available there and (if
 * limiting_dev, speed, and width pointers are supplied) information about
 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
 * raw bandwidth.
 */
5595u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5596 enum pci_bus_speed *speed,
5597 enum pcie_link_width *width)
5598{
5599 u16 lnksta;
5600 enum pci_bus_speed next_speed;
5601 enum pcie_link_width next_width;
5602 u32 bw, next_bw;
5603
5604 if (speed)
5605 *speed = PCI_SPEED_UNKNOWN;
5606 if (width)
5607 *width = PCIE_LNK_WIDTH_UNKNOWN;
5608
5609 bw = 0;
5610
5611 while (dev) {
5612 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5613
5614 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5615 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5616 PCI_EXP_LNKSTA_NLW_SHIFT;
5617
5618 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5619
5620
5621 if (!bw || next_bw <= bw) {
5622 bw = next_bw;
5623
5624 if (limiting_dev)
5625 *limiting_dev = dev;
5626 if (speed)
5627 *speed = next_speed;
5628 if (width)
5629 *width = next_width;
5630 }
5631
5632 dev = pci_upstream_bridge(dev);
5633 }
5634
5635 return bw;
5636}
5637EXPORT_SYMBOL(pcie_bandwidth_available);
5638
5639
5640
5641
5642
5643
5644
5645
5646enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5647{
5648 u32 lnkcap2, lnkcap;
5649
	/*
	 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
	 * Speeds Vector in Link Capabilities 2 when supported.
	 *
	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
	 * should use the Supported Link Speeds field in Link Capabilities,
	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
	 */
5659 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5660 if (lnkcap2) {
5661 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
5662 return PCIE_SPEED_32_0GT;
5663 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
5664 return PCIE_SPEED_16_0GT;
5665 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5666 return PCIE_SPEED_8_0GT;
5667 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5668 return PCIE_SPEED_5_0GT;
5669 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5670 return PCIE_SPEED_2_5GT;
5671 return PCI_SPEED_UNKNOWN;
5672 }
5673
5674 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5675 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5676 return PCIE_SPEED_5_0GT;
5677 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5678 return PCIE_SPEED_2_5GT;
5679
5680 return PCI_SPEED_UNKNOWN;
5681}
5682EXPORT_SYMBOL(pcie_get_speed_cap);
5683
5684
5685
5686
5687
5688
5689
5690
5691enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5692{
5693 u32 lnkcap;
5694
5695 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5696 if (lnkcap)
5697 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5698
5699 return PCIE_LNK_WIDTH_UNKNOWN;
5700}
5701EXPORT_SYMBOL(pcie_get_width_cap);
5702
5703
5704
5705
5706
5707
5708
5709
5710
5711
5712
5713u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5714 enum pcie_link_width *width)
5715{
5716 *speed = pcie_get_speed_cap(dev);
5717 *width = pcie_get_width_cap(dev);
5718
5719 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5720 return 0;
5721
5722 return *width * PCIE_SPEED2MBS_ENC(*speed);
5723}
5724
5725
5726
5727
5728
5729
5730
5731
5732
5733
5734
5735void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5736{
5737 enum pcie_link_width width, width_cap;
5738 enum pci_bus_speed speed, speed_cap;
5739 struct pci_dev *limiting_dev = NULL;
5740 u32 bw_avail, bw_cap;
5741
5742 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5743 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5744
5745 if (bw_avail >= bw_cap && verbose)
5746 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5747 bw_cap / 1000, bw_cap % 1000,
5748 PCIE_SPEED2STR(speed_cap), width_cap);
5749 else if (bw_avail < bw_cap)
5750 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5751 bw_avail / 1000, bw_avail % 1000,
5752 PCIE_SPEED2STR(speed), width,
5753 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5754 bw_cap / 1000, bw_cap % 1000,
5755 PCIE_SPEED2STR(speed_cap), width_cap);
5756}
5757
5758
5759
5760
5761
5762
5763
5764void pcie_print_link_status(struct pci_dev *dev)
5765{
5766 __pcie_print_link_status(dev, true);
5767}
5768EXPORT_SYMBOL(pcie_print_link_status);
5769
5770
5771
5772
5773
5774
5775
5776
5777int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5778{
5779 int i, bars = 0;
5780 for (i = 0; i < PCI_NUM_RESOURCES; i++)
5781 if (pci_resource_flags(dev, i) & flags)
5782 bars |= (1 << i);
5783 return bars;
5784}
5785EXPORT_SYMBOL(pci_select_bars);
5786
5787
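/* Some architectures require additional programming to enable VGA */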
5788static arch_set_vga_state_t arch_set_vga_state;
5789
5790void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5791{
5792 arch_set_vga_state = func;
5793}
5794
5795static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5796 unsigned int command_bits, u32 flags)
5797{
5798 if (arch_set_vga_state)
5799 return arch_set_vga_state(dev, decode, command_bits,
5800 flags);
5801 return 0;
5802}
5803
5804
5805
5806
5807
5808
5809
5810
5811
5812int pci_set_vga_state(struct pci_dev *dev, bool decode,
5813 unsigned int command_bits, u32 flags)
5814{
5815 struct pci_bus *bus;
5816 struct pci_dev *bridge;
5817 u16 cmd;
5818 int rc;
5819
	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));
5821
5822
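	/* ARCH specific VGA enables */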
5823 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5824 if (rc)
5825 return rc;
5826
5827 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5828 pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode)
5830 cmd |= command_bits;
5831 else
5832 cmd &= ~command_bits;
5833 pci_write_config_word(dev, PCI_COMMAND, cmd);
5834 }
5835
5836 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
5837 return 0;
5838
5839 bus = dev->bus;
5840 while (bus) {
5841 bridge = bus->self;
5842 if (bridge) {
5843 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5844 &cmd);
			if (decode)
5846 cmd |= PCI_BRIDGE_CTL_VGA;
5847 else
5848 cmd &= ~PCI_BRIDGE_CTL_VGA;
5849 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5850 cmd);
5851 }
5852 bus = bus->parent;
5853 }
5854 return 0;
5855}
5856
/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn: alias slot and function
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
 * which is used to program permissible bus-devfn source addresses for DMA
 * requests in an IOMMU.  These aliases factor into IOMMU group creation
 * and are useful for devices generating DMA requests beyond or different
 * from their logical bus-devfn.  Examples include device quirks where the
 * device simply uses the wrong devfn, as well as non-transparent bridges
 * where the alias may be a proxy for devices in another domain.
 *
 * IOMMU group creation is performed during device discovery or addition,
 * prior to any potential DMA mapping and therefore prior to driver probing
 * (especially for userspace assigned devices where IOMMU group definition
 * cannot be left as a userspace activity).  DMA aliases should therefore
 * be configured via quirks, such as the PCI fixup header quirk.
 */
5876void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5877{
	/* devfn spans 0-255, so the alias bitmap needs U8_MAX + 1 bits */
	if (!dev->dma_alias_mask)
		dev->dma_alias_mask = bitmap_zalloc(U8_MAX + 1, GFP_KERNEL);
5880 if (!dev->dma_alias_mask) {
5881 pci_warn(dev, "Unable to allocate DMA alias mask\n");
5882 return;
5883 }
5884
5885 set_bit(devfn, dev->dma_alias_mask);
5886 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
5887 PCI_SLOT(devfn), PCI_FUNC(devfn));
5888}
5889
5890bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5891{
5892 return (dev1->dma_alias_mask &&
5893 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5894 (dev2->dma_alias_mask &&
5895 test_bit(dev1->devfn, dev2->dma_alias_mask));
5896}
5897
5898bool pci_device_is_present(struct pci_dev *pdev)
5899{
5900 u32 v;
5901
5902 if (pci_dev_is_disconnected(pdev))
5903 return false;
5904 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5905}
5906EXPORT_SYMBOL_GPL(pci_device_is_present);

void pci_ignore_hotplug(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;

	dev->ignore_hotplug = 1;

	/* Propagate the setting to the parent bridge as well */
	if (bridge)
		bridge->ignore_hotplug = 1;
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

resource_size_t __weak pcibios_default_alignment(void)
{
	return 0;
}

/*
 * Arches that don't want to expose struct resource to userland as-is in
 * sysfs and /proc can implement their own pci_resource_to_user().
 */
void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end)
{
	*start = rsrc->start;
	*end = rsrc->end;
}
static char *resource_alignment_param;
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
							bool *resize)
{
	int align_order, count;
	resource_size_t align = pcibios_default_alignment();
	const char *p;
	int ret;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	if (!p || !*p)
		goto out;
	if (pci_has_flag(PCI_PROBE_ONLY)) {
		align = 0;
		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
		goto out;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
			/*
			 * A shift count outside 0..63 would overflow the
			 * shift below; fall back to the PAGE_SIZE default.
			 */
			if (align_order < 0 || align_order > 63) {
				pr_err("PCI: Invalid requested alignment (order %d)\n",
				       align_order);
				align_order = -1;
			}
		} else {
			align_order = -1;
		}

		ret = pci_dev_str_match(dev, p, &p);
		if (ret == 1) {
			*resize = true;
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				/* 1ULL avoids undefined 32-bit int shifts */
				align = 1ULL << align_order;
			break;
		} else if (ret < 0) {
			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
			       p);
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
out:
	spin_unlock(&resource_alignment_lock);
	return align;
}
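/*
 * Example (hedged): the parameter takes "[<order>@]<device>[; ...]",
 * where <device> is either a BDF like 0000:00:1f.0 or a
 * "pci:<vendor>:<device>" match handled by pci_dev_str_match(); e.g.
 * booting with
 *
 *	pci=resource_alignment=20@0000:00:1f.0
 *
 * requests 2^20 (1 MiB) alignment for that device's memory BARs, while
 * omitting "20@" falls back to PAGE_SIZE alignment.
 */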

static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
					   resource_size_t align, bool resize)
{
	struct resource *r = &dev->resource[bar];
	resource_size_t size;

	if (!(r->flags & IORESOURCE_MEM))
		return;

	if (r->flags & IORESOURCE_PCI_FIXED) {
		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
			 bar, r, (unsigned long long)align);
		return;
	}

	size = resource_size(r);
	if (size >= align)
		return;

	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * resources and use the second method.
	 */
	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
		 bar, r, (unsigned long long)align);

	if (resize) {
		r->start = 0;
		r->end = align - 1;
	} else {
		r->flags &= ~IORESOURCE_SIZEALIGN;
		r->flags |= IORESOURCE_STARTALIGN;
		r->start = align;
		r->end = r->start + size - 1;
	}
	r->flags |= IORESOURCE_UNSET;
}

/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align;
	u16 command;
	bool resize = false;

	/*
	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
	 * 3.4.1.11.  Their resources are allocated from the space
	 * described by the VF BARx register in the PF's SR-IOV capability.
	 * There's no way to influence their alignment, so we can't change
	 * it.
	 */
	if (dev->is_virtfn)
		return;

	/* Check if alignment was requested for this device */
	align = pci_specified_resource_alignment(dev, &resize);
	if (!align)
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		pci_warn(dev, "Can't reassign resources to host bridge\n");
		return;
	}

	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		pci_request_resource_alignment(dev, i, align, resize);

	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}

static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
{
	size_t count = 0;

	spin_lock(&resource_alignment_lock);
	if (resource_alignment_param)
		count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);

	/*
	 * When set from the command line, resource_alignment_param has
	 * no trailing newline; append one so the sysfs output reads as
	 * a complete line without truncating the stored string.
	 */
	if (count && count < PAGE_SIZE - 1 && buf[count - 1] != '\n')
		buf[count++] = '\n';

	return count;
}

static ssize_t resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	char *param = kstrndup(buf, count, GFP_KERNEL);

	if (!param)
		return -ENOMEM;

	spin_lock(&resource_alignment_lock);
	kfree(resource_alignment_param);
	resource_alignment_param = param;
	spin_unlock(&resource_alignment_lock);
	return count;
}
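/*
 * Example (hedged): as a bus attribute on pci_bus_type, this knob shows
 * up as /sys/bus/pci/resource_alignment and can be changed at run time;
 * the alignment only takes effect for devices (re)scanned afterwards,
 * e.g.:
 *
 *	echo "20@0000:00:1f.0" > /sys/bus/pci/resource_alignment
 *	echo 1 > /sys/bus/pci/devices/0000:00:1f.0/remove
 *	echo 1 > /sys/bus/pci/rescan
 */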

static BUS_ATTR_RW(resource_alignment);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
			       &bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);

static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

#ifdef CONFIG_PCI_DOMAINS_GENERIC
static atomic_t __domain_nr = ATOMIC_INIT(-1);

static int pci_get_new_domain_nr(void)
{
	return atomic_inc_return(&__domain_nr);
}

static int of_pci_bus_find_domain_nr(struct device *parent)
{
	static int use_dt_domains = -1;
	int domain = -1;

	if (parent)
		domain = of_get_pci_domain_nr(parent->of_node);

	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by using
	 * pci_get_new_domain_nr() and update the use_dt_domains value to
	 * keep track of the method we are using to assign domain numbers
	 * (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
	if (domain >= 0 && use_dt_domains) {
		use_dt_domains = 1;
	} else if (domain < 0 && use_dt_domains != 1) {
		use_dt_domains = 0;
		domain = pci_get_new_domain_nr();
	} else {
		if (parent)
			pr_err("Node %pOF has ", parent->of_node);
		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
		domain = -1;
	}

	return domain;
}
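/*
 * Example (hedged): a devicetree host-bridge node opts into fixed domain
 * numbering via the "linux,pci-domain" property read above, e.g. a
 * "linux,pci-domain = <0>;" entry in the PCIe controller node.  Mixing
 * nodes with and without the property trips the error path above.
 */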

int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
{
	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
			       acpi_pci_bus_find_domain_nr(bus);
}
#endif

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}

void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strncmp(str, "noats", 5)) {
				pr_info("PCIe: ATS is disabled\n");
				pcie_ats_disabled = true;
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "earlydump")) {
				pci_early_dump = true;
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				resource_alignment_param = str + 19;
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
				disable_acs_redir_param = str + 18;
			} else {
				pr_err("PCI: Unknown option `%s'\n", str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
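/*
 * Example (hedged): the options parsed above combine into a single
 * comma-separated "pci=" argument on the kernel command line, and
 * memparse() accepts K/M/G suffixes for the size options, e.g.:
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=20@0000:00:1f.0
 */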

/*
 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
 * in pci_setup(), above, to point to data in the __initdata section which
 * will be freed after the init sequence is complete.  We can't allocate
 * memory in pci_setup() because some architectures do not have any memory
 * allocation service available during an early_param() call.  So we
 * allocate memory and copy the variables here before the init section is
 * freed.
 */
static int __init pci_realloc_setup_params(void)
{
	resource_alignment_param = kstrdup(resource_alignment_param,
					   GFP_KERNEL);
	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);

	return 0;
}
pure_initcall(pci_realloc_setup_params);