/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

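/*
 * Sleep for the D3hot transition delay: use the larger of the device's
 * d3_delay and the global pci_pm_d3_delay minimum, and skip the sleep
 * entirely when no delay is required.
 */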
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this per-arch setting */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this per-arch setting */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

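/*
 * Walk the capability list at @pos looking for capability ID @cap.  The
 * @ttl counter bounds the walk so that a malformed, looping capability
 * list cannot hang the CPU.
 */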
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

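/*
 * Hooks into the platform firmware power management layer (for example,
 * ACPI).  The wrappers below fall back to safe defaults whenever no
 * platform ops have been registered.
 */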
static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-time */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen will set up the device on resume and
	 * modify its PCI configuration space, so we need to restore the PCI
	 * config space in this case.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * PCI ID of the device.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
static void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by corresponding bridge,
		 * because have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			if (dev->d3cold_delay)
				msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Power off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

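/*
 * Number of PCI Express capability registers saved and restored below:
 * DEVCTL, LNKCTL, SLTCTL, RTCTL, DEVCTL2, LNKCTL2 and SLTCTL2.
 */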
#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

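/*
 * Save the PCI Express capability control registers so that they can be
 * written back by pci_restore_pcie_state() after a reset or power
 * transition.  The save buffer is preallocated by
 * pci_allocate_cap_save_buffers().
 */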
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}


static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

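/*
 * Write @saved_val back to config space at @offset, re-reading and
 * retrying up to @retry times (with a 1 ms delay between attempts) for
 * registers that may not take effect immediately after a reset.
 */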
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

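/*
 * Recursively enable, and set bus mastering on, all bridges upstream of a
 * device, top-down, so that the device itself can then be enabled.
 */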
static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wait for pending bus master requests.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wait for pending bus master requests.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wait for pending bus master requests.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture specific functionality when
 * penalizing ISA IRQs. This is the default implementation. Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

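/*
 * Clear PCI_COMMAND_MASTER if it is set and give the architecture code a
 * chance to do additional teardown; enable_cnt is not touched here.
 */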
static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}
EXPORT_SYMBOL(pci_pme_capable);

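/*
 * Work item that periodically polls the PME status of devices on
 * pci_pme_list (devices whose PME interrupts are unreliable), dropping
 * entries whose pme_poll flag has been cleared.  It reschedules itself
 * every PME_TIMEOUT milliseconds while the list is non-empty.
 */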
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
		if (pme_dev->dev->pme_poll) {
			struct pci_dev *bridge;

			bridge = pme_dev->dev->bus->self;
			/*
			 * If bridge is in low power state, the
			 * configuration space of subordinate devices
			 * may be not accessible
			 */
			if (bridge && bridge->current_state != PCI_D0)
				continue;
			pci_pme_wakeup(pme_dev->dev, NULL);
		} else {
			list_del(&pme_dev->list);
			kfree(pme_dev);
		}
	}
	if (!list_empty(&pci_pme_list))
		queue_delayed_work(system_freezable_wq, &pci_pme_work,
				   msecs_to_jiffies(PME_TIMEOUT));
	mutex_unlock(&pci_pme_list_mutex);
}

static void __pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_restore - Restore PME configuration after config space restore.
 * @dev: PCI device to update.
 */
void pci_pme_restore(struct pci_dev *dev)
{
	u16 pmcsr;

	if (!dev->pme_support)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (dev->wakeup_prepared) {
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
	} else {
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		pmcsr |= PCI_PM_CTRL_PME_STATUS;
	}
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	__pci_pme_active(dev, enable);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev) {
				dev_warn(&dev->dev, "can't enable PME#\n");
				return;
			}
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				queue_delayed_work(system_freezable_wq,
						   &pci_pme_work,
						   msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
EXPORT_SYMBOL(pci_pme_active);

/**
 * pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	int ret = 0;

	/*
	 * Bridges can only signal wakeup on behalf of subordinate devices,
	 * but that is set up elsewhere, so skip them.
	 */
	if (pci_has_subordinate(dev))
		return 0;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = platform_pci_set_wakeup(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		platform_pci_set_wakeup(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}
EXPORT_SYMBOL(pci_wake_from_d3);

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}

		return target_state;
	}

	if (!dev->pm_cap)
		target_state = PCI_D0;

	/*
	 * The device is already in D3cold even though it is not power
	 * manageable by the platform, so keep D3cold as the target state.
	 */
	if (dev->current_state == PCI_D3cold)
		target_state = PCI_D3cold;

	if (wakeup) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	bool wakeup = device_may_wakeup(&dev->dev);
	pci_power_t target_state = pci_target_state(dev, wakeup);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, wakeup);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
EXPORT_SYMBOL(pci_prepare_to_sleep);

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
EXPORT_SYMBOL(pci_back_from_sleep);

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		pci_enable_wake(dev, target_state, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_can_wakeup(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	/* PME-capable in principle, but not from the target power state */
	if (!pci_pme_capable(dev, pci_target_state(dev, false)))
		return false;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_can_wakeup(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_can_wakeup(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

/**
 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
 * @pci_dev: Device to check.
 *
 * Return 'true' if the device is runtime-suspended, the target power state of
 * the system matches the device's current power state, and the platform does
 * not request a resume of the device.  In that case the device's PME
 * configuration may also be adjusted so that it is not woken up spuriously.
 */
bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;
	bool wakeup = device_may_wakeup(dev);

	if (!pm_runtime_suspended(dev)
	    || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
	    || platform_pci_need_resume(pci_dev)
	    || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
		return false;

	/*
	 * The device is staying suspended.  If wakeup is not required,
	 * disable PME (it may have been left enabled) so that the device
	 * will not signal spurious wakeups.  Take the power lock to avoid
	 * racing with the runtime PM framework.
	 */
	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
	    !wakeup)
		__pci_pme_active(pci_dev, false);

	spin_unlock_irq(&dev->power.lock);
	return true;
}

/**
 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
 * @pci_dev: Device to handle.
 *
 * If the device is runtime suspended and wakeup-capable, enable PME for it as
 * it might have been disabled during the prepare phase of system suspend.
 */
void pci_dev_complete_resume(struct pci_dev *pci_dev)
{
	struct device *dev = &pci_dev->dev;

	if (!pci_dev_run_wake(pci_dev))
		return;

	spin_lock_irq(&dev->power.lock);

	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
		__pci_pme_active(pci_dev, true);

	spin_unlock_irq(&dev->power.lock);
}

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports.
 */
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
	unsigned int year;

	if (!pci_is_pcie(bridge))
		return false;

	switch (pci_pcie_type(bridge)) {
	case PCI_EXP_TYPE_ROOT_PORT:
	case PCI_EXP_TYPE_UPSTREAM:
	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_bridge_d3_disable)
			return false;

		/*
		 * Hotplug interrupts cannot be delivered if the link is down,
		 * so parents of a hotplug port must stay awake. In addition,
		 * hotplug ports handled by firmware in System Management Mode
		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
		 * For simplicity, disallow in general for now.
		 */
		if (bridge->is_hotplug_bridge)
			return false;

		if (pci_bridge_d3_force)
			return true;

		/*
		 * It should be safe to put PCIe ports from 2015 or newer
		 * to D3.
		 */
		if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
		    year >= 2015) {
			return true;
		}
		break;
	}

	return false;
}

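/*
 * pci_walk_bus() callback: clear *d3cold_ok and stop the walk as soon as
 * one device in the subtree cannot tolerate a D3cold transition.
 */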
static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}

/**
 * pci_bridge_d3_update - Update bridge D3 capabilities
 * @dev: PCI device which is changed
 *
 * Update upstream bridge PM capabilities accordingly depending on if the
 * device PM configuration was changed or the device is being removed.  The
 * change is also propagated upstream.
 */
void pci_bridge_d3_update(struct pci_dev *dev)
{
	bool remove = !device_is_registered(&dev->dev);
	struct pci_dev *bridge;
	bool d3cold_ok = true;

	bridge = pci_upstream_bridge(dev);
	if (!bridge || !pci_bridge_d3_possible(bridge))
		return;

	/*
	 * If D3 is currently allowed for the bridge, removing one of its
	 * children won't change that.
	 */
	if (remove && bridge->bridge_d3)
		return;

	/*
	 * If D3 is currently allowed for the bridge and a child is added or
	 * changed, disallowance of D3 can only be caused by that child, so
	 * we only need to check that single device, not any of its siblings.
	 *
	 * If D3 is currently not allowed for the bridge, checking the device
	 * first may allow us to skip checking its siblings.
	 */
	if (!remove)
		pci_dev_check_d3cold(dev, &d3cold_ok);

	/*
	 * If D3 is currently not allowed for the bridge, this may be caused
	 * either by the device being changed/removed or any of its siblings,
	 * so we need to go through all children to find out if one of them
	 * continues to block D3.
	 */
	if (d3cold_ok && !bridge->bridge_d3)
		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
			     &d3cold_ok);

	if (bridge->bridge_d3 != d3cold_ok) {
		bridge->bridge_d3 = d3cold_ok;
		/* Propagate change to upstream bridges */
		pci_bridge_d3_update(bridge);
	}
}

/**
 * pci_d3cold_enable - Enable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to enable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_enable(struct pci_dev *dev)
{
	if (dev->no_d3cold) {
		dev->no_d3cold = false;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);

/**
 * pci_d3cold_disable - Disable D3cold for device
 * @dev: PCI device to handle
 *
 * This function can be used in drivers to disable D3cold from the device
 * they handle.  It also updates upstream PCI bridge PM capabilities
 * accordingly.
 */
void pci_d3cold_disable(struct pci_dev *dev)
{
	if (!dev->no_d3cold) {
		dev->no_d3cold = true;
		pci_bridge_d3_update(dev);
	}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;
	dev->pme_support = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->bridge_d3 = pci_bridge_d3_possible(dev);
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	}
}

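/*
 * Translate an Enhanced Allocation (EA) entry property value into the
 * IORESOURCE_* flags for the corresponding struct resource.
 */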
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;

	switch (prop) {
	case PCI_EA_P_MEM:
	case PCI_EA_P_VF_MEM:
		flags |= IORESOURCE_MEM;
		break;
	case PCI_EA_P_MEM_PREFETCH:
	case PCI_EA_P_VF_MEM_PREFETCH:
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		break;
	case PCI_EA_P_IO:
		flags |= IORESOURCE_IO;
		break;
	default:
		return 0;
	}

	return flags;
}

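/*
 * Map an EA entry's BAR Equivalent Indicator (BEI) to the matching
 * struct resource slot on the device, or NULL if the BEI is unsupported.
 */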
static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
					    u8 prop)
{
	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
		return &dev->resource[bei];
#ifdef CONFIG_PCI_IOV
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
		return &dev->resource[PCI_IOV_RESOURCES +
				      bei - PCI_EA_BEI_VF_BAR0];
#endif
	else if (bei == PCI_EA_BEI_ROM)
		return &dev->resource[PCI_ROM_RESOURCE];
	else
		return NULL;
}

/* Read an Enhanced Allocation (EA) entry */
static int pci_ea_read(struct pci_dev *dev, int offset)
{
	struct resource *res;
	int ent_size, ent_offset = offset;
	resource_size_t start, end;
	unsigned long flags;
	u32 dw0, bei, base, max_offset;
	u8 prop;
	bool support_64 = (sizeof(resource_size_t) >= 8);

	pci_read_config_dword(dev, ent_offset, &dw0);
	ent_offset += 4;

	/* Entry size field indicates DWORDs after 1st */
	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;

	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
		goto out;

	bei = (dw0 & PCI_EA_BEI) >> 4;
	prop = (dw0 & PCI_EA_PP) >> 8;

	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
		prop = (dw0 & PCI_EA_SP) >> 16;
	if (prop > PCI_EA_P_BRIDGE_IO)
		goto out;

	res = pci_ea_get_resource(dev, bei, prop);
	if (!res) {
		dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
		goto out;
	}

	flags = pci_ea_flags(dev, prop);
	if (!flags) {
		dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
		goto out;
	}

	/* Read Base */
	pci_read_config_dword(dev, ent_offset, &base);
	start = (base & PCI_EA_FIELD_MASK);
	ent_offset += 4;

	/* Read MaxOffset */
	pci_read_config_dword(dev, ent_offset, &max_offset);
	ent_offset += 4;

	/* Read Base MSBs (if 64-bit entry) */
	if (base & PCI_EA_IS_64) {
		u32 base_upper;

		pci_read_config_dword(dev, ent_offset, &base_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry starts above 32-bit boundary, can't use */
		if (!support_64 && base_upper)
			goto out;

		if (support_64)
			start |= ((u64)base_upper << 32);
	}

	end = start + (max_offset | 0x03);

	/* Read MaxOffset MSBs (if 64-bit entry) */
	if (max_offset & PCI_EA_IS_64) {
		u32 max_offset_upper;

		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry too big, can't use */
		if (!support_64 && max_offset_upper)
			goto out;

		if (support_64)
			end += ((u64)max_offset_upper << 32);
	}

	if (end < start) {
		dev_err(&dev->dev, "EA Entry crosses address boundary\n");
		goto out;
	}

	if (ent_size != ent_offset - offset) {
		dev_err(&dev->dev,
			"EA Entry Size (%d) does not match length read (%d)\n",
			ent_size, ent_offset - offset);
		goto out;
	}

	res->name = pci_name(dev);
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (bei <= PCI_EA_BEI_BAR5)
		dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei, res, prop);
	else if (bei == PCI_EA_BEI_ROM)
		dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
			   res, prop);
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
		dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
	else
		dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei, res, prop);

out:
	return offset + ent_size;
}

/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
{
	int ea;
	u8 num_ent;
	int offset;
	int i;

	/* find PCI EA capability in list */
	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
	if (!ea)
		return;

	/* determine the number of entries */
	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
				 &num_ent);
	num_ent &= PCI_EA_NUM_ENT_MASK;

	offset = ea + PCI_EA_FIRST_ENT;

	/* Skip DWORD 2 for type 1 functions */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		offset += 4;

	/* parse each EA entry */
	for (i = 0; i < num_ent; ++i)
		offset = pci_ea_read(dev, offset);
}

static void pci_add_saved_cap(struct pci_dev *pci_dev,
			      struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}

/**
 * _pci_add_cap_save_buffer - allocate buffer for saving given
 *                            capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @extended: Standard or Extended capability ID
 * @size: requested size of the buffer
 */
static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
				    bool extended, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	if (extended)
		pos = pci_find_ext_capability(dev, cap);
	else
		pos = pci_find_capability(dev, cap);

	if (!pos)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.cap_extended = extended;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, false, size);
}

int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, true, size);
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");

	pci_allocate_vc_save_buffers(dev);
}

void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
		kfree(tmp);
}

/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
void pci_configure_ari(struct pci_dev *dev)
{
	u32 cap;
	struct pci_dev *bridge;

	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	bridge = dev->bus->self;
	if (!bridge)
		return;

	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 1;
	} else {
		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_ARI);
		bridge->ari_enabled = 0;
	}
}

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		return;

	if (!pci_dev_specific_enable_acs(dev))
		return;

	pci_std_enable_acs(dev);
}

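/*
 * Check whether the required flags in @acs_flags are enabled in the
 * device's ACS capability; flags the hardware does not implement (other
 * than Egress Control) are treated as always enabled.
 */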
2838static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2839{
2840 int pos;
2841 u16 cap, ctrl;
2842
2843 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2844 if (!pos)
2845 return false;
2846
2847
2848
2849
2850
2851
2852 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2853 acs_flags &= (cap | PCI_ACS_EC);
2854
2855 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2856 return (ctrl & acs_flags) == acs_flags;
2857}
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2876{
2877 int ret;
2878
2879 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2880 if (ret >= 0)
2881 return ret > 0;
2882
2883
2884
2885
2886
2887
2888 if (!pci_is_pcie(pdev))
2889 return false;
2890
2891 switch (pci_pcie_type(pdev)) {
2892
2893
2894
2895
2896
2897 case PCI_EXP_TYPE_PCIE_BRIDGE:
2898
2899
2900
2901
2902
2903
2904 case PCI_EXP_TYPE_PCI_BRIDGE:
2905 case PCI_EXP_TYPE_RC_EC:
2906 return false;
2907
2908
2909
2910
2911
2912 case PCI_EXP_TYPE_DOWNSTREAM:
2913 case PCI_EXP_TYPE_ROOT_PORT:
2914 return pci_acs_flags_enabled(pdev, acs_flags);
2915
2916
2917
2918
2919
2920
2921
2922 case PCI_EXP_TYPE_ENDPOINT:
2923 case PCI_EXP_TYPE_UPSTREAM:
2924 case PCI_EXP_TYPE_LEG_END:
2925 case PCI_EXP_TYPE_RC_END:
2926 if (!pdev->multifunction)
2927 break;
2928
2929 return pci_acs_flags_enabled(pdev, acs_flags);
2930 }
2931
 /*
  * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
  * to single-function devices; a lone function is isolated by
  * definition.
  */
2936 return true;
2937}
2938
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from @start, within the boundary of @end, and test
 * the ACS flags at each device until failure, @end, or the root bus is
 * reached.  Return true if all tested devices satisfy the flags.
 */
2948bool pci_acs_path_enabled(struct pci_dev *start,
2949 struct pci_dev *end, u16 acs_flags)
2950{
2951 struct pci_dev *pdev, *parent = start;
2952
2953 do {
2954 pdev = parent;
2955
2956 if (!pci_acs_enabled(pdev, acs_flags))
2957 return false;
2958
2959 if (pci_is_root_bus(pdev->bus))
2960 return (end == NULL);
2961
2962 parent = pdev->bus->self;
2963 } while (pdev != end);
2964
2965 return true;
2966}
2967
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0.
 */
2979u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2980{
2981 int slot;
2982
2983 if (pci_ari_enabled(dev->bus))
2984 slot = 0;
2985 else
2986 slot = PCI_SLOT(dev->devfn);
2987
2988 return (((pin - 1) + slot) % 4) + 1;
2989}
2990
2991int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2992{
2993 u8 pin;
2994
2995 pin = dev->pin;
2996 if (!pin)
2997 return -1;
2998
2999 while (!pci_is_root_bus(dev->bus)) {
3000 pin = pci_swizzle_interrupt_pin(dev, pin);
3001 dev = dev->bus->self;
3002 }
3003 *bridge = dev;
3004 return pin;
3005}
3006
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
3015u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3016{
3017 u8 pin = *pinp;
3018
3019 while (!pci_is_root_bus(dev->bus)) {
3020 pin = pci_swizzle_interrupt_pin(dev, pin);
3021 dev = dev->bus->self;
3022 }
3023 *pinp = pin;
3024 return PCI_SLOT(dev->devfn);
3025}
3026EXPORT_SYMBOL_GPL(pci_common_swizzle);
3027
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
3037void pci_release_region(struct pci_dev *pdev, int bar)
3038{
3039 struct pci_devres *dr;
3040
3041 if (pci_resource_len(pdev, bar) == 0)
3042 return;
3043 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3044 release_region(pci_resource_start(pdev, bar),
3045 pci_resource_len(pdev, bar));
3046 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3047 release_mem_region(pci_resource_start(pdev, bar),
3048 pci_resource_len(pdev, bar));
3049
3050 dr = find_pci_dr(pdev);
3051 if (dr)
3052 dr->region_mask &= ~(1 << bar);
3053}
3054EXPORT_SYMBOL(pci_release_region);
3055
/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3075static int __pci_request_region(struct pci_dev *pdev, int bar,
3076 const char *res_name, int exclusive)
3077{
3078 struct pci_devres *dr;
3079
3080 if (pci_resource_len(pdev, bar) == 0)
3081 return 0;
3082
3083 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3084 if (!request_region(pci_resource_start(pdev, bar),
3085 pci_resource_len(pdev, bar), res_name))
3086 goto err_out;
3087 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3088 if (!__request_mem_region(pci_resource_start(pdev, bar),
3089 pci_resource_len(pdev, bar), res_name,
3090 exclusive))
3091 goto err_out;
3092 }
3093
3094 dr = find_pci_dr(pdev);
3095 if (dr)
3096 dr->region_mask |= 1 << bar;
3097
3098 return 0;
3099
3100err_out:
3101 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
3102 &pdev->resource[bar]);
3103 return -EBUSY;
3104}
3105
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3120int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3121{
3122 return __pci_request_region(pdev, bar, res_name, 0);
3123}
3124EXPORT_SYMBOL(pci_request_region);
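
/*
 * Usage sketch (hypothetical driver code, not from this file): a driver
 * typically claims a BAR in its probe path before touching it and releases
 * it on teardown:
 *
 *	err = pci_request_region(pdev, 0, "foo");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_region(pdev, 0);
 */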
3125
/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference of the _exclusive variant is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
3144int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
3145 const char *res_name)
3146{
3147 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
3148}
3149EXPORT_SYMBOL(pci_request_region_exclusive);
3150
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
3159void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3160{
3161 int i;
3162
3163 for (i = 0; i < 6; i++)
3164 if (bars & (1 << i))
3165 pci_release_region(pdev, i);
3166}
3167EXPORT_SYMBOL(pci_release_selected_regions);
3168
3169static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3170 const char *res_name, int excl)
3171{
3172 int i;
3173
3174 for (i = 0; i < 6; i++)
3175 if (bars & (1 << i))
3176 if (__pci_request_region(pdev, i, res_name, excl))
3177 goto err_out;
3178 return 0;
3179
3180err_out:
3181 while (--i >= 0)
3182 if (bars & (1 << i))
3183 pci_release_region(pdev, i);
3184
3185 return -EBUSY;
3186}
3187
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Returns 0 on success, or %EBUSY on error.
 */
3195int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3196 const char *res_name)
3197{
3198 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3199}
3200EXPORT_SYMBOL(pci_request_selected_regions);
3201
3202int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3203 const char *res_name)
3204{
3205 return __pci_request_selected_regions(pdev, bars, res_name,
3206 IORESOURCE_EXCLUSIVE);
3207}
3208EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3209
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
3219void pci_release_regions(struct pci_dev *pdev)
3220{
3221 pci_release_selected_regions(pdev, (1 << 6) - 1);
3222}
3223EXPORT_SYMBOL(pci_release_regions);
3224
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3238int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3239{
3240 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3241}
3242EXPORT_SYMBOL(pci_request_regions);
3243
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name.  Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that /dev/mem
 * and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */
3260int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3261{
3262 return pci_request_selected_regions_exclusive(pdev,
3263 ((1 << 6) - 1), res_name);
3264}
3265EXPORT_SYMBOL(pci_request_regions_exclusive);
3266
3267#ifdef PCI_IOBASE
3268struct io_range {
3269 struct list_head list;
3270 phys_addr_t start;
3271 resource_size_t size;
3272};
3273
3274static LIST_HEAD(io_range_list);
3275static DEFINE_SPINLOCK(io_range_lock);
3276#endif
3277
/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise.
 */
3282int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
3283{
3284 int err = 0;
3285
3286#ifdef PCI_IOBASE
3287 struct io_range *range;
3288 resource_size_t allocated_size = 0;
3289
 /* check if the range hasn't been previously recorded */
 spin_lock(&io_range_lock);
3292 list_for_each_entry(range, &io_range_list, list) {
 if (addr >= range->start && addr + size <= range->start + range->size) {
 /* range already registered, bail out */
 goto end_register;
3296 }
3297 allocated_size += range->size;
3298 }
3299
 /* range not registered yet, check for available space */
 if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
 /* if it's too big, check if 64K space can be reserved */
 if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
 err = -E2BIG;
 goto end_register;
 }

 size = SZ_64K;
 pr_warn("Requested IO range too big, new size set to 64K\n");
3310 }
3311
 /* add the range to the list */
 range = kzalloc(sizeof(*range), GFP_ATOMIC);
3314 if (!range) {
3315 err = -ENOMEM;
3316 goto end_register;
3317 }
3318
3319 range->start = addr;
3320 range->size = size;
3321
3322 list_add_tail(&range->list, &io_range_list);
3323
3324end_register:
3325 spin_unlock(&io_range_lock);
3326#endif
3327
3328 return err;
3329}
3330
3331phys_addr_t pci_pio_to_address(unsigned long pio)
3332{
3333 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3334
3335#ifdef PCI_IOBASE
3336 struct io_range *range;
3337 resource_size_t allocated_size = 0;
3338
3339 if (pio > IO_SPACE_LIMIT)
3340 return address;
3341
3342 spin_lock(&io_range_lock);
3343 list_for_each_entry(range, &io_range_list, list) {
3344 if (pio >= allocated_size && pio < allocated_size + range->size) {
3345 address = range->start + pio - allocated_size;
3346 break;
3347 }
3348 allocated_size += range->size;
3349 }
3350 spin_unlock(&io_range_lock);
3351#endif
3352
3353 return address;
3354}
3355
3356unsigned long __weak pci_address_to_pio(phys_addr_t address)
3357{
3358#ifdef PCI_IOBASE
3359 struct io_range *res;
3360 resource_size_t offset = 0;
3361 unsigned long addr = -1;
3362
3363 spin_lock(&io_range_lock);
3364 list_for_each_entry(res, &io_range_list, list) {
3365 if (address >= res->start && address < res->start + res->size) {
3366 addr = address - res->start + offset;
3367 break;
3368 }
3369 offset += res->size;
3370 }
3371 spin_unlock(&io_range_lock);
3372
3373 return addr;
3374#else
3375 if (address > IO_SPACE_LIMIT)
3376 return (unsigned long)-1;
3377
3378 return (unsigned long) address;
3379#endif
3380}
3381
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res
 * and the CPU physical address @phys_addr into virtual address space.
 * Only architectures that have memory mapped IO functions defined
 * (and the PCI_IOBASE value defined) should call this function.
 */
3392int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3393{
3394#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3395 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3396
3397 if (!(res->flags & IORESOURCE_IO))
3398 return -EINVAL;
3399
3400 if (res->end > IO_SPACE_LIMIT)
3401 return -EINVAL;
3402
3403 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3404 pgprot_device(PAGE_KERNEL));
3405#else
 /*
  * This architecture does not have memory mapped I/O space,
  * so this function should never be called.
  */
 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3409 return -ENODEV;
3410#endif
3411}
3412EXPORT_SYMBOL(pci_remap_iospace);
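
/*
 * Usage sketch (hypothetical host bridge driver): after parsing an I/O
 * window into a resource "res" (IORESOURCE_IO) with CPU physical address
 * "phys", the window would be mapped with:
 *
 *	err = pci_remap_iospace(&res, phys);
 *	if (err)
 *		dev_warn(dev, "error %d mapping IO range %pR\n", err, &res);
 */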
3413
/**
 * pci_unmap_iospace - Unmap the memory mapped I/O space
 * @res: resource to be unmapped
 *
 * Unmap the CPU virtual address @res from virtual address space.
 * Only architectures that have memory mapped IO functions defined
 * (and the PCI_IOBASE value defined) should call this function.
 */
3422void pci_unmap_iospace(struct resource *res)
3423{
3424#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3425 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3426
3427 unmap_kernel_range(vaddr, resource_size(res));
3428#endif
3429}
3430EXPORT_SYMBOL(pci_unmap_iospace);
3431
/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace().  Map is automatically unmapped on
 * driver detach.
 */
3441void __iomem *devm_pci_remap_cfgspace(struct device *dev,
3442 resource_size_t offset,
3443 resource_size_t size)
3444{
3445 void __iomem **ptr, *addr;
3446
3447 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3448 if (!ptr)
3449 return NULL;
3450
3451 addr = pci_remap_cfgspace(offset, size);
3452 if (addr) {
3453 *ptr = addr;
3454 devres_add(dev, ptr);
3455 } else
3456 devres_free(ptr);
3457
3458 return addr;
3459}
3460EXPORT_SYMBOL(devm_pci_remap_cfgspace);
3461
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
 * code on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
3481void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
3482 struct resource *res)
3483{
3484 resource_size_t size;
3485 const char *name;
3486 void __iomem *dest_ptr;
3487
3488 BUG_ON(!dev);
3489
3490 if (!res || resource_type(res) != IORESOURCE_MEM) {
3491 dev_err(dev, "invalid resource\n");
3492 return IOMEM_ERR_PTR(-EINVAL);
3493 }
3494
3495 size = resource_size(res);
3496 name = res->name ?: dev_name(dev);
3497
3498 if (!devm_request_mem_region(dev, res->start, size, name)) {
3499 dev_err(dev, "can't request region for resource %pR\n", res);
3500 return IOMEM_ERR_PTR(-EBUSY);
3501 }
3502
3503 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
3504 if (!dest_ptr) {
3505 dev_err(dev, "ioremap failed for resource %pR\n", res);
3506 devm_release_mem_region(dev, res->start, size);
3507 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
3508 }
3509
3510 return dest_ptr;
3511}
3512EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
3513
3514static void __pci_set_master(struct pci_dev *dev, bool enable)
3515{
3516 u16 old_cmd, cmd;
3517
3518 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3519 if (enable)
3520 cmd = old_cmd | PCI_COMMAND_MASTER;
3521 else
3522 cmd = old_cmd & ~PCI_COMMAND_MASTER;
3523 if (cmd != old_cmd) {
3524 dev_dbg(&dev->dev, "%s bus mastering\n",
3525 enable ? "enabling" : "disabling");
3526 pci_write_config_word(dev, PCI_COMMAND, cmd);
3527 }
3528 dev->is_busmaster = enable;
3529}
3530
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
3538char * __weak __init pcibios_setup(char *str)
3539{
3540 return str;
3541}
3542
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
3551void __weak pcibios_set_master(struct pci_dev *dev)
3552{
3553 u8 lat;

 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
 if (pci_is_pcie(dev))
3557 return;
3558
3559 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3560 if (lat < 16)
3561 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3562 else if (lat > pcibios_max_latency)
3563 lat = pcibios_max_latency;
3564 else
3565 return;
3566
3567 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3568}
3569
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
3577void pci_set_master(struct pci_dev *dev)
3578{
3579 __pci_set_master(dev, true);
3580 pcibios_set_master(dev);
3581}
3582EXPORT_SYMBOL(pci_set_master);
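
/*
 * Usage sketch (hypothetical driver code): bus mastering must be enabled
 * before a device may initiate DMA, and is typically paired with
 * pci_clear_master() on the error or teardown path:
 *
 *	pci_set_master(pdev);
 *	...
 *	pci_clear_master(pdev);
 */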
3583
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
3588void pci_clear_master(struct pci_dev *dev)
3589{
3590 __pci_set_master(dev, false);
3591}
3592EXPORT_SYMBOL(pci_clear_master);
3593
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
3604int pci_set_cacheline_size(struct pci_dev *dev)
3605{
3606 u8 cacheline_size;
3607
3608 if (!pci_cache_line_size)
3609 return -EINVAL;

 /*
  * Validate current setting: the PCI_CACHE_LINE_SIZE must be
  * equal to or a multiple of the right value.
  */
 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3614 if (cacheline_size >= pci_cache_line_size &&
3615 (cacheline_size % pci_cache_line_size) == 0)
3616 return 0;

 /* Write the correct value. */
 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
 /* Read it back. */
 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3622 if (cacheline_size == pci_cache_line_size)
3623 return 0;
3624
3625 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
3626 pci_cache_line_size << 2);
3627
3628 return -EINVAL;
3629}
3630EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3631
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
3640int pci_set_mwi(struct pci_dev *dev)
3641{
3642#ifdef PCI_DISABLE_MWI
3643 return 0;
3644#else
3645 int rc;
3646 u16 cmd;
3647
3648 rc = pci_set_cacheline_size(dev);
3649 if (rc)
3650 return rc;
3651
3652 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3653 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3654 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
3655 cmd |= PCI_COMMAND_INVALIDATE;
3656 pci_write_config_word(dev, PCI_COMMAND, cmd);
3657 }
3658 return 0;
3659#endif
3660}
3661EXPORT_SYMBOL(pci_set_mwi);
3662
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
3672int pci_try_set_mwi(struct pci_dev *dev)
3673{
3674#ifdef PCI_DISABLE_MWI
3675 return 0;
3676#else
3677 return pci_set_mwi(dev);
3678#endif
3679}
3680EXPORT_SYMBOL(pci_try_set_mwi);
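
/*
 * Usage sketch: since Memory-Write-Invalidate is purely an optimization,
 * callers of the _try_ variant commonly ignore the result (hypothetical
 * driver code):
 *
 *	pci_try_set_mwi(pdev);
 */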
3681
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device.
 */
3688void pci_clear_mwi(struct pci_dev *dev)
3689{
3690#ifndef PCI_DISABLE_MWI
3691 u16 cmd;
3692
3693 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3694 if (cmd & PCI_COMMAND_INVALIDATE) {
3695 cmd &= ~PCI_COMMAND_INVALIDATE;
3696 pci_write_config_word(dev, PCI_COMMAND, cmd);
3697 }
3698#endif
3699}
3700EXPORT_SYMBOL(pci_clear_mwi);
3701
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev.
 */
3709void pci_intx(struct pci_dev *pdev, int enable)
3710{
3711 u16 pci_command, new;
3712
3713 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3714
3715 if (enable)
3716 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3717 else
3718 new = pci_command | PCI_COMMAND_INTX_DISABLE;
3719
3720 if (new != pci_command) {
3721 struct pci_devres *dr;
3722
3723 pci_write_config_word(pdev, PCI_COMMAND, new);
3724
3725 dr = find_pci_dr(pdev);
3726 if (dr && !dr->restore_intx) {
3727 dr->restore_intx = 1;
3728 dr->orig_intx = !enable;
3729 }
3730 }
3731}
3732EXPORT_SYMBOL_GPL(pci_intx);
3733
3734static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3735{
3736 struct pci_bus *bus = dev->bus;
3737 bool mask_updated = true;
3738 u32 cmd_status_dword;
3739 u16 origcmd, newcmd;
3740 unsigned long flags;
3741 bool irq_pending;
3742
 /*
  * We do a single dword read to retrieve both command and status.
  * Document assumptions that make this possible.
  */
 BUILD_BUG_ON(PCI_COMMAND % 4);
 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3749
3750 raw_spin_lock_irqsave(&pci_lock, flags);
3751
3752 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3753
3754 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3755
 /*
  * Check interrupt status register to see whether our device
  * triggered the interrupt (when masking) or the next IRQ is
  * already pending (when unmasking).
  */
 if (mask != irq_pending) {
3762 mask_updated = false;
3763 goto done;
3764 }
3765
3766 origcmd = cmd_status_dword;
3767 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3768 if (mask)
3769 newcmd |= PCI_COMMAND_INTX_DISABLE;
3770 if (newcmd != origcmd)
3771 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3772
3773done:
3774 raw_spin_unlock_irqrestore(&pci_lock, flags);
3775
3776 return mask_updated;
3777}
3778
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case.  False is returned if no interrupt was
 * pending.
 */
3787bool pci_check_and_mask_intx(struct pci_dev *dev)
3788{
3789 return pci_check_and_set_intx_mask(dev, true);
3790}
3791EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3792
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true.  False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
3801bool pci_check_and_unmask_intx(struct pci_dev *dev)
3802{
3803 return pci_check_and_set_intx_mask(dev, false);
3804}
3805EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
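
/*
 * Illustrative sketch of how the two helpers above are meant to be used
 * from a handler for a shared INTx line (hypothetical "foo" driver;
 * uio_pci_generic follows a similar pattern):
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;
 *
 *		schedule_work(&priv->work);
 *		return IRQ_HANDLED;
 *	}
 *
 * The deferred work re-enables INTx, e.g. via pci_check_and_unmask_intx(),
 * once the device has been serviced.
 */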
3806
/**
 * pci_wait_for_pending_transaction - waits for pending transaction
 * @dev: the PCI device to operate on
 *
 * Return 0 if transaction is pending 1 otherwise.
 */
3813int pci_wait_for_pending_transaction(struct pci_dev *dev)
3814{
3815 if (!pci_is_pcie(dev))
3816 return 1;
3817
3818 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3819 PCI_EXP_DEVSTA_TRPND);
3820}
3821EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3822
3823static void pci_flr_wait(struct pci_dev *dev)
3824{
3825 int delay = 1, timeout = 60000;
3826 u32 id;
3827
 /*
  * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
  * 100ms, but may silently discard requests while the FLR is in
  * progress.  Wait 100ms before trying to access the device.
  */
 msleep(100);
3834
 /*
  * After 100ms, the device should not silently discard config
  * requests, but it may still indicate that it needs more time by
  * responding to them with CRS completions.  The Root Port will
  * generally synthesize ~0 data to complete the read (except when
  * CRS SV is enabled and the read was for the Vendor ID; in that
  * case it synthesizes 0x0001 data).
  *
  * Wait for the device to return a non-CRS completion.  Read the
  * Command register instead of Vendor ID so we don't have to
  * contend with the CRS SV value.
  */
 pci_read_config_dword(dev, PCI_COMMAND, &id);
3848 while (id == ~0) {
3849 if (delay > timeout) {
3850 dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
3851 100 + delay - 1);
3852 return;
3853 }
3854
3855 if (delay > 1000)
3856 dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
3857 100 + delay - 1);
3858
3859 msleep(delay);
3860 delay *= 2;
3861 pci_read_config_dword(dev, PCI_COMMAND, &id);
3862 }
3863
3864 if (delay > 1000)
3865 dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
3866}
3867
/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev: device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */
3875static bool pcie_has_flr(struct pci_dev *dev)
3876{
3877 u32 cap;
3878
3879 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3880 return false;
3881
3882 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3883 return cap & PCI_EXP_DEVCAP_FLR;
3884}
3885
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */
3894void pcie_flr(struct pci_dev *dev)
3895{
3896 if (!pci_wait_for_pending_transaction(dev))
3897 dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
3898
3899 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3900 pci_flr_wait(dev);
3901}
3902EXPORT_SYMBOL_GPL(pcie_flr);
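
/*
 * Usage sketch (hypothetical caller): pcie_flr() assumes the capability
 * has already been verified, so callers pair it with pcie_has_flr():
 *
 *	if (pcie_has_flr(dev))
 *		pcie_flr(dev);
 */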
3903
3904static int pci_af_flr(struct pci_dev *dev, int probe)
3905{
3906 int pos;
3907 u8 cap;
3908
3909 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3910 if (!pos)
3911 return -ENOTTY;
3912
3913 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3914 return -ENOTTY;
3915
3916 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3917 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3918 return -ENOTTY;
3919
3920 if (probe)
3921 return 0;
3922
 /*
  * Wait for Transaction Pending bit to clear.  A word-aligned test
  * is used, so we use the control offset rather than status and shift
  * the test bit to match.
  */
 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3929 PCI_AF_STATUS_TP << 8))
3930 dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
3931
3932 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3933 pci_flr_wait(dev);
3934 return 0;
3935}
3936
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay flag is set).
 */
3952static int pci_pm_reset(struct pci_dev *dev, int probe)
3953{
3954 u16 csr;
3955
3956 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3957 return -ENOTTY;
3958
3959 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3960 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3961 return -ENOTTY;
3962
3963 if (probe)
3964 return 0;
3965
3966 if (dev->current_state != PCI_D0)
3967 return -EINVAL;
3968
3969 csr &= ~PCI_PM_CTRL_STATE_MASK;
3970 csr |= PCI_D3hot;
3971 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3972 pci_dev_d3_sleep(dev);
3973
3974 csr &= ~PCI_PM_CTRL_STATE_MASK;
3975 csr |= PCI_D0;
3976 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3977 pci_dev_d3_sleep(dev);
3978
3979 return 0;
3980}
3981
3982void pci_reset_secondary_bus(struct pci_dev *dev)
3983{
3984 u16 ctrl;
3985
3986 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3987 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3988 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
 /*
  * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
  * this to 2ms to ensure that we meet the minimum requirement.
  */
 msleep(2);
3994
3995 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3996 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3997
 /*
  * Trhfa for conventional PCI is 2^25 clock cycles.
  * Assuming a minimum 33MHz clock this results in a 1s
  * delay before we can consider subordinate devices to
  * be re-initialized.  PCIe has some ways to shorten this,
  * but we don't make use of them yet.
  */
 ssleep(1);
4006}
4007
4008void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4009{
4010 pci_reset_secondary_bus(dev);
4011}
4012
/**
 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
4020void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
4021{
4022 pcibios_reset_secondary_bus(dev);
4023}
4024EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
4025
4026static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4027{
4028 struct pci_dev *pdev;
4029
4030 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4031 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4032 return -ENOTTY;
4033
4034 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4035 if (pdev != dev)
4036 return -ENOTTY;
4037
4038 if (probe)
4039 return 0;
4040
4041 pci_reset_bridge_secondary_bus(dev->bus->self);
4042
4043 return 0;
4044}
4045
4046static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4047{
4048 int rc = -ENOTTY;
4049
4050 if (!hotplug || !try_module_get(hotplug->ops->owner))
4051 return rc;
4052
4053 if (hotplug->ops->reset_slot)
4054 rc = hotplug->ops->reset_slot(hotplug, probe);
4055
4056 module_put(hotplug->ops->owner);
4057
4058 return rc;
4059}
4060
4061static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4062{
4063 struct pci_dev *pdev;
4064
4065 if (dev->subordinate || !dev->slot ||
4066 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4067 return -ENOTTY;
4068
4069 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4070 if (pdev != dev && pdev->slot == dev->slot)
4071 return -ENOTTY;
4072
4073 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4074}
4075
4076static void pci_dev_lock(struct pci_dev *dev)
4077{
4078 pci_cfg_access_lock(dev);
 /* block PM suspend, driver probe, etc. */
 device_lock(&dev->dev);
4081}
4082
/* Return 1 on successful lock, 0 on contention */
static int pci_dev_trylock(struct pci_dev *dev)
4085{
4086 if (pci_cfg_access_trylock(dev)) {
4087 if (device_trylock(&dev->dev))
4088 return 1;
4089 pci_cfg_access_unlock(dev);
4090 }
4091
4092 return 0;
4093}
4094
4095static void pci_dev_unlock(struct pci_dev *dev)
4096{
4097 device_unlock(&dev->dev);
4098 pci_cfg_access_unlock(dev);
4099}
4100
4101static void pci_dev_save_and_disable(struct pci_dev *dev)
4102{
4103 const struct pci_error_handlers *err_handler =
4104 dev->driver ? dev->driver->err_handler : NULL;
4105
 /*
  * dev->driver->err_handler->reset_prepare() is protected against
  * races with ->remove() by the device lock, which must be held by
  * the caller.
  */
4111 if (err_handler && err_handler->reset_prepare)
4112 err_handler->reset_prepare(dev);
4113
 /*
  * Wake-up device prior to save.  PM registers default to D0 after
  * reset and a simple register restore doesn't reliably return
  * to a non-D0 state anyway.
  */
4119 pci_set_power_state(dev, PCI_D0);
4120
4121 pci_save_state(dev);
4122
 /*
  * Disable the device by clearing the Command register, except for
  * INTx-disable which is set.  This not only disables MMIO and I/O port
  * BARs, but also prevents the device from being Bus Master, preventing
  * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
  * devices, INTx-disable prevents legacy interrupts.
  */
4129 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4130}
4131
4132static void pci_dev_restore(struct pci_dev *dev)
4133{
4134 const struct pci_error_handlers *err_handler =
4135 dev->driver ? dev->driver->err_handler : NULL;
4136
4137 pci_restore_state(dev);
4138
 /*
  * dev->driver->err_handler->reset_done() is protected against
  * races with ->remove() by the device lock, which must be held by
  * the caller.
  */
4144 if (err_handler && err_handler->reset_done)
4145 err_handler->reset_done(dev);
4146}
4147
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4165int __pci_reset_function(struct pci_dev *dev)
4166{
4167 int ret;
4168
4169 pci_dev_lock(dev);
4170 ret = __pci_reset_function_locked(dev);
4171 pci_dev_unlock(dev);
4172
4173 return ret;
4174}
4175EXPORT_SYMBOL_GPL(__pci_reset_function);
4176
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4196int __pci_reset_function_locked(struct pci_dev *dev)
4197{
4198 int rc;
4199
4200 might_sleep();
4201
4202 rc = pci_dev_specific_reset(dev, 0);
4203 if (rc != -ENOTTY)
4204 return rc;
4205 if (pcie_has_flr(dev)) {
4206 pcie_flr(dev);
4207 return 0;
4208 }
4209 rc = pci_af_flr(dev, 0);
4210 if (rc != -ENOTTY)
4211 return rc;
4212 rc = pci_pm_reset(dev, 0);
4213 if (rc != -ENOTTY)
4214 return rc;
4215 rc = pci_dev_reset_slot_function(dev, 0);
4216 if (rc != -ENOTTY)
4217 return rc;
4218 return pci_parent_bus_reset(dev, 0);
4219}
4220EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4221
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
4233int pci_probe_reset_function(struct pci_dev *dev)
4234{
4235 int rc;
4236
4237 might_sleep();
4238
4239 rc = pci_dev_specific_reset(dev, 1);
4240 if (rc != -ENOTTY)
4241 return rc;
4242 if (pcie_has_flr(dev))
4243 return 0;
4244 rc = pci_af_flr(dev, 1);
4245 if (rc != -ENOTTY)
4246 return rc;
4247 rc = pci_pm_reset(dev, 1);
4248 if (rc != -ENOTTY)
4249 return rc;
4250 rc = pci_dev_reset_slot_function(dev, 1);
4251 if (rc != -ENOTTY)
4252 return rc;
4253
4254 return pci_parent_bus_reset(dev, 1);
4255}
4256
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function() in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4273int pci_reset_function(struct pci_dev *dev)
4274{
4275 int rc;
4276
4277 rc = pci_probe_reset_function(dev);
4278 if (rc)
4279 return rc;
4280
4281 pci_dev_lock(dev);
4282 pci_dev_save_and_disable(dev);
4283
4284 rc = __pci_reset_function_locked(dev);
4285
4286 pci_dev_restore(dev);
4287 pci_dev_unlock(dev);
4288
4289 return rc;
4290}
4291EXPORT_SYMBOL_GPL(pci_reset_function);
4292
/**
 * pci_reset_function_locked - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  It differs from
 * __pci_reset_function_locked() in that it saves and restores device state
 * over the reset, and from pci_reset_function() in that the PCI device
 * lock must already be held by the caller.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4310int pci_reset_function_locked(struct pci_dev *dev)
4311{
4312 int rc;
4313
4314 rc = pci_probe_reset_function(dev);
4315 if (rc)
4316 return rc;
4317
4318 pci_dev_save_and_disable(dev);
4319
4320 rc = __pci_reset_function_locked(dev);
4321
4322 pci_dev_restore(dev);
4323
4324 return rc;
4325}
4326EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4327
/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
4334int pci_try_reset_function(struct pci_dev *dev)
4335{
4336 int rc;
4337
4338 rc = pci_probe_reset_function(dev);
4339 if (rc)
4340 return rc;
4341
4342 if (!pci_dev_trylock(dev))
4343 return -EAGAIN;
4344
4345 pci_dev_save_and_disable(dev);
4346 rc = __pci_reset_function_locked(dev);
4347 pci_dev_unlock(dev);
4348
4349 pci_dev_restore(dev);
4350 return rc;
4351}
4352EXPORT_SYMBOL_GPL(pci_try_reset_function);
4353
/* Do any devices on or below this bus prevent a bus reset? */
static bool pci_bus_resetable(struct pci_bus *bus)
4356{
4357 struct pci_dev *dev;
4358
4359 list_for_each_entry(dev, &bus->devices, bus_list) {
4360 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4361 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4362 return false;
4363 }
4364
4365 return true;
4366}
4367
/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
4370{
4371 struct pci_dev *dev;
4372
4373 list_for_each_entry(dev, &bus->devices, bus_list) {
4374 pci_dev_lock(dev);
4375 if (dev->subordinate)
4376 pci_bus_lock(dev->subordinate);
4377 }
4378}
4379
/* Unlock devices from the bottom of the tree up */
static void pci_bus_unlock(struct pci_bus *bus)
4382{
4383 struct pci_dev *dev;
4384
4385 list_for_each_entry(dev, &bus->devices, bus_list) {
4386 if (dev->subordinate)
4387 pci_bus_unlock(dev->subordinate);
4388 pci_dev_unlock(dev);
4389 }
4390}
4391
/* Return 1 on successful lock, 0 on contention */
static int pci_bus_trylock(struct pci_bus *bus)
4394{
4395 struct pci_dev *dev;
4396
4397 list_for_each_entry(dev, &bus->devices, bus_list) {
4398 if (!pci_dev_trylock(dev))
4399 goto unlock;
4400 if (dev->subordinate) {
4401 if (!pci_bus_trylock(dev->subordinate)) {
4402 pci_dev_unlock(dev);
4403 goto unlock;
4404 }
4405 }
4406 }
4407 return 1;
4408
4409unlock:
4410 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
4411 if (dev->subordinate)
4412 pci_bus_unlock(dev->subordinate);
4413 pci_dev_unlock(dev);
4414 }
4415 return 0;
4416}
4417
/* Do any devices on or below this slot prevent a bus reset? */
static bool pci_slot_resetable(struct pci_slot *slot)
4420{
4421 struct pci_dev *dev;
4422
4423 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4424 if (!dev->slot || dev->slot != slot)
4425 continue;
4426 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4427 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4428 return false;
4429 }
4430
4431 return true;
4432}
4433
/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
4436{
4437 struct pci_dev *dev;
4438
4439 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4440 if (!dev->slot || dev->slot != slot)
4441 continue;
4442 pci_dev_lock(dev);
4443 if (dev->subordinate)
4444 pci_bus_lock(dev->subordinate);
4445 }
4446}
4447
/* Unlock devices from the bottom of the tree up */
static void pci_slot_unlock(struct pci_slot *slot)
4450{
4451 struct pci_dev *dev;
4452
4453 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4454 if (!dev->slot || dev->slot != slot)
4455 continue;
4456 if (dev->subordinate)
4457 pci_bus_unlock(dev->subordinate);
4458 pci_dev_unlock(dev);
4459 }
4460}
4461
/* Return 1 on successful lock, 0 on contention */
static int pci_slot_trylock(struct pci_slot *slot)
4464{
4465 struct pci_dev *dev;
4466
4467 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4468 if (!dev->slot || dev->slot != slot)
4469 continue;
4470 if (!pci_dev_trylock(dev))
4471 goto unlock;
4472 if (dev->subordinate) {
4473 if (!pci_bus_trylock(dev->subordinate)) {
4474 pci_dev_unlock(dev);
4475 goto unlock;
4476 }
4477 }
4478 }
4479 return 1;
4480
4481unlock:
4482 list_for_each_entry_continue_reverse(dev,
4483 &slot->bus->devices, bus_list) {
4484 if (!dev->slot || dev->slot != slot)
4485 continue;
4486 if (dev->subordinate)
4487 pci_bus_unlock(dev->subordinate);
4488 pci_dev_unlock(dev);
4489 }
4490 return 0;
4491}
4492
/* Save and disable devices from the top of the tree down */
static void pci_bus_save_and_disable(struct pci_bus *bus)
4495{
4496 struct pci_dev *dev;
4497
4498 list_for_each_entry(dev, &bus->devices, bus_list) {
4499 pci_dev_lock(dev);
4500 pci_dev_save_and_disable(dev);
4501 pci_dev_unlock(dev);
4502 if (dev->subordinate)
4503 pci_bus_save_and_disable(dev->subordinate);
4504 }
4505}
4506
/*
 * Restore devices from top of the tree down - parent bridges need to be
 * restored before we can get to subordinate devices.
 */
static void pci_bus_restore(struct pci_bus *bus)
4512{
4513 struct pci_dev *dev;
4514
4515 list_for_each_entry(dev, &bus->devices, bus_list) {
4516 pci_dev_lock(dev);
4517 pci_dev_restore(dev);
4518 pci_dev_unlock(dev);
4519 if (dev->subordinate)
4520 pci_bus_restore(dev->subordinate);
4521 }
4522}
4523
/* Save and disable devices from the top of the tree down */
static void pci_slot_save_and_disable(struct pci_slot *slot)
4526{
4527 struct pci_dev *dev;
4528
4529 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4530 if (!dev->slot || dev->slot != slot)
4531 continue;
4532 pci_dev_save_and_disable(dev);
4533 if (dev->subordinate)
4534 pci_bus_save_and_disable(dev->subordinate);
4535 }
4536}
4537
/*
 * Restore devices from top of the tree down - parent bridges need to be
 * restored before we can get to subordinate devices.
 */
static void pci_slot_restore(struct pci_slot *slot)
4543{
4544 struct pci_dev *dev;
4545
4546 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4547 if (!dev->slot || dev->slot != slot)
4548 continue;
4549 pci_dev_restore(dev);
4550 if (dev->subordinate)
4551 pci_bus_restore(dev->subordinate);
4552 }
4553}
4554
4555static int pci_slot_reset(struct pci_slot *slot, int probe)
4556{
4557 int rc;
4558
4559 if (!slot || !pci_slot_resetable(slot))
4560 return -ENOTTY;
4561
4562 if (!probe)
4563 pci_slot_lock(slot);
4564
4565 might_sleep();
4566
4567 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4568
4569 if (!probe)
4570 pci_slot_unlock(slot);
4571
4572 return rc;
4573}
4574
/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
4581int pci_probe_reset_slot(struct pci_slot *slot)
4582{
4583 return pci_slot_reset(slot, 1);
4584}
4585EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4586
/**
 * pci_reset_slot - reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots.  For instance, some slots may support slot power
 * control.  In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * function of the slot and any subordinate buses behind the slot are reset
 * through this function.  PCI config space of all devices in the slot and
 * behind the slot is saved before and restored after reset.
 *
 * Return 0 on success, non-zero on error.
 */
4602int pci_reset_slot(struct pci_slot *slot)
4603{
4604 int rc;
4605
4606 rc = pci_slot_reset(slot, 1);
4607 if (rc)
4608 return rc;
4609
4610 pci_slot_save_and_disable(slot);
4611
4612 rc = pci_slot_reset(slot, 0);
4613
4614 pci_slot_restore(slot);
4615
4616 return rc;
4617}
4618EXPORT_SYMBOL_GPL(pci_reset_slot);
4619
/**
 * pci_try_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * Same as above except return -EAGAIN if the slot cannot be locked.
 */
4626int pci_try_reset_slot(struct pci_slot *slot)
4627{
4628 int rc;
4629
4630 rc = pci_slot_reset(slot, 1);
4631 if (rc)
4632 return rc;
4633
4634 pci_slot_save_and_disable(slot);
4635
4636 if (pci_slot_trylock(slot)) {
4637 might_sleep();
4638 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4639 pci_slot_unlock(slot);
4640 } else
4641 rc = -EAGAIN;
4642
4643 pci_slot_restore(slot);
4644
4645 return rc;
4646}
4647EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4648
4649static int pci_bus_reset(struct pci_bus *bus, int probe)
4650{
4651 if (!bus->self || !pci_bus_resetable(bus))
4652 return -ENOTTY;
4653
4654 if (probe)
4655 return 0;
4656
4657 pci_bus_lock(bus);
4658
4659 might_sleep();
4660
4661 pci_reset_bridge_secondary_bus(bus->self);
4662
4663 pci_bus_unlock(bus);
4664
4665 return 0;
4666}
4667
/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
4674int pci_probe_reset_bus(struct pci_bus *bus)
4675{
4676 return pci_bus_reset(bus, 1);
4677}
4678EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4679
/**
 * pci_reset_bus - reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Do a bus reset on the given bus and any subordinate buses, saving
 * and restoring state of all devices.
 *
 * Return 0 on success, non-zero on error.
 */
4689int pci_reset_bus(struct pci_bus *bus)
4690{
4691 int rc;
4692
4693 rc = pci_bus_reset(bus, 1);
4694 if (rc)
4695 return rc;
4696
4697 pci_bus_save_and_disable(bus);
4698
4699 rc = pci_bus_reset(bus, 0);
4700
4701 pci_bus_restore(bus);
4702
4703 return rc;
4704}
4705EXPORT_SYMBOL_GPL(pci_reset_bus);
4706
/**
 * pci_try_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked.
 */
4713int pci_try_reset_bus(struct pci_bus *bus)
4714{
4715 int rc;
4716
4717 rc = pci_bus_reset(bus, 1);
4718 if (rc)
4719 return rc;
4720
4721 pci_bus_save_and_disable(bus);
4722
4723 if (pci_bus_trylock(bus)) {
4724 might_sleep();
4725 pci_reset_bridge_secondary_bus(bus->self);
4726 pci_bus_unlock(bus);
4727 } else
4728 rc = -EAGAIN;
4729
4730 pci_bus_restore(bus);
4731
4732 return rc;
4733}
4734EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4735
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 *    or appropriate error value.
 */
4743int pcix_get_max_mmrbc(struct pci_dev *dev)
4744{
4745 int cap;
4746 u32 stat;
4747
4748 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4749 if (!cap)
4750 return -EINVAL;
4751
4752 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4753 return -EINVAL;
4754
4755 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4756}
4757EXPORT_SYMBOL(pcix_get_max_mmrbc);
4758
/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 *    or appropriate error value.
 */
4766int pcix_get_mmrbc(struct pci_dev *dev)
4767{
4768 int cap;
4769 u16 cmd;
4770
4771 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4772 if (!cap)
4773 return -EINVAL;
4774
4775 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4776 return -EINVAL;
4777
4778 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4779}
4780EXPORT_SYMBOL(pcix_get_mmrbc);
4781
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count; some bridges have
 * errata that prevent this.
 */
4791int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4792{
4793 int cap;
4794 u32 stat, v, o;
4795 u16 cmd;
4796
4797 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4798 return -EINVAL;
4799
4800 v = ffs(mmrbc) - 10;
4801
4802 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4803 if (!cap)
4804 return -EINVAL;
4805
4806 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4807 return -EINVAL;
4808
4809 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4810 return -E2BIG;
4811
4812 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4813 return -EINVAL;
4814
4815 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4816 if (o != v) {
4817 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
4818 return -EIO;
4819
4820 cmd &= ~PCI_X_CMD_MAX_READ;
4821 cmd |= v << 2;
4822 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4823 return -EIO;
4824 }
4825 return 0;
4826}
4827EXPORT_SYMBOL(pcix_set_mmrbc);
4828
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 *    or appropriate error value.
 */
4836int pcie_get_readrq(struct pci_dev *dev)
4837{
4838 u16 ctl;
4839
4840 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4841
4842 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4843}
4844EXPORT_SYMBOL(pcie_get_readrq);
4845
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes.
 */
4854int pcie_set_readrq(struct pci_dev *dev, int rq)
4855{
4856 u16 v;
4857
4858 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
4859 return -EINVAL;
4860
 /*
  * If using the "performance" PCIe config, we clamp the read rq
  * size to the max packet size to prevent the host bridge from
  * generating requests larger than we can cope with.
  */
 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4868 int mps = pcie_get_mps(dev);
4869
4870 if (mps < rq)
4871 rq = mps;
4872 }
4873
4874 v = (ffs(rq) - 8) << 12;
4875
4876 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4877 PCI_EXP_DEVCTL_READRQ, v);
4878}
4879EXPORT_SYMBOL(pcie_set_readrq);
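
/*
 * Usage sketch (hypothetical value): a driver that performs best with
 * 512-byte read requests might call:
 *
 *	err = pcie_set_readrq(pdev, 512);
 *
 * The "(ffs(rq) - 8) << 12" above encodes the size into DEVCTL bits 14:12
 * as log2(rq) - 7, e.g. 512 bytes -> 2.
 */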
4880
/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes.
 */
4887int pcie_get_mps(struct pci_dev *dev)
4888{
4889 u16 ctl;
4890
4891 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4892
4893 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4894}
4895EXPORT_SYMBOL(pcie_get_mps);
4896
/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size.
 */
4905int pcie_set_mps(struct pci_dev *dev, int mps)
4906{
4907 u16 v;
4908
4909 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
4910 return -EINVAL;
4911
4912 v = ffs(mps) - 8;
4913 if (v > dev->pcie_mpss)
4914 return -EINVAL;
4915 v <<= 5;
4916
4917 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4918 PCI_EXP_DEVCTL_PAYLOAD, v);
4919}
4920EXPORT_SYMBOL(pcie_set_mps);
4921
/**
 * pcie_get_minimum_link - determine minimum link settings of a PCI device
 * @dev: PCI device to query
 * @speed: storage for minimum speed
 * @width: storage for minimum width
 *
 * This function will walk up the PCI device chain and determine the minimum
 * link width and speed of the device.
 */
4931int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4932 enum pcie_link_width *width)
4933{
4934 int ret;
4935
4936 *speed = PCI_SPEED_UNKNOWN;
4937 *width = PCIE_LNK_WIDTH_UNKNOWN;
4938
4939 while (dev) {
4940 u16 lnksta;
4941 enum pci_bus_speed next_speed;
4942 enum pcie_link_width next_width;
4943
4944 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4945 if (ret)
4946 return ret;
4947
4948 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4949 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4950 PCI_EXP_LNKSTA_NLW_SHIFT;
4951
4952 if (next_speed < *speed)
4953 *speed = next_speed;
4954
4955 if (next_width < *width)
4956 *width = next_width;
4957
4958 dev = dev->bus->self;
4959 }
4960
4961 return 0;
4962}
4963EXPORT_SYMBOL(pcie_get_minimum_link);
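
/*
 * Usage sketch (hypothetical driver): some high-bandwidth device drivers
 * use this to warn when the slot bottlenecks the device:
 *
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *
 *	if (!pcie_get_minimum_link(pdev, &speed, &width) &&
 *	    (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8))
 *		dev_warn(&pdev->dev, "PCIe link is a bottleneck\n");
 */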
4964
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes bar mask from the type of resource.
 */
4972int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4973{
4974 int i, bars = 0;
4975 for (i = 0; i < PCI_NUM_RESOURCES; i++)
4976 if (pci_resource_flags(dev, i) & flags)
4977 bars |= (1 << i);
4978 return bars;
4979}
4980EXPORT_SYMBOL(pci_select_bars);
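
/*
 * Usage sketch: pci_select_bars() pairs naturally with the selected-region
 * helpers above, e.g. to claim only the memory BARs of a device
 * (hypothetical caller):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "foo");
 */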
4981
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;
4984
4985void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4986{
4987 arch_set_vga_state = func;
4988}
4989
4990static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4991 unsigned int command_bits, u32 flags)
4992{
4993 if (arch_set_vga_state)
4994 return arch_set_vga_state(dev, decode, command_bits,
4995 flags);
4996 return 0;
4997}
4998
/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
5007int pci_set_vga_state(struct pci_dev *dev, bool decode,
5008 unsigned int command_bits, u32 flags)
5009{
5010 struct pci_bus *bus;
5011 struct pci_dev *bridge;
5012 u16 cmd;
5013 int rc;
5014
 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
	 (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
5016
 /* ARCH specific VGA enables */
 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5019 if (rc)
5020 return rc;
5021
5022 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5023 pci_read_config_word(dev, PCI_COMMAND, &cmd);
 if (decode)
5025 cmd |= command_bits;
5026 else
5027 cmd &= ~command_bits;
5028 pci_write_config_word(dev, PCI_COMMAND, cmd);
5029 }
5030
5031 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
5032 return 0;
5033
5034 bus = dev->bus;
5035 while (bus) {
5036 bridge = bus->self;
5037 if (bridge) {
5038 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5039 &cmd);
 if (decode)
5041 cmd |= PCI_BRIDGE_CTL_VGA;
5042 else
5043 cmd &= ~PCI_BRIDGE_CTL_VGA;
5044 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5045 cmd);
5046 }
5047 bus = bus->parent;
5048 }
5049 return 0;
5050}
5051
/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn: alias slot and function
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask.
 * It should be called early, preferably as PCI fixup header quirk.
 */
5060void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5061{
5062 if (!dev->dma_alias_mask)
5063 dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
5064 sizeof(long), GFP_KERNEL);
5065 if (!dev->dma_alias_mask) {
5066 dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
5067 return;
5068 }
5069
5070 set_bit(devfn, dev->dma_alias_mask);
5071 dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
5072 PCI_SLOT(devfn), PCI_FUNC(devfn));
5073}
5074
5075bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5076{
5077 return (dev1->dma_alias_mask &&
5078 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5079 (dev2->dma_alias_mask &&
5080 test_bit(dev1->devfn, dev2->dma_alias_mask));
5081}
5082
5083bool pci_device_is_present(struct pci_dev *pdev)
5084{
5085 u32 v;
5086
5087 if (pci_dev_is_disconnected(pdev))
5088 return false;
5089 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5090}
5091EXPORT_SYMBOL_GPL(pci_device_is_present);
5092
5093void pci_ignore_hotplug(struct pci_dev *dev)
5094{
5095 struct pci_dev *bridge = dev->bus->self;
5096
5097 dev->ignore_hotplug = 1;
5098
5099 if (bridge)
5100 bridge->ignore_hotplug = 1;
5101}
5102EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
5103
5104resource_size_t __weak pcibios_default_alignment(void)
5105{
5106 return 0;
5107}
5108
5109#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
5110static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
5111static DEFINE_SPINLOCK(resource_alignment_lock);
5112
/**
 * pci_specified_resource_alignment - get resource alignment specified by user
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
5121static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5122 bool *resize)
5123{
5124 int seg, bus, slot, func, align_order, count;
5125 unsigned short vendor, device, subsystem_vendor, subsystem_device;
5126 resource_size_t align = pcibios_default_alignment();
5127 char *p;
5128
5129 spin_lock(&resource_alignment_lock);
5130 p = resource_alignment_param;
5131 if (!*p && !align)
5132 goto out;
5133 if (pci_has_flag(PCI_PROBE_ONLY)) {
5134 align = 0;
5135 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5136 goto out;
5137 }
5138
5139 while (*p) {
5140 count = 0;
5141 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5142 p[count] == '@') {
5143 p += count + 1;
5144 } else {
5145 align_order = -1;
5146 }
5147 if (strncmp(p, "pci:", 4) == 0) {
 /* PCI vendor/device (subvendor/subdevice) IDs are specified */
 p += 4;
5150 if (sscanf(p, "%hx:%hx:%hx:%hx%n",
5151 &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
5152 if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
5153 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
5154 p);
5155 break;
5156 }
5157 subsystem_vendor = subsystem_device = 0;
5158 }
5159 p += count;
5160 if ((!vendor || (vendor == dev->vendor)) &&
5161 (!device || (device == dev->device)) &&
5162 (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
5163 (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
5164 *resize = true;
5165 if (align_order == -1)
5166 align = PAGE_SIZE;
5167 else
5168 align = 1 << align_order;
5169
5170 break;
5171 }
5172 }
5173 else {
5174 if (sscanf(p, "%x:%x:%x.%x%n",
5175 &seg, &bus, &slot, &func, &count) != 4) {
5176 seg = 0;
5177 if (sscanf(p, "%x:%x.%x%n",
5178 &bus, &slot, &func, &count) != 3) {
 /* Invalid format */
 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
5181 p);
5182 break;
5183 }
5184 }
5185 p += count;
5186 if (seg == pci_domain_nr(dev->bus) &&
5187 bus == dev->bus->number &&
5188 slot == PCI_SLOT(dev->devfn) &&
5189 func == PCI_FUNC(dev->devfn)) {
5190 *resize = true;
5191 if (align_order == -1)
5192 align = PAGE_SIZE;
5193 else
5194 align = 1 << align_order;
5195
5196 break;
5197 }
5198 }
5199 if (*p != ';' && *p != ',') {
 /* End of param or invalid format */
 break;
5202 }
5203 p++;
5204 }
5205out:
5206 spin_unlock(&resource_alignment_lock);
5207 return align;
5208}
5209
5210static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
5211 resource_size_t align, bool resize)
5212{
5213 struct resource *r = &dev->resource[bar];
5214 resource_size_t size;
5215
5216 if (!(r->flags & IORESOURCE_MEM))
5217 return;
5218
5219 if (r->flags & IORESOURCE_PCI_FIXED) {
5220 dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
5221 bar, r, (unsigned long long)align);
5222 return;
5223 }
5224
5225 size = resource_size(r);
5226 if (size >= align)
5227 return;
5228
 /*
  * Increase the alignment of the resource.  There are two ways we
  * can do this:
  *
  * 1) Increase the size of the resource.  BARs are aligned on their
  *    size, so when we reallocate space for this resource, we'll
  *    allocate it with the larger alignment.  This also prevents
  *    assignment of any other BARs inside the alignment region, so
  *    if we're requesting page alignment, this means no other BARs
  *    will share the page.
  *
  *    The disadvantage is that this makes the resource larger than
  *    the hardware BAR, which may break drivers that compute things
  *    based on the resource size, e.g., to find registers at a
  *    fixed offset before the end of the BAR.
  *
  * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
  *    set r->start to the desired alignment.  By itself this
  *    doesn't prevent other BARs being put inside the alignment
  *    region, but if we realign *every* resource of every device in
  *    the system, none of them will share an alignment region.
  *
  * When the user has requested alignment for only some devices via
  * the "pci=resource_alignment" argument, "resize" is true and we
  * use the first method.  Otherwise we assume we're aligning all
  * devices and use the second.
  */
5257 dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
5258 bar, r, (unsigned long long)align);
5259
5260 if (resize) {
5261 r->start = 0;
5262 r->end = align - 1;
5263 } else {
5264 r->flags &= ~IORESOURCE_SIZEALIGN;
5265 r->flags |= IORESOURCE_STARTALIGN;
5266 r->start = align;
5267 r->end = r->start + size - 1;
5268 }
5269 r->flags |= IORESOURCE_UNSET;
5270}
5271
5272
5273
5274
5275
5276
5277
5278
5279void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5280{
5281 int i;
5282 struct resource *r;
5283 resource_size_t align;
5284 u16 command;
5285 bool resize = false;
5286
 /*
  * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
  * 3.4.1.11.  Their resources are allocated from the space
  * described by the VF BARx register in the PF's SR-IOV capability.
  * There is no way to realign them independently here.
  */
5293 if (dev->is_virtfn)
5294 return;
5295
 /* check if alignment was specified via kernel parameter or sysfs */
 align = pci_specified_resource_alignment(dev, &resize);
5298 if (!align)
5299 return;
5300
5301 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5302 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
5303 dev_warn(&dev->dev,
5304 "Can't reassign resources to host bridge.\n");
5305 return;
5306 }
5307
5308 dev_info(&dev->dev,
5309 "Disabling memory decoding and releasing memory resources.\n");
5310 pci_read_config_word(dev, PCI_COMMAND, &command);
5311 command &= ~PCI_COMMAND_MEMORY;
5312 pci_write_config_word(dev, PCI_COMMAND, command);
5313
5314 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
5315 pci_request_resource_alignment(dev, i, align, resize);
5316
 /*
  * Need to disable bridge's resource window,
  * to enable the kernel to reassign new resource
  * window later on.
  */
 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
5323 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
5324 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
5325 r = &dev->resource[i];
5326 if (!(r->flags & IORESOURCE_MEM))
5327 continue;
5328 r->flags |= IORESOURCE_UNSET;
5329 r->end = resource_size(r) - 1;
5330 r->start = 0;
5331 }
5332 pci_disable_bridge_window(dev);
5333 }
5334}
5335
5336static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
5337{
5338 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
5339 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
5340 spin_lock(&resource_alignment_lock);
5341 strncpy(resource_alignment_param, buf, count);
5342 resource_alignment_param[count] = '\0';
5343 spin_unlock(&resource_alignment_lock);
5344 return count;
5345}
5346
5347static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
5348{
5349 size_t count;
5350 spin_lock(&resource_alignment_lock);
5351 count = snprintf(buf, size, "%s", resource_alignment_param);
5352 spin_unlock(&resource_alignment_lock);
5353 return count;
5354}
5355
5356static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
5357{
5358 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
5359}
5360
5361static ssize_t pci_resource_alignment_store(struct bus_type *bus,
5362 const char *buf, size_t count)
5363{
5364 return pci_set_resource_alignment_param(buf, count);
5365}
5366
5367static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
5368 pci_resource_alignment_store);
5369
5370static int __init pci_resource_alignment_sysfs_init(void)
5371{
5372 return bus_create_file(&pci_bus_type,
5373 &bus_attr_resource_alignment);
5374}
5375late_initcall(pci_resource_alignment_sysfs_init);
5376
5377static void pci_no_domains(void)
5378{
5379#ifdef CONFIG_PCI_DOMAINS
5380 pci_domains_supported = 0;
5381#endif
5382}
5383
5384#ifdef CONFIG_PCI_DOMAINS
5385static atomic_t __domain_nr = ATOMIC_INIT(-1);
5386
5387int pci_get_new_domain_nr(void)
5388{
5389 return atomic_inc_return(&__domain_nr);
5390}
5391
5392#ifdef CONFIG_PCI_DOMAINS_GENERIC
5393static int of_pci_bus_find_domain_nr(struct device *parent)
5394{
5395 static int use_dt_domains = -1;
5396 int domain = -1;
5397
5398 if (parent)
5399 domain = of_get_pci_domain_nr(parent->of_node);
5400
 /*
  * Check DT domain and use_dt_domains values.
  *
  * If DT domain property is valid (domain >= 0) and
  * use_dt_domains != 0, the DT assignment is valid since this means
  * we have not previously allocated a domain number by using
  * pci_get_new_domain_nr(); we should also update use_dt_domains to
  * 1, to indicate that we have just assigned a domain number from
  * DT.
  *
  * If DT domain property value is not valid (ie domain < 0), and we
  * have not previously assigned a domain number from DT
  * (use_dt_domains != 1) we should assign a domain number by
  * using the:
  *
  * pci_get_new_domain_nr()
  *
  * API and update the use_dt_domains value to keep track of method we
  * are using to assign domain numbers (use_dt_domains = 0).
  *
  * All other combinations imply we have a platform that is trying
  * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
  * which is a recipe for domain mishandling and it is not allowed.
  */
5426 if (domain >= 0 && use_dt_domains) {
5427 use_dt_domains = 1;
5428 } else if (domain < 0 && use_dt_domains != 1) {
5429 use_dt_domains = 0;
5430 domain = pci_get_new_domain_nr();
5431 } else {
5432 dev_err(parent, "Node %pOF has inconsistent \"linux,pci-domain\" property in DT\n",
5433 parent->of_node);
5434 domain = -1;
5435 }
5436
5437 return domain;
5438}
5439
5440int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5441{
5442 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
5443 acpi_pci_bus_find_domain_nr(bus);
5444}
5445#endif
5446#endif
5447
/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
5455int __weak pci_ext_cfg_avail(void)
5456{
5457 return 1;
5458}
5459
5460void __weak pci_fixup_cardbus(struct pci_bus *bus)
5461{
5462}
5463EXPORT_SYMBOL(pci_fixup_cardbus);
5464
5465static int __init pci_setup(char *str)
5466{
5467 while (str) {
5468 char *k = strchr(str, ',');
5469 if (k)
5470 *k++ = 0;
5471 if (*str && (str = pcibios_setup(str)) && *str) {
5472 if (!strcmp(str, "nomsi")) {
5473 pci_no_msi();
5474 } else if (!strcmp(str, "noaer")) {
5475 pci_no_aer();
5476 } else if (!strncmp(str, "realloc=", 8)) {
5477 pci_realloc_get_opt(str + 8);
5478 } else if (!strncmp(str, "realloc", 7)) {
5479 pci_realloc_get_opt("on");
5480 } else if (!strcmp(str, "nodomains")) {
5481 pci_no_domains();
5482 } else if (!strncmp(str, "noari", 5)) {
5483 pcie_ari_disabled = true;
5484 } else if (!strncmp(str, "cbiosize=", 9)) {
5485 pci_cardbus_io_size = memparse(str + 9, &str);
5486 } else if (!strncmp(str, "cbmemsize=", 10)) {
5487 pci_cardbus_mem_size = memparse(str + 10, &str);
5488 } else if (!strncmp(str, "resource_alignment=", 19)) {
5489 pci_set_resource_alignment_param(str + 19,
5490 strlen(str + 19));
5491 } else if (!strncmp(str, "ecrc=", 5)) {
5492 pcie_ecrc_get_policy(str + 5);
5493 } else if (!strncmp(str, "hpiosize=", 9)) {
5494 pci_hotplug_io_size = memparse(str + 9, &str);
5495 } else if (!strncmp(str, "hpmemsize=", 10)) {
5496 pci_hotplug_mem_size = memparse(str + 10, &str);
5497 } else if (!strncmp(str, "hpbussize=", 10)) {
5498 pci_hotplug_bus_size =
5499 simple_strtoul(str + 10, &str, 0);
5500 if (pci_hotplug_bus_size > 0xff)
5501 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
5502 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
5503 pcie_bus_config = PCIE_BUS_TUNE_OFF;
5504 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
5505 pcie_bus_config = PCIE_BUS_SAFE;
5506 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
5507 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5508 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
5509 pcie_bus_config = PCIE_BUS_PEER2PEER;
5510 } else if (!strncmp(str, "pcie_scan_all", 13)) {
5511 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
5512 } else {
5513 printk(KERN_ERR "PCI: Unknown option `%s'\n",
5514 str);
5515 }
5516 }
5517 str = k;
5518 }
5519 return 0;
5520}
5521early_param("pci", pci_setup);
5522