// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbiosize=nnM,cbmemsize=nnM modifies the default sizes above */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO
	 * resource.
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO
	 * resource.
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
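
/*
 * Example (illustrative sketch, not from the original file): a typical
 * caller maps a BAR once the device is enabled; FOO_REG is a made-up
 * register offset:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0, regs + FOO_REG);
 */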

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
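
/*
 * Example (illustrative sketch): locating the Power Management capability
 * before reading its registers, mirroring what pci_pm_init() below does:
 *
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 */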

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices even before a
 * pci_dev structure is set up.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the request capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		pci_info(dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored, only needed for pci_walk_bus() compatibility
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
static void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by corresponding bridge,
		 * because have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			if (dev->d3cold_delay)
				msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of
 *				 devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Power off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);
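
/*
 * Example (illustrative sketch): a driver suspend path typically pairs
 * this with pci_save_state(), and returns to D0 on resume:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *	...
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */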

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;

	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
1114
1115static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1116 u32 saved_val, int retry)
1117{
1118 u32 val;
1119
1120 pci_read_config_dword(pdev, offset, &val);
1121 if (val == saved_val)
1122 return;
1123
1124 for (;;) {
1125 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1126 offset, val, saved_val);
1127 pci_write_config_dword(pdev, offset, saved_val);
1128 if (retry-- <= 0)
1129 return;
1130
1131 pci_read_config_dword(pdev, offset, &val);
1132 if (val == saved_val)
1133 return;
1134
1135 mdelay(1);
1136 }
1137}
1138
1139static void pci_restore_config_space_range(struct pci_dev *pdev,
1140 int start, int end, int retry)
1141{
1142 int index;
1143
1144 for (index = end; index >= start; index--)
1145 pci_restore_config_dword(pdev, 4 * index,
1146 pdev->saved_config_space[index],
1147 retry);
1148}
1149
1150static void pci_restore_config_space(struct pci_dev *pdev)
1151{
1152 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1153 pci_restore_config_space_range(pdev, 10, 15, 0);
1154
1155 pci_restore_config_space_range(pdev, 4, 9, 10);
1156 pci_restore_config_space_range(pdev, 0, 3, 0);
1157 } else {
1158 pci_restore_config_space_range(pdev, 0, 15, 0);
1159 }
1160}
1161
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
1192
1193struct pci_saved_state {
1194 u32 config_space[16];
1195 struct pci_cap_saved_data cap[0];
1196};
1197
1198
1199
1200
1201
1202
1203
1204
1205struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1206{
1207 struct pci_saved_state *state;
1208 struct pci_cap_saved_state *tmp;
1209 struct pci_cap_saved_data *cap;
1210 size_t size;
1211
1212 if (!dev->state_saved)
1213 return NULL;
1214
1215 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1216
1217 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1218 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1219
1220 state = kzalloc(size, GFP_KERNEL);
1221 if (!state)
1222 return NULL;
1223
1224 memcpy(state->config_space, dev->saved_config_space,
1225 sizeof(state->config_space));
1226
1227 cap = state->cap;
1228 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1229 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1230 memcpy(cap, &tmp->cap, len);
1231 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1232 }
1233
1234
1235 return state;
1236}
1237EXPORT_SYMBOL_GPL(pci_store_saved_state);
1238
1239
1240
1241
1242
1243
1244int pci_load_saved_state(struct pci_dev *dev,
1245 struct pci_saved_state *state)
1246{
1247 struct pci_cap_saved_data *cap;
1248
1249 dev->state_saved = false;
1250
1251 if (!state)
1252 return 0;
1253
1254 memcpy(dev->saved_config_space, state->config_space,
1255 sizeof(state->config_space));
1256
1257 cap = state->cap;
1258 while (cap->size) {
1259 struct pci_cap_saved_state *tmp;
1260
1261 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1262 if (!tmp || tmp->cap.size != cap->size)
1263 return -EINVAL;
1264
1265 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1266 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1267 sizeof(struct pci_cap_saved_data) + cap->size);
1268 }
1269
1270 dev->state_saved = true;
1271 return 0;
1272}
1273EXPORT_SYMBOL_GPL(pci_load_saved_state);
1274
1275
1276
1277
1278
1279
1280
1281int pci_load_and_free_saved_state(struct pci_dev *dev,
1282 struct pci_saved_state **state)
1283{
1284 int ret = pci_load_saved_state(dev, *state);
1285 kfree(*state);
1286 *state = NULL;
1287 return ret;
1288}
1289EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
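
/*
 * Example (illustrative sketch): a driver that resets a device can
 * snapshot its state and reapply it around the reset:
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... reset the device ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */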
1290
1291int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1292{
1293 return pci_enable_resources(dev, bars);
1294}
1295
1296static int do_pci_enable_device(struct pci_dev *dev, int bars)
1297{
1298 int err;
1299 struct pci_dev *bridge;
1300 u16 cmd;
1301 u8 pin;
1302
1303 err = pci_set_power_state(dev, PCI_D0);
1304 if (err < 0 && err != -EIO)
1305 return err;
1306
1307 bridge = pci_upstream_bridge(dev);
1308 if (bridge)
1309 pcie_aspm_powersave_config_link(bridge);
1310
1311 err = pcibios_enable_device(dev, bars);
1312 if (err < 0)
1313 return err;
1314 pci_fixup_device(pci_fixup_enable, dev);
1315
1316 if (dev->msi_enabled || dev->msix_enabled)
1317 return 0;
1318
1319 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1320 if (pin) {
1321 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1322 if (cmd & PCI_COMMAND_INTX_DISABLE)
1323 pci_write_config_word(dev, PCI_COMMAND,
1324 cmd & ~PCI_COMMAND_INTX_DISABLE);
1325 }
1326
1327 return 0;
1328}
1329

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);
1344
1345static void pci_enable_bridge(struct pci_dev *dev)
1346{
1347 struct pci_dev *bridge;
1348 int retval;
1349
1350 bridge = pci_upstream_bridge(dev);
1351 if (bridge)
1352 pci_enable_bridge(bridge);
1353
1354 if (pci_is_enabled(dev)) {
1355 if (!dev->is_busmaster)
1356 pci_set_master(dev);
1357 return;
1358 }
1359
1360 retval = pci_enable_device(dev);
1361 if (retval)
1362 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1363 retval);
1364 pci_set_master(dev);
1365}
1366
1367static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1368{
1369 struct pci_dev *bridge;
1370 int err;
1371 int i, bars = 0;
1372
1373
1374
1375
1376
1377
1378
1379 if (dev->pm_cap) {
1380 u16 pmcsr;
1381 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1382 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1383 }
1384
1385 if (atomic_inc_return(&dev->enable_cnt) > 1)
1386 return 0;
1387
1388 bridge = pci_upstream_bridge(dev);
1389 if (bridge)
1390 pci_enable_bridge(bridge);
1391
1392
1393 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1394 if (dev->resource[i].flags & flags)
1395 bars |= (1 << i);
1396 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1397 if (dev->resource[i].flags & flags)
1398 bars |= (1 << i);
1399
1400 err = do_pci_enable_device(dev, bars);
1401 if (err < 0)
1402 atomic_dec(&dev->enable_cnt);
1403 return err;
1404}
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414int pci_enable_device_io(struct pci_dev *dev)
1415{
1416 return pci_enable_device_flags(dev, IORESOURCE_IO);
1417}
1418EXPORT_SYMBOL(pci_enable_device_io);
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428int pci_enable_device_mem(struct pci_dev *dev)
1429{
1430 return pci_enable_device_flags(dev, IORESOURCE_MEM);
1431}
1432EXPORT_SYMBOL(pci_enable_device_mem);
1433
/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
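
/*
 * Example (illustrative sketch): the canonical probe sequence built on
 * pci_enable_device(); "foo" and the error label are made up:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto err_disable_device;
 *	pci_set_master(pdev);
 */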

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};
1465
1466static void pcim_release(struct device *gendev, void *res)
1467{
1468 struct pci_dev *dev = to_pci_dev(gendev);
1469 struct pci_devres *this = res;
1470 int i;
1471
1472 if (dev->msi_enabled)
1473 pci_disable_msi(dev);
1474 if (dev->msix_enabled)
1475 pci_disable_msix(dev);
1476
1477 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1478 if (this->region_mask & (1 << i))
1479 pci_release_region(dev, i);
1480
1481 if (this->mwi)
1482 pci_clear_mwi(dev);
1483
1484 if (this->restore_intx)
1485 pci_intx(dev, this->orig_intx);
1486
1487 if (this->enabled && !this->pinned)
1488 pci_disable_device(dev);
1489}
1490
1491static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1492{
1493 struct pci_devres *dr, *new_dr;
1494
1495 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1496 if (dr)
1497 return dr;
1498
1499 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1500 if (!new_dr)
1501 return NULL;
1502 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1503}
1504
1505static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1506{
1507 if (pci_is_managed(pdev))
1508 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1509 return NULL;
1510}
1511
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
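
/*
 * Example (illustrative sketch): with the managed variant no explicit
 * pci_disable_device() is needed in error or remove paths, because
 * devres undoes the enable when the driver unbinds; "foo_probe" is a
 * made-up driver hook:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */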
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546void pcim_pin_device(struct pci_dev *pdev)
1547{
1548 struct pci_devres *dr;
1549
1550 dr = find_pci_dr(pdev);
1551 WARN_ON(!dr || !dr->enabled);
1552 if (dr)
1553 dr->pinned = 1;
1554}
1555EXPORT_SYMBOL(pcim_pin_device);
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565int __weak pcibios_add_device(struct pci_dev *dev)
1566{
1567 return 0;
1568}
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578void __weak pcibios_release_device(struct pci_dev *dev) {}
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588void __weak pcibios_disable_device(struct pci_dev *dev) {}
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1600
1601static void do_pci_disable_device(struct pci_dev *dev)
1602{
1603 u16 pci_command;
1604
1605 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1606 if (pci_command & PCI_COMMAND_MASTER) {
1607 pci_command &= ~PCI_COMMAND_MASTER;
1608 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1609 }
1610
1611 pcibios_disable_device(dev);
1612}
1613
1614
1615
1616
1617
1618
1619
1620
1621void pci_disable_enabled_device(struct pci_dev *dev)
1622{
1623 if (pci_is_enabled(dev))
1624 do_pci_disable_device(dev);
1625}
1626
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1667 enum pcie_reset_state state)
1668{
1669 return -EINVAL;
1670}
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1681{
1682 return pcibios_set_pcie_reset_state(dev, state);
1683}
1684EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1685
/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
1694bool pci_check_pme_status(struct pci_dev *dev)
1695{
1696 int pmcsr_pos;
1697 u16 pmcsr;
1698 bool ret = false;
1699
1700 if (!dev->pm_cap)
1701 return false;
1702
1703 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1704 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1705 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1706 return false;
1707
1708
1709 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1710 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1711
1712 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1713 ret = true;
1714 }
1715
1716 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1717
1718 return ret;
1719}
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1730{
1731 if (pme_poll_reset && dev->pme_poll)
1732 dev->pme_poll = false;
1733
1734 if (pci_check_pme_status(dev)) {
1735 pci_wakeup_event(dev);
1736 pm_request_resume(&dev->dev);
1737 }
1738 return 0;
1739}
1740
1741
1742
1743
1744
1745void pci_pme_wakeup_bus(struct pci_bus *bus)
1746{
1747 if (bus)
1748 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1749}
1750
/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
1757bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1758{
1759 if (!dev->pm_cap)
1760 return false;
1761
1762 return !!(dev->pme_support & (1 << state));
1763}
1764EXPORT_SYMBOL(pci_pme_capable);
1765
1766static void pci_pme_list_scan(struct work_struct *work)
1767{
1768 struct pci_pme_device *pme_dev, *n;
1769
1770 mutex_lock(&pci_pme_list_mutex);
1771 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1772 if (pme_dev->dev->pme_poll) {
1773 struct pci_dev *bridge;
1774
1775 bridge = pme_dev->dev->bus->self;
1776
1777
1778
1779
1780
1781 if (bridge && bridge->current_state != PCI_D0)
1782 continue;
1783 pci_pme_wakeup(pme_dev->dev, NULL);
1784 } else {
1785 list_del(&pme_dev->list);
1786 kfree(pme_dev);
1787 }
1788 }
1789 if (!list_empty(&pci_pme_list))
1790 queue_delayed_work(system_freezable_wq, &pci_pme_work,
1791 msecs_to_jiffies(PME_TIMEOUT));
1792 mutex_unlock(&pci_pme_list_mutex);
1793}
1794
1795static void __pci_pme_active(struct pci_dev *dev, bool enable)
1796{
1797 u16 pmcsr;
1798
1799 if (!dev->pme_support)
1800 return;
1801
1802 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1803
1804 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1805 if (!enable)
1806 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1807
1808 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1809}
1810
1811
1812
1813
1814
1815void pci_pme_restore(struct pci_dev *dev)
1816{
1817 u16 pmcsr;
1818
1819 if (!dev->pme_support)
1820 return;
1821
1822 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1823 if (dev->wakeup_prepared) {
1824 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1825 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
1826 } else {
1827 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1828 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1829 }
1830 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1831}
1832
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
1841void pci_pme_active(struct pci_dev *dev, bool enable)
1842{
1843 __pci_pme_active(dev, enable);
	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */
1865 if (dev->pme_poll) {
1866 struct pci_pme_device *pme_dev;
1867 if (enable) {
1868 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1869 GFP_KERNEL);
1870 if (!pme_dev) {
1871 pci_warn(dev, "can't enable PME#\n");
1872 return;
1873 }
1874 pme_dev->dev = dev;
1875 mutex_lock(&pci_pme_list_mutex);
1876 list_add(&pme_dev->list, &pci_pme_list);
1877 if (list_is_singular(&pci_pme_list))
1878 queue_delayed_work(system_freezable_wq,
1879 &pci_pme_work,
1880 msecs_to_jiffies(PME_TIMEOUT));
1881 mutex_unlock(&pci_pme_list_mutex);
1882 } else {
1883 mutex_lock(&pci_pme_list_mutex);
1884 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1885 if (pme_dev->dev == dev) {
1886 list_del(&pme_dev->list);
1887 kfree(pme_dev);
1888 break;
1889 }
1890 }
1891 mutex_unlock(&pci_pme_list_mutex);
1892 }
1893 }
1894
1895 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
1896}
1897EXPORT_SYMBOL(pci_pme_active);
1898
/**
 * pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
1918int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1919{
1920 int ret = 0;
1921
1922
1923
1924
1925
1926 if (pci_has_subordinate(dev))
1927 return 0;
1928
1929
1930 if (!!enable == !!dev->wakeup_prepared)
1931 return 0;
1932
1933
1934
1935
1936
1937
1938
1939 if (enable) {
1940 int error;
1941
1942 if (pci_pme_capable(dev, state))
1943 pci_pme_active(dev, true);
1944 else
1945 ret = 1;
1946 error = platform_pci_set_wakeup(dev, true);
1947 if (ret)
1948 ret = error;
1949 if (!ret)
1950 dev->wakeup_prepared = true;
1951 } else {
1952 platform_pci_set_wakeup(dev, false);
1953 pci_pme_active(dev, false);
1954 dev->wakeup_prepared = false;
1955 }
1956
1957 return ret;
1958}
1959EXPORT_SYMBOL(pci_enable_wake);
1960
/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
1975int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1976{
1977 return pci_pme_capable(dev, PCI_D3cold) ?
1978 pci_enable_wake(dev, PCI_D3cold, enable) :
1979 pci_enable_wake(dev, PCI_D3hot, enable);
1980}
1981EXPORT_SYMBOL(pci_wake_from_d3);
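
/*
 * Example (illustrative sketch): a network driver enabling Wake-on-LAN
 * from its suspend hook; "wol_enabled" is a made-up driver flag:
 *
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */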
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
1993{
1994 pci_power_t target_state = PCI_D3hot;
1995
1996 if (platform_pci_power_manageable(dev)) {
1997
1998
1999
2000
2001 pci_power_t state = platform_pci_choose_state(dev);
2002
2003 switch (state) {
2004 case PCI_POWER_ERROR:
2005 case PCI_UNKNOWN:
2006 break;
2007 case PCI_D1:
2008 case PCI_D2:
2009 if (pci_no_d1d2(dev))
2010 break;
2011 default:
2012 target_state = state;
2013 }
2014
2015 return target_state;
2016 }
2017
2018 if (!dev->pm_cap)
2019 target_state = PCI_D0;
2020
2021
2022
2023
2024
2025
2026 if (dev->current_state == PCI_D3cold)
2027 target_state = PCI_D3cold;
2028
2029 if (wakeup) {
2030
2031
2032
2033
2034
2035 if (dev->pme_support) {
2036 while (target_state
2037 && !(dev->pme_support & (1 << target_state)))
2038 target_state--;
2039 }
2040 }
2041
2042 return target_state;
2043}
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053int pci_prepare_to_sleep(struct pci_dev *dev)
2054{
2055 bool wakeup = device_may_wakeup(&dev->dev);
2056 pci_power_t target_state = pci_target_state(dev, wakeup);
2057 int error;
2058
2059 if (target_state == PCI_POWER_ERROR)
2060 return -EIO;
2061
2062 pci_enable_wake(dev, target_state, wakeup);
2063
2064 error = pci_set_power_state(dev, target_state);
2065
2066 if (error)
2067 pci_enable_wake(dev, target_state, false);
2068
2069 return error;
2070}
2071EXPORT_SYMBOL(pci_prepare_to_sleep);
2072
2073
2074
2075
2076
2077
2078
2079int pci_back_from_sleep(struct pci_dev *dev)
2080{
2081 pci_enable_wake(dev, PCI_D0, false);
2082 return pci_set_power_state(dev, PCI_D0);
2083}
2084EXPORT_SYMBOL(pci_back_from_sleep);
2085
2086
2087
2088
2089
2090
2091
2092
2093int pci_finish_runtime_suspend(struct pci_dev *dev)
2094{
2095 pci_power_t target_state;
2096 int error;
2097
2098 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2099 if (target_state == PCI_POWER_ERROR)
2100 return -EIO;
2101
2102 dev->runtime_d3cold = target_state == PCI_D3cold;
2103
2104 pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2105
2106 error = pci_set_power_state(dev, target_state);
2107
2108 if (error) {
2109 pci_enable_wake(dev, target_state, false);
2110 dev->runtime_d3cold = false;
2111 }
2112
2113 return error;
2114}
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124bool pci_dev_run_wake(struct pci_dev *dev)
2125{
2126 struct pci_bus *bus = dev->bus;
2127
2128 if (device_can_wakeup(&dev->dev))
2129 return true;
2130
2131 if (!dev->pme_support)
2132 return false;
2133
2134
2135 if (!pci_pme_capable(dev, pci_target_state(dev, false)))
2136 return false;
2137
2138 while (bus->parent) {
2139 struct pci_dev *bridge = bus->self;
2140
2141 if (device_can_wakeup(&bridge->dev))
2142 return true;
2143
2144 bus = bus->parent;
2145 }
2146
2147
2148 if (bus->bridge)
2149 return device_can_wakeup(bus->bridge);
2150
2151 return false;
2152}
2153EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2168{
2169 struct device *dev = &pci_dev->dev;
2170 bool wakeup = device_may_wakeup(dev);
2171
2172 if (!pm_runtime_suspended(dev)
2173 || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
2174 || platform_pci_need_resume(pci_dev))
2175 return false;
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187 spin_lock_irq(&dev->power.lock);
2188
2189 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
2190 !wakeup)
2191 __pci_pme_active(pci_dev, false);
2192
2193 spin_unlock_irq(&dev->power.lock);
2194 return true;
2195}
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205void pci_dev_complete_resume(struct pci_dev *pci_dev)
2206{
2207 struct device *dev = &pci_dev->dev;
2208
2209 if (!pci_dev_run_wake(pci_dev))
2210 return;
2211
2212 spin_lock_irq(&dev->power.lock);
2213
2214 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2215 __pci_pme_active(pci_dev, true);
2216
2217 spin_unlock_irq(&dev->power.lock);
2218}
2219
2220void pci_config_pm_runtime_get(struct pci_dev *pdev)
2221{
2222 struct device *dev = &pdev->dev;
2223 struct device *parent = dev->parent;
2224
2225 if (parent)
2226 pm_runtime_get_sync(parent);
2227 pm_runtime_get_noresume(dev);
2228
2229
2230
2231
2232 pm_runtime_barrier(dev);
2233
2234
2235
2236
2237
2238 if (pdev->current_state == PCI_D3cold)
2239 pm_runtime_resume(dev);
2240}
2241
2242void pci_config_pm_runtime_put(struct pci_dev *pdev)
2243{
2244 struct device *dev = &pdev->dev;
2245 struct device *parent = dev->parent;
2246
2247 pm_runtime_put(dev);
2248 if (parent)
2249 pm_runtime_put_sync(parent);
2250}
2251
/**
 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
 * @bridge: Bridge to check
 *
 * This function checks if it is possible to move the bridge to D3.
 * Currently we only allow D3 for recent enough PCIe ports.
 */
2259bool pci_bridge_d3_possible(struct pci_dev *bridge)
2260{
2261 unsigned int year;
2262
2263 if (!pci_is_pcie(bridge))
2264 return false;
2265
2266 switch (pci_pcie_type(bridge)) {
2267 case PCI_EXP_TYPE_ROOT_PORT:
2268 case PCI_EXP_TYPE_UPSTREAM:
2269 case PCI_EXP_TYPE_DOWNSTREAM:
2270 if (pci_bridge_d3_disable)
2271 return false;
2272
2273
2274
2275
2276
2277
2278
2279
2280 if (bridge->is_hotplug_bridge)
2281 return false;
2282
2283 if (pci_bridge_d3_force)
2284 return true;
2285
2286
2287
2288
2289
2290 if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
2291 year >= 2015) {
2292 return true;
2293 }
2294 break;
2295 }
2296
2297 return false;
2298}
2299
static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (/* The device needs to be allowed to go D3cold ... */
	    dev->no_d3cold || !dev->d3cold_allowed ||

	    /* ... and if it is wakeup capable to do so from D3cold. */
	    (device_may_wakeup(&dev->dev) &&
	     !pci_pme_capable(dev, PCI_D3cold)) ||

	    /* If it is a bridge it must be allowed to go to D3. */
	    !pci_power_manageable(dev))

		*d3cold_ok = false;

	return !*d3cold_ok;
}
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327void pci_bridge_d3_update(struct pci_dev *dev)
2328{
2329 bool remove = !device_is_registered(&dev->dev);
2330 struct pci_dev *bridge;
2331 bool d3cold_ok = true;
2332
2333 bridge = pci_upstream_bridge(dev);
2334 if (!bridge || !pci_bridge_d3_possible(bridge))
2335 return;
2336
2337
2338
2339
2340
2341 if (remove && bridge->bridge_d3)
2342 return;
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352 if (!remove)
2353 pci_dev_check_d3cold(dev, &d3cold_ok);
2354
2355
2356
2357
2358
2359
2360
2361 if (d3cold_ok && !bridge->bridge_d3)
2362 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2363 &d3cold_ok);
2364
2365 if (bridge->bridge_d3 != d3cold_ok) {
2366 bridge->bridge_d3 = d3cold_ok;
2367
2368 pci_bridge_d3_update(bridge);
2369 }
2370}
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380void pci_d3cold_enable(struct pci_dev *dev)
2381{
2382 if (dev->no_d3cold) {
2383 dev->no_d3cold = false;
2384 pci_bridge_d3_update(dev);
2385 }
2386}
2387EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397void pci_d3cold_disable(struct pci_dev *dev)
2398{
2399 if (!dev->no_d3cold) {
2400 dev->no_d3cold = true;
2401 pci_bridge_d3_update(dev);
2402 }
2403}
2404EXPORT_SYMBOL_GPL(pci_d3cold_disable);
2405
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
2410void pci_pm_init(struct pci_dev *dev)
2411{
2412 int pm;
2413 u16 pmc;
2414
2415 pm_runtime_forbid(&dev->dev);
2416 pm_runtime_set_active(&dev->dev);
2417 pm_runtime_enable(&dev->dev);
2418 device_enable_async_suspend(&dev->dev);
2419 dev->wakeup_prepared = false;
2420
2421 dev->pm_cap = 0;
2422 dev->pme_support = 0;
2423
2424
2425 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2426 if (!pm)
2427 return;
2428
2429 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2430
2431 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2432 pci_err(dev, "unsupported PM cap regs version (%u)\n",
2433 pmc & PCI_PM_CAP_VER_MASK);
2434 return;
2435 }
2436
2437 dev->pm_cap = pm;
2438 dev->d3_delay = PCI_PM_D3_WAIT;
2439 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2440 dev->bridge_d3 = pci_bridge_d3_possible(dev);
2441 dev->d3cold_allowed = true;
2442
2443 dev->d1_support = false;
2444 dev->d2_support = false;
2445 if (!pci_no_d1d2(dev)) {
2446 if (pmc & PCI_PM_CAP_D1)
2447 dev->d1_support = true;
2448 if (pmc & PCI_PM_CAP_D2)
2449 dev->d2_support = true;
2450
2451 if (dev->d1_support || dev->d2_support)
2452 pci_printk(KERN_DEBUG, dev, "supports%s%s\n",
2453 dev->d1_support ? " D1" : "",
2454 dev->d2_support ? " D2" : "");
2455 }
2456
2457 pmc &= PCI_PM_CAP_PME_MASK;
2458 if (pmc) {
2459 pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n",
2460 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2461 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2462 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2463 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2464 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2465 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2466 dev->pme_poll = true;
2467
2468
2469
2470
2471 device_set_wakeup_capable(&dev->dev, true);
2472
2473 pci_pme_active(dev, false);
2474 }
2475}
2476
2477static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2478{
2479 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2480
2481 switch (prop) {
2482 case PCI_EA_P_MEM:
2483 case PCI_EA_P_VF_MEM:
2484 flags |= IORESOURCE_MEM;
2485 break;
2486 case PCI_EA_P_MEM_PREFETCH:
2487 case PCI_EA_P_VF_MEM_PREFETCH:
2488 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2489 break;
2490 case PCI_EA_P_IO:
2491 flags |= IORESOURCE_IO;
2492 break;
2493 default:
2494 return 0;
2495 }
2496
2497 return flags;
2498}
2499
2500static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2501 u8 prop)
2502{
2503 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2504 return &dev->resource[bei];
2505#ifdef CONFIG_PCI_IOV
2506 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2507 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2508 return &dev->resource[PCI_IOV_RESOURCES +
2509 bei - PCI_EA_BEI_VF_BAR0];
2510#endif
2511 else if (bei == PCI_EA_BEI_ROM)
2512 return &dev->resource[PCI_ROM_RESOURCE];
2513 else
2514 return NULL;
2515}
2516
2517
2518static int pci_ea_read(struct pci_dev *dev, int offset)
2519{
2520 struct resource *res;
2521 int ent_size, ent_offset = offset;
2522 resource_size_t start, end;
2523 unsigned long flags;
2524 u32 dw0, bei, base, max_offset;
2525 u8 prop;
2526 bool support_64 = (sizeof(resource_size_t) >= 8);
2527
2528 pci_read_config_dword(dev, ent_offset, &dw0);
2529 ent_offset += 4;
2530
2531
2532 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2533
2534 if (!(dw0 & PCI_EA_ENABLE))
2535 goto out;
2536
2537 bei = (dw0 & PCI_EA_BEI) >> 4;
2538 prop = (dw0 & PCI_EA_PP) >> 8;
2539
2540
2541
2542
2543
2544 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2545 prop = (dw0 & PCI_EA_SP) >> 16;
2546 if (prop > PCI_EA_P_BRIDGE_IO)
2547 goto out;
2548
2549 res = pci_ea_get_resource(dev, bei, prop);
2550 if (!res) {
2551 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2552 goto out;
2553 }
2554
2555 flags = pci_ea_flags(dev, prop);
2556 if (!flags) {
2557 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2558 goto out;
2559 }
2560
2561
2562 pci_read_config_dword(dev, ent_offset, &base);
2563 start = (base & PCI_EA_FIELD_MASK);
2564 ent_offset += 4;
2565
2566
2567 pci_read_config_dword(dev, ent_offset, &max_offset);
2568 ent_offset += 4;
2569
2570
2571 if (base & PCI_EA_IS_64) {
2572 u32 base_upper;
2573
2574 pci_read_config_dword(dev, ent_offset, &base_upper);
2575 ent_offset += 4;
2576
2577 flags |= IORESOURCE_MEM_64;
2578
2579
2580 if (!support_64 && base_upper)
2581 goto out;
2582
2583 if (support_64)
2584 start |= ((u64)base_upper << 32);
2585 }
2586
2587 end = start + (max_offset | 0x03);
2588
2589
2590 if (max_offset & PCI_EA_IS_64) {
2591 u32 max_offset_upper;
2592
2593 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2594 ent_offset += 4;
2595
2596 flags |= IORESOURCE_MEM_64;
2597
2598
2599 if (!support_64 && max_offset_upper)
2600 goto out;
2601
2602 if (support_64)
2603 end += ((u64)max_offset_upper << 32);
2604 }
2605
2606 if (end < start) {
2607 pci_err(dev, "EA Entry crosses address boundary\n");
2608 goto out;
2609 }
2610
2611 if (ent_size != ent_offset - offset) {
2612 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
2613 ent_size, ent_offset - offset);
2614 goto out;
2615 }
2616
2617 res->name = pci_name(dev);
2618 res->start = start;
2619 res->end = end;
2620 res->flags = flags;
2621
2622 if (bei <= PCI_EA_BEI_BAR5)
2623 pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2624 bei, res, prop);
2625 else if (bei == PCI_EA_BEI_ROM)
2626 pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
2627 res, prop);
2628 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
2629 pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2630 bei - PCI_EA_BEI_VF_BAR0, res, prop);
2631 else
2632 pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
2633 bei, res, prop);
2634
2635out:
2636 return offset + ent_size;
2637}
2638
2639
2640void pci_ea_init(struct pci_dev *dev)
2641{
2642 int ea;
2643 u8 num_ent;
2644 int offset;
2645 int i;
2646
2647
2648 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2649 if (!ea)
2650 return;
2651
2652
2653 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2654 &num_ent);
2655 num_ent &= PCI_EA_NUM_ENT_MASK;
2656
2657 offset = ea + PCI_EA_FIRST_ENT;
2658
2659
2660 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2661 offset += 4;
2662
2663
2664 for (i = 0; i < num_ent; ++i)
2665 offset = pci_ea_read(dev, offset);
2666}
2667
2668static void pci_add_saved_cap(struct pci_dev *pci_dev,
2669 struct pci_cap_saved_state *new_cap)
2670{
2671 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2672}
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2683 bool extended, unsigned int size)
2684{
2685 int pos;
2686 struct pci_cap_saved_state *save_state;
2687
2688 if (extended)
2689 pos = pci_find_ext_capability(dev, cap);
2690 else
2691 pos = pci_find_capability(dev, cap);
2692
2693 if (!pos)
2694 return 0;
2695
2696 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2697 if (!save_state)
2698 return -ENOMEM;
2699
2700 save_state->cap.cap_nr = cap;
2701 save_state->cap.cap_extended = extended;
2702 save_state->cap.size = size;
2703 pci_add_saved_cap(dev, save_state);
2704
2705 return 0;
2706}
2707
2708int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2709{
2710 return _pci_add_cap_save_buffer(dev, cap, false, size);
2711}
2712
2713int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2714{
2715 return _pci_add_cap_save_buffer(dev, cap, true, size);
2716}
2717
2718
2719
2720
2721
2722void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2723{
2724 int error;
2725
2726 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2727 PCI_EXP_SAVE_REGS * sizeof(u16));
2728 if (error)
2729 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
2730
2731 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2732 if (error)
2733 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
2734
2735 pci_allocate_vc_save_buffers(dev);
2736}
2737
2738void pci_free_cap_save_buffers(struct pci_dev *dev)
2739{
2740 struct pci_cap_saved_state *tmp;
2741 struct hlist_node *n;
2742
2743 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2744 kfree(tmp);
2745}
2746
/**
 * pci_configure_ari - enable or disable ARI forwarding
 * @dev: the PCI device
 *
 * If @dev and its upstream bridge both support ARI, enable ARI in the
 * bridge.  Otherwise, disable ARI in the bridge.
 */
2754void pci_configure_ari(struct pci_dev *dev)
2755{
2756 u32 cap;
2757 struct pci_dev *bridge;
2758
2759 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2760 return;
2761
2762 bridge = dev->bus->self;
2763 if (!bridge)
2764 return;
2765
2766 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2767 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2768 return;
2769
2770 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2771 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2772 PCI_EXP_DEVCTL2_ARI);
2773 bridge->ari_enabled = 1;
2774 } else {
2775 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2776 PCI_EXP_DEVCTL2_ARI);
2777 bridge->ari_enabled = 0;
2778 }
2779}
2780
2781static int pci_acs_enable;
2782
2783
2784
2785
2786void pci_request_acs(void)
2787{
2788 pci_acs_enable = 1;
2789}
2790
2791
2792
2793
2794
2795static void pci_std_enable_acs(struct pci_dev *dev)
2796{
2797 int pos;
2798 u16 cap;
2799 u16 ctrl;
2800
2801 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2802 if (!pos)
2803 return;
2804
2805 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2806 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2807
2808
2809 ctrl |= (cap & PCI_ACS_SV);
2810
2811
2812 ctrl |= (cap & PCI_ACS_RR);
2813
2814
2815 ctrl |= (cap & PCI_ACS_CR);
2816
2817
2818 ctrl |= (cap & PCI_ACS_UF);
2819
2820 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2821}
2822
2823
2824
2825
2826
2827void pci_enable_acs(struct pci_dev *dev)
2828{
2829 if (!pci_acs_enable)
2830 return;
2831
2832 if (!pci_dev_specific_enable_acs(dev))
2833 return;
2834
2835 pci_std_enable_acs(dev);
2836}
2837
2838static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2839{
2840 int pos;
2841 u16 cap, ctrl;
2842
2843 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2844 if (!pos)
2845 return false;
2846
2847
2848
2849
2850
2851
2852 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2853 acs_flags &= (cap | PCI_ACS_EC);
2854
2855 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2856 return (ctrl & acs_flags) == acs_flags;
2857}
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2876{
2877 int ret;
2878
2879 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2880 if (ret >= 0)
2881 return ret > 0;
2882
2883
2884
2885
2886
2887
2888 if (!pci_is_pcie(pdev))
2889 return false;
2890
2891 switch (pci_pcie_type(pdev)) {
2892
2893
2894
2895
2896
2897 case PCI_EXP_TYPE_PCIE_BRIDGE:
2898
2899
2900
2901
2902
2903
2904 case PCI_EXP_TYPE_PCI_BRIDGE:
2905 case PCI_EXP_TYPE_RC_EC:
2906 return false;
2907
2908
2909
2910
2911
2912 case PCI_EXP_TYPE_DOWNSTREAM:
2913 case PCI_EXP_TYPE_ROOT_PORT:
2914 return pci_acs_flags_enabled(pdev, acs_flags);
2915
2916
2917
2918
2919
2920
2921
2922 case PCI_EXP_TYPE_ENDPOINT:
2923 case PCI_EXP_TYPE_UPSTREAM:
2924 case PCI_EXP_TYPE_LEG_END:
2925 case PCI_EXP_TYPE_RC_END:
2926 if (!pdev->multifunction)
2927 break;
2928
2929 return pci_acs_flags_enabled(pdev, acs_flags);
2930 }
2931
	/*
	 * Any device type not called out above (including single-function
	 * endpoints) has no peer-to-peer exposure that ACS could control,
	 * so report ACS as effectively enabled.
	 */
2936 return true;
2937}
2938
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
2948bool pci_acs_path_enabled(struct pci_dev *start,
2949 struct pci_dev *end, u16 acs_flags)
2950{
2951 struct pci_dev *pdev, *parent = start;
2952
2953 do {
2954 pdev = parent;
2955
2956 if (!pci_acs_enabled(pdev, acs_flags))
2957 return false;
2958
2959 if (pci_is_root_bus(pdev->bus))
2960 return (end == NULL);
2961
2962 parent = pdev->bus->self;
2963 } while (pdev != end);
2964
2965 return true;
2966}
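
/*
 * Example (illustrative sketch, not part of this file's API): an IOMMU
 * driver deciding whether a device is isolated for grouping purposes might
 * test the whole path up to the root.  The "foo" name is hypothetical.
 */
#if 0
static bool foo_device_is_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	/* @end == NULL: walk all the way up to the root bus */
	return pci_acs_path_enabled(pdev, NULL, flags);
}
#endif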
2967
/**
 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
 * @pdev: PCI device
 * @bar: BAR to find
 *
 * Helper to find the position of the ctrl register for a BAR.
 * Returns -ENOTSUPP if resizable BARs are not supported at all.
 * Returns -ENOENT if no ctrl register for the BAR could be found.
 */
2977static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
2978{
2979 unsigned int pos, nbars, i;
2980 u32 ctrl;
2981
2982 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
2983 if (!pos)
2984 return -ENOTSUPP;
2985
2986 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
2987 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
2988 PCI_REBAR_CTRL_NBAR_SHIFT;
2989
2990 for (i = 0; i < nbars; i++, pos += 8) {
2991 int bar_idx;
2992
2993 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
2994 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
2995 if (bar_idx == bar)
2996 return pos;
2997 }
2998
2999 return -ENOENT;
3000}
3001
/**
 * pci_rebar_get_possible_sizes - get possible sizes for BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
 * (bit 0=1MB, bit 19=512GB).  Returns 0 if BAR isn't resizable.
 */
3010u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3011{
3012 int pos;
3013 u32 cap;
3014
3015 pos = pci_rebar_find_pos(pdev, bar);
3016 if (pos < 0)
3017 return 0;
3018
3019 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3020 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3021}
3022
/**
 * pci_rebar_get_current_size - get the current size of a BAR
 * @pdev: PCI device
 * @bar: BAR to query
 *
 * Read the current size of a BAR, encoded as in the spec (0=1MB, 31=128TB).
 * Returns the size encoding on success, or a negative error code.
 */
3031int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3032{
3033 int pos;
3034 u32 ctrl;
3035
3036 pos = pci_rebar_find_pos(pdev, bar);
3037 if (pos < 0)
3038 return pos;
3039
3040 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3041 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
3042}
3043
/**
 * pci_rebar_set_size - set a new size for a BAR
 * @pdev: PCI device
 * @bar: BAR to set size to
 * @size: new size as defined in the spec (0=1MB, 31=128TB)
 *
 * Set the new size of a BAR as defined in the spec.
 * Returns zero if resizing was successful, error code otherwise.
 */
3053int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3054{
3055 int pos;
3056 u32 ctrl;
3057
3058 pos = pci_rebar_find_pos(pdev, bar);
3059 if (pos < 0)
3060 return pos;
3061
3062 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3063 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3064 ctrl |= size << 8;
3065 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3066 return 0;
3067}
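
/*
 * Example (sketch): growing BAR 0 to 1 GB with the helpers above.  The
 * REBAR size field encodes 1 << (n + 20) bytes, so n = 10 is 1 GB.  These
 * helpers are internal to the PCI core, and a real caller must also release
 * and re-assign the BAR's resource around the change; "foo" is hypothetical.
 */
#if 0
static int foo_grow_bar0(struct pci_dev *pdev)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);

	if (!(sizes & BIT(10)))		/* is a 1 GB BAR 0 supported? */
		return -EOPNOTSUPP;

	return pci_rebar_set_size(pdev, 0, 10);
}
#endif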
3068
/**
 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
 * @dev: the PCI device
 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
 *
 * Return 0 if all upstream bridges support AtomicOp routing, egress
 * blocking is disabled on all upstream ports, and the root port supports
 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
 * AtomicOp completion), or negative otherwise.
 */
3082int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3083{
3084 struct pci_bus *bus = dev->bus;
3085 struct pci_dev *bridge;
3086 u32 cap, ctl2;
3087
3088 if (!pci_is_pcie(dev))
3089 return -EINVAL;
3090
	/*
	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
	 * AtomicOp requesters.  For now, we only support endpoints as
	 * requesters and root ports as completers.  No endpoints as
	 * completers, and no peer-to-peer.
	 */
3098 switch (pci_pcie_type(dev)) {
3099 case PCI_EXP_TYPE_ENDPOINT:
3100 case PCI_EXP_TYPE_LEG_END:
3101 case PCI_EXP_TYPE_RC_END:
3102 break;
3103 default:
3104 return -EINVAL;
3105 }
3106
3107 while (bus->parent) {
3108 bridge = bus->self;
3109
3110 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3111
3112 switch (pci_pcie_type(bridge)) {
		/* Ensure switch ports support AtomicOp routing */
3114 case PCI_EXP_TYPE_UPSTREAM:
3115 case PCI_EXP_TYPE_DOWNSTREAM:
3116 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3117 return -EINVAL;
3118 break;

		/* Ensure the root port supports all the sizes we care about */
3121 case PCI_EXP_TYPE_ROOT_PORT:
3122 if ((cap & cap_mask) != cap_mask)
3123 return -EINVAL;
3124 break;
3125 }

		/* Ensure upstream ports don't block AtomicOps on egress */
3128 if (!bridge->has_secondary_link) {
3129 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3130 &ctl2);
3131 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3132 return -EINVAL;
3133 }
3134
3135 bus = bus->parent;
3136 }
3137
3138 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3139 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3140 return 0;
3141}
3142EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
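
/*
 * Example (sketch): an endpoint driver that issues 64-bit AtomicOps to host
 * memory would enable the requester during probe; "foo" is hypothetical.
 */
#if 0
static int foo_enable_atomics(struct pci_dev *pdev)
{
	return pci_enable_atomic_ops_to_root(pdev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
}
#endif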
3143
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0.
 */
3155u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3156{
3157 int slot;
3158
3159 if (pci_ari_enabled(dev->bus))
3160 slot = 0;
3161 else
3162 slot = PCI_SLOT(dev->devfn);
3163
3164 return (((pin - 1) + slot) % 4) + 1;
3165}
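
/*
 * Worked example of the arithmetic above: INTB (pin 2) from a device in
 * slot 3 emerges at the upstream bridge as ((2 - 1) + 3) % 4 + 1 = 1,
 * i.e. INTA.
 */
#if 0
	u8 bridge_pin = pci_swizzle_interrupt_pin(dev, 2);	/* == 1 for slot 3 */
#endif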
3166
3167int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3168{
3169 u8 pin;
3170
3171 pin = dev->pin;
3172 if (!pin)
3173 return -1;
3174
3175 while (!pci_is_root_bus(dev->bus)) {
3176 pin = pci_swizzle_interrupt_pin(dev, pin);
3177 dev = dev->bus->self;
3178 }
3179 *bridge = dev;
3180 return pin;
3181}
3182
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
3191u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3192{
3193 u8 pin = *pinp;
3194
3195 while (!pci_is_root_bus(dev->bus)) {
3196 pin = pci_swizzle_interrupt_pin(dev, pin);
3197 dev = dev->bus->self;
3198 }
3199 *pinp = pin;
3200 return PCI_SLOT(dev->devfn);
3201}
3202EXPORT_SYMBOL_GPL(pci_common_swizzle);
3203
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by
 *	  pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region().  Call this function only
 * after all use of the PCI regions has ceased.
 */
3213void pci_release_region(struct pci_dev *pdev, int bar)
3214{
3215 struct pci_devres *dr;
3216
3217 if (pci_resource_len(pdev, bar) == 0)
3218 return;
3219 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3220 release_region(pci_resource_start(pdev, bar),
3221 pci_resource_len(pdev, bar));
3222 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3223 release_mem_region(pci_resource_start(pdev, bar),
3224 pci_resource_len(pdev, bar));
3225
3226 dr = find_pci_dr(pdev);
3227 if (dr)
3228 dr->region_mask &= ~(1 << bar);
3229}
3230EXPORT_SYMBOL(pci_release_region);
3231
/**
 * __pci_request_region - Reserve a PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3251static int __pci_request_region(struct pci_dev *pdev, int bar,
3252 const char *res_name, int exclusive)
3253{
3254 struct pci_devres *dr;
3255
3256 if (pci_resource_len(pdev, bar) == 0)
3257 return 0;
3258
3259 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3260 if (!request_region(pci_resource_start(pdev, bar),
3261 pci_resource_len(pdev, bar), res_name))
3262 goto err_out;
3263 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3264 if (!__request_mem_region(pci_resource_start(pdev, bar),
3265 pci_resource_len(pdev, bar), res_name,
3266 exclusive))
3267 goto err_out;
3268 }
3269
3270 dr = find_pci_dr(pdev);
3271 if (dr)
3272 dr->region_mask |= 1 << bar;
3273
3274 return 0;
3275
3276err_out:
3277 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3278 &pdev->resource[bar]);
3279 return -EBUSY;
3280}
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3297{
3298 return __pci_request_region(pdev, bar, res_name, 0);
3299}
3300EXPORT_SYMBOL(pci_request_region);
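
/*
 * Example (sketch): typical use of pci_request_region() in a driver probe
 * path, paired with pci_ioremap_bar().  Error handling is condensed and the
 * "foo" names are hypothetical.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_region(pdev, 0, "foo");
	if (err)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		err = -ENOMEM;
		goto err_release;
	}
	return 0;

err_release:
	pci_release_region(pdev, 0);
err_disable:
	pci_disable_device(pdev);
	return err;
}
#endif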
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
3321 const char *res_name)
3322{
3323 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
3324}
3325EXPORT_SYMBOL(pci_request_region_exclusive);
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3336{
3337 int i;
3338
3339 for (i = 0; i < 6; i++)
3340 if (bars & (1 << i))
3341 pci_release_region(pdev, i);
3342}
3343EXPORT_SYMBOL(pci_release_selected_regions);
3344
3345static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3346 const char *res_name, int excl)
3347{
3348 int i;
3349
3350 for (i = 0; i < 6; i++)
3351 if (bars & (1 << i))
3352 if (__pci_request_region(pdev, i, res_name, excl))
3353 goto err_out;
3354 return 0;
3355
3356err_out:
3357 while (--i >= 0)
3358 if (bars & (1 << i))
3359 pci_release_region(pdev, i);
3360
3361 return -EBUSY;
3362}
3363
3364
3365
3366
3367
3368
3369
3370
3371int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3372 const char *res_name)
3373{
3374 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3375}
3376EXPORT_SYMBOL(pci_request_selected_regions);
3377
3378int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3379 const char *res_name)
3380{
3381 return __pci_request_selected_regions(pdev, bars, res_name,
3382 IORESOURCE_EXCLUSIVE);
3383}
3384EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395void pci_release_regions(struct pci_dev *pdev)
3396{
3397 pci_release_selected_regions(pdev, (1 << 6) - 1);
3398}
3399EXPORT_SYMBOL(pci_release_regions);
3400
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
3414int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3415{
3416 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3417}
3418EXPORT_SYMBOL(pci_request_regions);
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3437{
3438 return pci_request_selected_regions_exclusive(pdev,
3439 ((1 << 6) - 1), res_name);
3440}
3441EXPORT_SYMBOL(pci_request_regions_exclusive);
3442
3443#ifdef PCI_IOBASE
3444struct io_range {
3445 struct list_head list;
3446 phys_addr_t start;
3447 resource_size_t size;
3448};
3449
3450static LIST_HEAD(io_range_list);
3451static DEFINE_SPINLOCK(io_range_lock);
3452#endif
3453
/*
 * Record the PCI IO range (expressed as CPU physical address + size).
 * Return a negative value if an error has occurred, zero otherwise.
 */
3458int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
3459{
3460 int err = 0;
3461
3462#ifdef PCI_IOBASE
3463 struct io_range *range;
3464 resource_size_t allocated_size = 0;
3465
	/* check if the range hasn't been previously recorded */
	spin_lock(&io_range_lock);
	list_for_each_entry(range, &io_range_list, list) {
		if (addr >= range->start &&
		    addr + size <= range->start + range->size) {
			/* range already registered, bail out */
			goto end_register;
		}
3472 }
3473 allocated_size += range->size;
3474 }
3475
	/* range not registered yet, check for available space */
	if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
		/* if it's too big, check whether 64K can still be reserved */
		if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
3480 err = -E2BIG;
3481 goto end_register;
3482 }
3483
3484 size = SZ_64K;
3485 pr_warn("Requested IO range too big, new size set to 64K\n");
3486 }
3487
	/* add the range to the list */
	range = kzalloc(sizeof(*range), GFP_ATOMIC);
3490 if (!range) {
3491 err = -ENOMEM;
3492 goto end_register;
3493 }
3494
3495 range->start = addr;
3496 range->size = size;
3497
3498 list_add_tail(&range->list, &io_range_list);
3499
3500end_register:
3501 spin_unlock(&io_range_lock);
3502#endif
3503
3504 return err;
3505}
3506
3507phys_addr_t pci_pio_to_address(unsigned long pio)
3508{
3509 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3510
3511#ifdef PCI_IOBASE
3512 struct io_range *range;
3513 resource_size_t allocated_size = 0;
3514
3515 if (pio > IO_SPACE_LIMIT)
3516 return address;
3517
3518 spin_lock(&io_range_lock);
3519 list_for_each_entry(range, &io_range_list, list) {
3520 if (pio >= allocated_size && pio < allocated_size + range->size) {
3521 address = range->start + pio - allocated_size;
3522 break;
3523 }
3524 allocated_size += range->size;
3525 }
3526 spin_unlock(&io_range_lock);
3527#endif
3528
3529 return address;
3530}
3531
3532unsigned long __weak pci_address_to_pio(phys_addr_t address)
3533{
3534#ifdef PCI_IOBASE
3535 struct io_range *res;
3536 resource_size_t offset = 0;
3537 unsigned long addr = -1;
3538
3539 spin_lock(&io_range_lock);
3540 list_for_each_entry(res, &io_range_list, list) {
3541 if (address >= res->start && address < res->start + res->size) {
3542 addr = address - res->start + offset;
3543 break;
3544 }
3545 offset += res->size;
3546 }
3547 spin_unlock(&io_range_lock);
3548
3549 return addr;
3550#else
3551 if (address > IO_SPACE_LIMIT)
3552 return (unsigned long)-1;
3553
3554 return (unsigned long) address;
3555#endif
3556}
3557
/**
 * pci_remap_iospace - Remap the memory mapped I/O space
 * @res: Resource describing the I/O space
 * @phys_addr: physical address of range to be mapped
 *
 * Remap the memory mapped I/O space described by the @res
 * and the CPU physical address @phys_addr into virtual address space.
 * Only architectures that have memory mapped IO functions defined
 * (and the PCI_IOBASE value defined) should call this function.
 */
3568int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3569{
3570#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3571 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3572
3573 if (!(res->flags & IORESOURCE_IO))
3574 return -EINVAL;
3575
3576 if (res->end > IO_SPACE_LIMIT)
3577 return -EINVAL;
3578
3579 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3580 pgprot_device(PAGE_KERNEL));
3581#else
3582
3583
3584 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3585 return -ENODEV;
3586#endif
3587}
3588EXPORT_SYMBOL(pci_remap_iospace);
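
/*
 * Example (sketch): a host controller driver mapping its I/O window.  The
 * "io_res"/"io_phys" names are hypothetical; io_res is an IORESOURCE_IO
 * resource already translated into logical PIO space (see
 * pci_register_io_range() above).
 */
#if 0
	err = pci_remap_iospace(&io_res, io_phys);
	if (err)
		dev_warn(dev, "error %d mapping PCI I/O space\n", err);
#endif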
3589
/**
 * pci_unmap_iospace - Unmap the memory mapped I/O space
 * @res: resource to be unmapped
 *
 * Unmap the CPU virtual address @res from virtual address space.
 * Only architectures that have memory mapped IO functions defined
 * (and the PCI_IOBASE value defined) should call this function.
 */
3598void pci_unmap_iospace(struct resource *res)
3599{
3600#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3601 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3602
3603 unmap_kernel_range(vaddr, resource_size(res));
3604#endif
3605}
3606EXPORT_SYMBOL(pci_unmap_iospace);
3607
/**
 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed pci_remap_cfgspace().  Map is automatically unmapped on
 * driver detach.
 */
3617void __iomem *devm_pci_remap_cfgspace(struct device *dev,
3618 resource_size_t offset,
3619 resource_size_t size)
3620{
3621 void __iomem **ptr, *addr;
3622
3623 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3624 if (!ptr)
3625 return NULL;
3626
3627 addr = pci_remap_cfgspace(offset, size);
3628 if (addr) {
3629 *ptr = addr;
3630 devres_add(dev, ptr);
3631 } else
3632 devres_free(ptr);
3633
3634 return addr;
3635}
3636EXPORT_SYMBOL(devm_pci_remap_cfgspace);
3637
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it with the pci_remap_cfgspace() API, which ensures
 * the proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error
 * code on failure.  See the usage sketch following the function body.
 */
3657void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
3658 struct resource *res)
3659{
3660 resource_size_t size;
3661 const char *name;
3662 void __iomem *dest_ptr;
3663
3664 BUG_ON(!dev);
3665
3666 if (!res || resource_type(res) != IORESOURCE_MEM) {
3667 dev_err(dev, "invalid resource\n");
3668 return IOMEM_ERR_PTR(-EINVAL);
3669 }
3670
3671 size = resource_size(res);
3672 name = res->name ?: dev_name(dev);
3673
3674 if (!devm_request_mem_region(dev, res->start, size, name)) {
3675 dev_err(dev, "can't request region for resource %pR\n", res);
3676 return IOMEM_ERR_PTR(-EBUSY);
3677 }
3678
3679 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
3680 if (!dest_ptr) {
3681 dev_err(dev, "ioremap failed for resource %pR\n", res);
3682 devm_release_mem_region(dev, res->start, size);
3683 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
3684 }
3685
3686 return dest_ptr;
3687}
3688EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
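
/*
 * Usage sketch for devm_pci_remap_cfg_resource(): a host bridge driver
 * mapping config space from its first platform MEM resource.  The "foo"
 * name is hypothetical.
 */
#if 0
static int foo_host_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
#endif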
3689
3690static void __pci_set_master(struct pci_dev *dev, bool enable)
3691{
3692 u16 old_cmd, cmd;
3693
3694 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3695 if (enable)
3696 cmd = old_cmd | PCI_COMMAND_MASTER;
3697 else
3698 cmd = old_cmd & ~PCI_COMMAND_MASTER;
3699 if (cmd != old_cmd) {
3700 pci_dbg(dev, "%s bus mastering\n",
3701 enable ? "enabling" : "disabling");
3702 pci_write_config_word(dev, PCI_COMMAND, cmd);
3703 }
3704 dev->is_busmaster = enable;
3705}
3706
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
3714char * __weak __init pcibios_setup(char *str)
3715{
3716 return str;
3717}
3718
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can
 * override this if necessary.
 */
3727void __weak pcibios_set_master(struct pci_dev *dev)
3728{
3729 u8 lat;
3730
3731
3732 if (pci_is_pcie(dev))
3733 return;
3734
3735 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3736 if (lat < 16)
3737 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3738 else if (lat > pcibios_max_latency)
3739 lat = pcibios_max_latency;
3740 else
3741 return;
3742
3743 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3744}
3745
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
3753void pci_set_master(struct pci_dev *dev)
3754{
3755 __pci_set_master(dev, true);
3756 pcibios_set_master(dev);
3757}
3758EXPORT_SYMBOL(pci_set_master);
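
/*
 * Example (sketch): the canonical ordering in a probe path.  Bus mastering
 * must be on before the device may DMA or raise MSI/MSI-X interrupts.
 */
#if 0
	err = pci_enable_device(pdev);
	if (err)
		return err;
	pci_set_master(pdev);	/* before any DMA or MSI/MSI-X setup */
#endif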
3759
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
3764void pci_clear_master(struct pci_dev *dev)
3765{
3766 __pci_set_master(dev, false);
3767}
3768EXPORT_SYMBOL(pci_clear_master);
3769
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
3780int pci_set_cacheline_size(struct pci_dev *dev)
3781{
3782 u8 cacheline_size;
3783
3784 if (!pci_cache_line_size)
3785 return -EINVAL;
3786
3787
3788
3789 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3790 if (cacheline_size >= pci_cache_line_size &&
3791 (cacheline_size % pci_cache_line_size) == 0)
3792 return 0;
3793
3794
3795 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3796
3797 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3798 if (cacheline_size == pci_cache_line_size)
3799 return 0;
3800
3801 pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n",
3802 pci_cache_line_size << 2);
3803
3804 return -EINVAL;
3805}
3806EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3807
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
3816int pci_set_mwi(struct pci_dev *dev)
3817{
3818#ifdef PCI_DISABLE_MWI
3819 return 0;
3820#else
3821 int rc;
3822 u16 cmd;
3823
3824 rc = pci_set_cacheline_size(dev);
3825 if (rc)
3826 return rc;
3827
3828 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3829 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3830 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
3831 cmd |= PCI_COMMAND_INVALIDATE;
3832 pci_write_config_word(dev, PCI_COMMAND, cmd);
3833 }
3834 return 0;
3835#endif
3836}
3837EXPORT_SYMBOL(pci_set_mwi);
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847int pcim_set_mwi(struct pci_dev *dev)
3848{
3849 struct pci_devres *dr;
3850
3851 dr = find_pci_dr(dev);
3852 if (!dr)
3853 return -ENOMEM;
3854
3855 dr->mwi = 1;
3856 return pci_set_mwi(dev);
3857}
3858EXPORT_SYMBOL(pcim_set_mwi);
3859
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
3869int pci_try_set_mwi(struct pci_dev *dev)
3870{
3871#ifdef PCI_DISABLE_MWI
3872 return 0;
3873#else
3874 return pci_set_mwi(dev);
3875#endif
3876}
3877EXPORT_SYMBOL(pci_try_set_mwi);
3878
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device.
 */
3885void pci_clear_mwi(struct pci_dev *dev)
3886{
3887#ifndef PCI_DISABLE_MWI
3888 u16 cmd;
3889
3890 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3891 if (cmd & PCI_COMMAND_INVALIDATE) {
3892 cmd &= ~PCI_COMMAND_INVALIDATE;
3893 pci_write_config_word(dev, PCI_COMMAND, cmd);
3894 }
3895#endif
3896}
3897EXPORT_SYMBOL(pci_clear_mwi);
3898
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device @pdev
 */
3906void pci_intx(struct pci_dev *pdev, int enable)
3907{
3908 u16 pci_command, new;
3909
3910 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3911
3912 if (enable)
3913 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3914 else
3915 new = pci_command | PCI_COMMAND_INTX_DISABLE;
3916
3917 if (new != pci_command) {
3918 struct pci_devres *dr;
3919
3920 pci_write_config_word(pdev, PCI_COMMAND, new);
3921
3922 dr = find_pci_dr(pdev);
3923 if (dr && !dr->restore_intx) {
3924 dr->restore_intx = 1;
3925 dr->orig_intx = !enable;
3926 }
3927 }
3928}
3929EXPORT_SYMBOL_GPL(pci_intx);
3930
3931static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3932{
3933 struct pci_bus *bus = dev->bus;
3934 bool mask_updated = true;
3935 u32 cmd_status_dword;
3936 u16 origcmd, newcmd;
3937 unsigned long flags;
3938 bool irq_pending;
3939
	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
3944 BUILD_BUG_ON(PCI_COMMAND % 4);
3945 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3946
3947 raw_spin_lock_irqsave(&pci_lock, flags);
3948
3949 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3950
3951 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
3958 if (mask != irq_pending) {
3959 mask_updated = false;
3960 goto done;
3961 }
3962
3963 origcmd = cmd_status_dword;
3964 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3965 if (mask)
3966 newcmd |= PCI_COMMAND_INTX_DISABLE;
3967 if (newcmd != origcmd)
3968 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3969
3970done:
3971 raw_spin_unlock_irqrestore(&pci_lock, flags);
3972
3973 return mask_updated;
3974}
3975
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case.  False is returned if no interrupt was
 * pending.
 */
3984bool pci_check_and_mask_intx(struct pci_dev *dev)
3985{
3986 return pci_check_and_set_intx_mask(dev, true);
3987}
3988EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3989
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true.  False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
3998bool pci_check_and_unmask_intx(struct pci_dev *dev)
3999{
4000 return pci_check_and_set_intx_mask(dev, false);
4001}
4002EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
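
/*
 * Example (sketch): a hard IRQ handler for a shared INTx line in a driver
 * that defers all work to a thread.  "struct foo_device" and the handler
 * name are hypothetical.
 */
#if 0
static irqreturn_t foo_intx_handler(int irq, void *data)
{
	struct foo_device *fdev = data;

	/* Mask INTx only if it was our device that asserted the line */
	if (!pci_check_and_mask_intx(fdev->pdev))
		return IRQ_NONE;

	/* The thread re-enables INTx via pci_check_and_unmask_intx() */
	return IRQ_WAKE_THREAD;
}
#endif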
4003
/**
 * pci_wait_for_pending_transaction - wait for pending transactions
 * @dev: the PCI device to operate on
 *
 * Return 1 if the device has no pending transactions (including when it is
 * not a PCIe device), 0 if transactions are still pending after the timeout.
 */
4010int pci_wait_for_pending_transaction(struct pci_dev *dev)
4011{
4012 if (!pci_is_pcie(dev))
4013 return 1;
4014
4015 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4016 PCI_EXP_DEVSTA_TRPND);
4017}
4018EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4019
4020static void pci_flr_wait(struct pci_dev *dev)
4021{
4022 int delay = 1, timeout = 60000;
4023 u32 id;
4024
	/*
	 * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
	 * 100ms, but may silently discard requests while the FLR is in
	 * progress.  Wait 100ms before trying to access the device.
	 */
4030 msleep(100);
4031
	/*
	 * After 100ms, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions.  The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion.  Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
4044 pci_read_config_dword(dev, PCI_COMMAND, &id);
4045 while (id == ~0) {
4046 if (delay > timeout) {
4047 pci_warn(dev, "not ready %dms after FLR; giving up\n",
4048 100 + delay - 1);
4049 return;
4050 }
4051
4052 if (delay > 1000)
4053 pci_info(dev, "not ready %dms after FLR; waiting\n",
4054 100 + delay - 1);
4055
4056 msleep(delay);
4057 delay *= 2;
4058 pci_read_config_dword(dev, PCI_COMMAND, &id);
4059 }
4060
4061 if (delay > 1000)
4062 pci_info(dev, "ready %dms after FLR\n", 100 + delay - 1);
4063}
4064
/**
 * pcie_has_flr - check if a device supports function level resets
 * @dev: device to check
 *
 * Returns true if the device advertises support for PCIe function level
 * resets.
 */
4072static bool pcie_has_flr(struct pci_dev *dev)
4073{
4074 u32 cap;
4075
4076 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4077 return false;
4078
4079 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4080 return cap & PCI_EXP_DEVCAP_FLR;
4081}
4082
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev: device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */
4091void pcie_flr(struct pci_dev *dev)
4092{
4093 if (!pci_wait_for_pending_transaction(dev))
4094 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4095
4096 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4097 pci_flr_wait(dev);
4098}
4099EXPORT_SYMBOL_GPL(pcie_flr);
4100
4101static int pci_af_flr(struct pci_dev *dev, int probe)
4102{
4103 int pos;
4104 u8 cap;
4105
4106 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4107 if (!pos)
4108 return -ENOTTY;
4109
4110 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4111 return -ENOTTY;
4112
4113 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4114 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4115 return -ENOTTY;
4116
4117 if (probe)
4118 return 0;
4119
	/*
	 * Wait for Transaction Pending bit to clear.  A word-aligned test
	 * is used, so we use the control offset rather than status and shift
	 * the test bit to match.
	 */
4125 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4126 PCI_AF_STATUS_TP << 8))
4127 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4128
4129 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4130 pci_flr_wait(dev);
4131 return 0;
4132}
4133
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check whether the device can be reset this way.
 *
 * If the device supports PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0 via PCI PM registers.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field reports a different
 * value).  Moreover, only devices in D0 can be reset by this function.
 */
4149static int pci_pm_reset(struct pci_dev *dev, int probe)
4150{
4151 u16 csr;
4152
4153 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4154 return -ENOTTY;
4155
4156 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4157 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4158 return -ENOTTY;
4159
4160 if (probe)
4161 return 0;
4162
4163 if (dev->current_state != PCI_D0)
4164 return -EINVAL;
4165
4166 csr &= ~PCI_PM_CTRL_STATE_MASK;
4167 csr |= PCI_D3hot;
4168 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4169 pci_dev_d3_sleep(dev);
4170
4171 csr &= ~PCI_PM_CTRL_STATE_MASK;
4172 csr |= PCI_D0;
4173 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4174 pci_dev_d3_sleep(dev);
4175
4176 return 0;
4177}
4178
4179void pci_reset_secondary_bus(struct pci_dev *dev)
4180{
4181 u16 ctrl;
4182
4183 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4184 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4185 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

	/*
	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
	 * this to 2ms to ensure that we meet the minimum requirement.
	 */
4190 msleep(2);
4191
4192 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4193 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);

	/*
	 * Trhfa for conventional PCI is 2^25 clock cycles.
	 * Assuming a minimum 33MHz clock this results in a 1s
	 * delay before we can consider subordinate devices to
	 * be re-initialized.  PCIe has some ways to shorten this,
	 * but we don't make use of them yet.
	 */
4202 ssleep(1);
4203}
4204
4205void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4206{
4207 pci_reset_secondary_bus(dev);
4208}
4209
/**
 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
 * @dev: Bridge device
 *
 * Use the bridge control register to assert reset on the secondary bus.
 * Devices on the secondary bus are left in power-on state.
 */
4217void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
4218{
4219 pcibios_reset_secondary_bus(dev);
4220}
4221EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
4222
4223static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4224{
4225 struct pci_dev *pdev;
4226
4227 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4228 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4229 return -ENOTTY;
4230
4231 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4232 if (pdev != dev)
4233 return -ENOTTY;
4234
4235 if (probe)
4236 return 0;
4237
4238 pci_reset_bridge_secondary_bus(dev->bus->self);
4239
4240 return 0;
4241}
4242
4243static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4244{
4245 int rc = -ENOTTY;
4246
4247 if (!hotplug || !try_module_get(hotplug->ops->owner))
4248 return rc;
4249
4250 if (hotplug->ops->reset_slot)
4251 rc = hotplug->ops->reset_slot(hotplug, probe);
4252
4253 module_put(hotplug->ops->owner);
4254
4255 return rc;
4256}
4257
4258static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4259{
4260 struct pci_dev *pdev;
4261
4262 if (dev->subordinate || !dev->slot ||
4263 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4264 return -ENOTTY;
4265
4266 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4267 if (pdev != dev && pdev->slot == dev->slot)
4268 return -ENOTTY;
4269
4270 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4271}
4272
4273static void pci_dev_lock(struct pci_dev *dev)
4274{
4275 pci_cfg_access_lock(dev);
4276
4277 device_lock(&dev->dev);
4278}
4279
4280
4281static int pci_dev_trylock(struct pci_dev *dev)
4282{
4283 if (pci_cfg_access_trylock(dev)) {
4284 if (device_trylock(&dev->dev))
4285 return 1;
4286 pci_cfg_access_unlock(dev);
4287 }
4288
4289 return 0;
4290}
4291
4292static void pci_dev_unlock(struct pci_dev *dev)
4293{
4294 device_unlock(&dev->dev);
4295 pci_cfg_access_unlock(dev);
4296}
4297
4298static void pci_dev_save_and_disable(struct pci_dev *dev)
4299{
4300 const struct pci_error_handlers *err_handler =
4301 dev->driver ? dev->driver->err_handler : NULL;
4302
	/*
	 * dev->driver->err_handler->reset_prepare() is protected against
	 * races with ->remove() by the device lock, which must be held by
	 * the caller.
	 */
4308 if (err_handler && err_handler->reset_prepare)
4309 err_handler->reset_prepare(dev);
4310
	/*
	 * Wake-up device prior to save.  PM registers default to D0 after
	 * reset and a simple register restore doesn't reliably return
	 * to a non-D0 state anyway.
	 */
4316 pci_set_power_state(dev, PCI_D0);
4317
4318 pci_save_state(dev);
4319
	/*
	 * Disable the device by clearing the Command register, except for
	 * INTx-disable which is set.  This not only disables MMIO and I/O port
	 * BARs, but also prevents the device from being Bus Master, preventing
	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
	 * compliant devices, INTx-disable prevents legacy interrupts.
	 */
4326 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4327}
4328
4329static void pci_dev_restore(struct pci_dev *dev)
4330{
4331 const struct pci_error_handlers *err_handler =
4332 dev->driver ? dev->driver->err_handler : NULL;
4333
4334 pci_restore_state(dev);
4335
	/*
	 * dev->driver->err_handler->reset_done() is protected against
	 * races with ->remove() by the device lock, which must be held by
	 * the caller.
	 */
4341 if (err_handler && err_handler->reset_done)
4342 err_handler->reset_done(dev);
4343}
4344
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 *
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4364int __pci_reset_function_locked(struct pci_dev *dev)
4365{
4366 int rc;
4367
4368 might_sleep();
4369
	/*
	 * A reset method returns -ENOTTY if it doesn't support this device
	 * and we should try the next method.
	 *
	 * If it returns 0 (success), we're finished.  If it returns any
	 * other error, we're also finished: this indicates that further
	 * reset mechanisms might be broken on the device.
	 */
4378 rc = pci_dev_specific_reset(dev, 0);
4379 if (rc != -ENOTTY)
4380 return rc;
4381 if (pcie_has_flr(dev)) {
4382 pcie_flr(dev);
4383 return 0;
4384 }
4385 rc = pci_af_flr(dev, 0);
4386 if (rc != -ENOTTY)
4387 return rc;
4388 rc = pci_pm_reset(dev, 0);
4389 if (rc != -ENOTTY)
4390 return rc;
4391 rc = pci_dev_reset_slot_function(dev, 0);
4392 if (rc != -ENOTTY)
4393 return rc;
4394 return pci_parent_bus_reset(dev, 0);
4395}
4396EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4397
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
4409int pci_probe_reset_function(struct pci_dev *dev)
4410{
4411 int rc;
4412
4413 might_sleep();
4414
4415 rc = pci_dev_specific_reset(dev, 1);
4416 if (rc != -ENOTTY)
4417 return rc;
4418 if (pcie_has_flr(dev))
4419 return 0;
4420 rc = pci_af_flr(dev, 1);
4421 if (rc != -ENOTTY)
4422 return rc;
4423 rc = pci_pm_reset(dev, 1);
4424 if (rc != -ENOTTY)
4425 return rc;
4426 rc = pci_dev_reset_slot_function(dev, 1);
4427 if (rc != -ENOTTY)
4428 return rc;
4429
4430 return pci_parent_bus_reset(dev, 1);
4431}
4432
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  It differs from
 * __pci_reset_function_locked() in that it saves and restores device state
 * over the reset and takes the PCI device lock.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
4449int pci_reset_function(struct pci_dev *dev)
4450{
4451 int rc;
4452
4453 rc = pci_probe_reset_function(dev);
4454 if (rc)
4455 return rc;
4456
4457 pci_dev_lock(dev);
4458 pci_dev_save_and_disable(dev);
4459
4460 rc = __pci_reset_function_locked(dev);
4461
4462 pci_dev_restore(dev);
4463 pci_dev_unlock(dev);
4464
4465 return rc;
4466}
4467EXPORT_SYMBOL_GPL(pci_reset_function);
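
/*
 * Example (sketch): resetting a single function from driver context.  The
 * helper probes for a usable reset method, saves config state, resets, and
 * restores state, so the caller only has to reprogram runtime device state.
 */
#if 0
	err = pci_reset_function(pdev);
	if (err)
		pci_err(pdev, "function reset failed: %d\n", err);
#endif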
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486int pci_reset_function_locked(struct pci_dev *dev)
4487{
4488 int rc;
4489
4490 rc = pci_probe_reset_function(dev);
4491 if (rc)
4492 return rc;
4493
4494 pci_dev_save_and_disable(dev);
4495
4496 rc = __pci_reset_function_locked(dev);
4497
4498 pci_dev_restore(dev);
4499
4500 return rc;
4501}
4502EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4503
/**
 * pci_try_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Same as above, except return -EAGAIN if unable to lock device.
 */
4510int pci_try_reset_function(struct pci_dev *dev)
4511{
4512 int rc;
4513
4514 rc = pci_probe_reset_function(dev);
4515 if (rc)
4516 return rc;
4517
4518 if (!pci_dev_trylock(dev))
4519 return -EAGAIN;
4520
4521 pci_dev_save_and_disable(dev);
4522 rc = __pci_reset_function_locked(dev);
4523 pci_dev_unlock(dev);
4524
4525 pci_dev_restore(dev);
4526 return rc;
4527}
4528EXPORT_SYMBOL_GPL(pci_try_reset_function);
4529
4530
4531static bool pci_bus_resetable(struct pci_bus *bus)
4532{
4533 struct pci_dev *dev;
4534
4535
4536 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
4537 return false;
4538
4539 list_for_each_entry(dev, &bus->devices, bus_list) {
4540 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4541 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4542 return false;
4543 }
4544
4545 return true;
4546}
4547
4548
4549static void pci_bus_lock(struct pci_bus *bus)
4550{
4551 struct pci_dev *dev;
4552
4553 list_for_each_entry(dev, &bus->devices, bus_list) {
4554 pci_dev_lock(dev);
4555 if (dev->subordinate)
4556 pci_bus_lock(dev->subordinate);
4557 }
4558}
4559
4560
4561static void pci_bus_unlock(struct pci_bus *bus)
4562{
4563 struct pci_dev *dev;
4564
4565 list_for_each_entry(dev, &bus->devices, bus_list) {
4566 if (dev->subordinate)
4567 pci_bus_unlock(dev->subordinate);
4568 pci_dev_unlock(dev);
4569 }
4570}
4571
4572
4573static int pci_bus_trylock(struct pci_bus *bus)
4574{
4575 struct pci_dev *dev;
4576
4577 list_for_each_entry(dev, &bus->devices, bus_list) {
4578 if (!pci_dev_trylock(dev))
4579 goto unlock;
4580 if (dev->subordinate) {
4581 if (!pci_bus_trylock(dev->subordinate)) {
4582 pci_dev_unlock(dev);
4583 goto unlock;
4584 }
4585 }
4586 }
4587 return 1;
4588
4589unlock:
4590 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
4591 if (dev->subordinate)
4592 pci_bus_unlock(dev->subordinate);
4593 pci_dev_unlock(dev);
4594 }
4595 return 0;
4596}
4597
4598
4599static bool pci_slot_resetable(struct pci_slot *slot)
4600{
4601 struct pci_dev *dev;
4602
4603 if (slot->bus->self &&
4604 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
4605 return false;
4606
4607 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4608 if (!dev->slot || dev->slot != slot)
4609 continue;
4610 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4611 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4612 return false;
4613 }
4614
4615 return true;
4616}
4617
4618
4619static void pci_slot_lock(struct pci_slot *slot)
4620{
4621 struct pci_dev *dev;
4622
4623 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4624 if (!dev->slot || dev->slot != slot)
4625 continue;
4626 pci_dev_lock(dev);
4627 if (dev->subordinate)
4628 pci_bus_lock(dev->subordinate);
4629 }
4630}
4631
4632
4633static void pci_slot_unlock(struct pci_slot *slot)
4634{
4635 struct pci_dev *dev;
4636
4637 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4638 if (!dev->slot || dev->slot != slot)
4639 continue;
4640 if (dev->subordinate)
4641 pci_bus_unlock(dev->subordinate);
4642 pci_dev_unlock(dev);
4643 }
4644}
4645
4646
4647static int pci_slot_trylock(struct pci_slot *slot)
4648{
4649 struct pci_dev *dev;
4650
4651 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4652 if (!dev->slot || dev->slot != slot)
4653 continue;
4654 if (!pci_dev_trylock(dev))
4655 goto unlock;
4656 if (dev->subordinate) {
4657 if (!pci_bus_trylock(dev->subordinate)) {
4658 pci_dev_unlock(dev);
4659 goto unlock;
4660 }
4661 }
4662 }
4663 return 1;
4664
4665unlock:
4666 list_for_each_entry_continue_reverse(dev,
4667 &slot->bus->devices, bus_list) {
4668 if (!dev->slot || dev->slot != slot)
4669 continue;
4670 if (dev->subordinate)
4671 pci_bus_unlock(dev->subordinate);
4672 pci_dev_unlock(dev);
4673 }
4674 return 0;
4675}
4676
4677
4678static void pci_bus_save_and_disable(struct pci_bus *bus)
4679{
4680 struct pci_dev *dev;
4681
4682 list_for_each_entry(dev, &bus->devices, bus_list) {
4683 pci_dev_lock(dev);
4684 pci_dev_save_and_disable(dev);
4685 pci_dev_unlock(dev);
4686 if (dev->subordinate)
4687 pci_bus_save_and_disable(dev->subordinate);
4688 }
4689}
4690
4691
4692
4693
4694
4695static void pci_bus_restore(struct pci_bus *bus)
4696{
4697 struct pci_dev *dev;
4698
4699 list_for_each_entry(dev, &bus->devices, bus_list) {
4700 pci_dev_lock(dev);
4701 pci_dev_restore(dev);
4702 pci_dev_unlock(dev);
4703 if (dev->subordinate)
4704 pci_bus_restore(dev->subordinate);
4705 }
4706}
4707
4708
4709static void pci_slot_save_and_disable(struct pci_slot *slot)
4710{
4711 struct pci_dev *dev;
4712
4713 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4714 if (!dev->slot || dev->slot != slot)
4715 continue;
4716 pci_dev_save_and_disable(dev);
4717 if (dev->subordinate)
4718 pci_bus_save_and_disable(dev->subordinate);
4719 }
4720}
4721
4722
4723
4724
4725
4726static void pci_slot_restore(struct pci_slot *slot)
4727{
4728 struct pci_dev *dev;
4729
4730 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4731 if (!dev->slot || dev->slot != slot)
4732 continue;
4733 pci_dev_restore(dev);
4734 if (dev->subordinate)
4735 pci_bus_restore(dev->subordinate);
4736 }
4737}
4738
4739static int pci_slot_reset(struct pci_slot *slot, int probe)
4740{
4741 int rc;
4742
4743 if (!slot || !pci_slot_resetable(slot))
4744 return -ENOTTY;
4745
4746 if (!probe)
4747 pci_slot_lock(slot);
4748
4749 might_sleep();
4750
4751 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4752
4753 if (!probe)
4754 pci_slot_unlock(slot);
4755
4756 return rc;
4757}
4758
/**
 * pci_probe_reset_slot - probe whether a PCI slot can be reset
 * @slot: PCI slot to probe
 *
 * Return 0 if slot can be reset, negative if a slot reset is not supported.
 */
4765int pci_probe_reset_slot(struct pci_slot *slot)
4766{
4767 return pci_slot_reset(slot, 1);
4768}
4769EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4770
/**
 * pci_reset_slot - reset a PCI slot
 * @slot: PCI slot to reset
 *
 * A PCI bus may host multiple slots, each slot may support a reset mechanism
 * independent of other slots.  For instance, some slots may support slot power
 * control.  In the case of a 1:1 bus to slot architecture, this function may
 * wrap the bus reset to avoid spurious slot related events such as hotplug.
 * Generally a slot reset should be attempted before a bus reset.  All of the
 * functions in the slot and any subordinate buses behind the slot must be
 * quiesced through the same means.
 *
 * Return 0 on success, non-zero on error.
 */
4786int pci_reset_slot(struct pci_slot *slot)
4787{
4788 int rc;
4789
4790 rc = pci_slot_reset(slot, 1);
4791 if (rc)
4792 return rc;
4793
4794 pci_slot_save_and_disable(slot);
4795
4796 rc = pci_slot_reset(slot, 0);
4797
4798 pci_slot_restore(slot);
4799
4800 return rc;
4801}
4802EXPORT_SYMBOL_GPL(pci_reset_slot);
4803
/**
 * pci_try_reset_slot - Try to reset a PCI slot
 * @slot: PCI slot to reset
 *
 * Same as above except return -EAGAIN if the slot cannot be locked.
 */
4810int pci_try_reset_slot(struct pci_slot *slot)
4811{
4812 int rc;
4813
4814 rc = pci_slot_reset(slot, 1);
4815 if (rc)
4816 return rc;
4817
4818 pci_slot_save_and_disable(slot);
4819
4820 if (pci_slot_trylock(slot)) {
4821 might_sleep();
4822 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4823 pci_slot_unlock(slot);
4824 } else
4825 rc = -EAGAIN;
4826
4827 pci_slot_restore(slot);
4828
4829 return rc;
4830}
4831EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4832
4833static int pci_bus_reset(struct pci_bus *bus, int probe)
4834{
4835 if (!bus->self || !pci_bus_resetable(bus))
4836 return -ENOTTY;
4837
4838 if (probe)
4839 return 0;
4840
4841 pci_bus_lock(bus);
4842
4843 might_sleep();
4844
4845 pci_reset_bridge_secondary_bus(bus->self);
4846
4847 pci_bus_unlock(bus);
4848
4849 return 0;
4850}
4851
/**
 * pci_probe_reset_bus - probe whether a PCI bus can be reset
 * @bus: PCI bus to probe
 *
 * Return 0 if bus can be reset, negative if a bus reset is not supported.
 */
4858int pci_probe_reset_bus(struct pci_bus *bus)
4859{
4860 return pci_bus_reset(bus, 1);
4861}
4862EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4863
/**
 * pci_reset_bus - reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Do a bus reset on the given bus and any subordinate buses, saving
 * and restoring state of all devices.
 *
 * Return 0 on success, non-zero on error.
 */
4873int pci_reset_bus(struct pci_bus *bus)
4874{
4875 int rc;
4876
4877 rc = pci_bus_reset(bus, 1);
4878 if (rc)
4879 return rc;
4880
4881 pci_bus_save_and_disable(bus);
4882
4883 rc = pci_bus_reset(bus, 0);
4884
4885 pci_bus_restore(bus);
4886
4887 return rc;
4888}
4889EXPORT_SYMBOL_GPL(pci_reset_bus);
4890
/**
 * pci_try_reset_bus - Try to reset a PCI bus
 * @bus: top level PCI bus to reset
 *
 * Same as above except return -EAGAIN if the bus cannot be locked.
 */
4897int pci_try_reset_bus(struct pci_bus *bus)
4898{
4899 int rc;
4900
4901 rc = pci_bus_reset(bus, 1);
4902 if (rc)
4903 return rc;
4904
4905 pci_bus_save_and_disable(bus);
4906
4907 if (pci_bus_trylock(bus)) {
4908 might_sleep();
4909 pci_reset_bridge_secondary_bus(bus->self);
4910 pci_bus_unlock(bus);
4911 } else
4912 rc = -EAGAIN;
4913
4914 pci_bus_restore(bus);
4915
4916 return rc;
4917}
4918EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4919
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes or an
 * appropriate error value.
 */
4927int pcix_get_max_mmrbc(struct pci_dev *dev)
4928{
4929 int cap;
4930 u32 stat;
4931
4932 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4933 if (!cap)
4934 return -EINVAL;
4935
4936 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4937 return -EINVAL;
4938
4939 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4940}
4941EXPORT_SYMBOL(pcix_get_max_mmrbc);
4942
4943
4944
4945
4946
4947
4948
4949
4950int pcix_get_mmrbc(struct pci_dev *dev)
4951{
4952 int cap;
4953 u16 cmd;
4954
4955 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4956 if (!cap)
4957 return -EINVAL;
4958
4959 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4960 return -EINVAL;
4961
4962 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4963}
4964EXPORT_SYMBOL(pcix_get_mmrbc);
4965
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes;
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read byte count; some bridges have
 * errata that prevent this.
 */
4975int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4976{
4977 int cap;
4978 u32 stat, v, o;
4979 u16 cmd;
4980
4981 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4982 return -EINVAL;
4983
4984 v = ffs(mmrbc) - 10;
4985
4986 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4987 if (!cap)
4988 return -EINVAL;
4989
4990 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4991 return -EINVAL;
4992
4993 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4994 return -E2BIG;
4995
4996 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4997 return -EINVAL;
4998
4999 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5000 if (o != v) {
5001 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5002 return -EIO;
5003
5004 cmd &= ~PCI_X_CMD_MAX_READ;
5005 cmd |= v << 2;
5006 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5007 return -EIO;
5008 }
5009 return 0;
5010}
5011EXPORT_SYMBOL(pcix_set_mmrbc);
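
/*
 * Worked example of the encoding above: for mmrbc = 2048, ffs(2048) = 12,
 * so v = 12 - 10 = 2, and indeed 512 << 2 == 2048, matching the decode in
 * pcix_get_mmrbc().
 */
#if 0
	err = pcix_set_mmrbc(dev, 2048);	/* writes code 2 into PCI_X_CMD */
#endif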
5012
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes or an appropriate error
 * value.
 */
5020int pcie_get_readrq(struct pci_dev *dev)
5021{
5022 u16 ctl;
5023
5024 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5025
5026 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5027}
5028EXPORT_SYMBOL(pcie_get_readrq);
5029
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes;
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read request in bytes.
 */
5038int pcie_set_readrq(struct pci_dev *dev, int rq)
5039{
5040 u16 v;
5041
5042 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5043 return -EINVAL;
5044
	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with.
	 */
5051 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5052 int mps = pcie_get_mps(dev);
5053
5054 if (mps < rq)
5055 rq = mps;
5056 }
5057
5058 v = (ffs(rq) - 8) << 12;
5059
5060 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5061 PCI_EXP_DEVCTL_READRQ, v);
5062}
5063EXPORT_SYMBOL(pcie_set_readrq);
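
/*
 * Example (sketch): a driver capping its Max_Read_Request_Size at 256
 * bytes.  Encoding: v = (ffs(256) - 8) << 12, i.e. code 1, which decodes
 * back to 128 << 1 == 256 in pcie_get_readrq().
 */
#if 0
	err = pcie_set_readrq(pdev, 256);
#endif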
5064
5065
5066
5067
5068
5069
5070
5071int pcie_get_mps(struct pci_dev *dev)
5072{
5073 u16 ctl;
5074
5075 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5076
5077 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5078}
5079EXPORT_SYMBOL(pcie_get_mps);
5080
/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes;
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum payload size.
 */
5089int pcie_set_mps(struct pci_dev *dev, int mps)
5090{
5091 u16 v;
5092
5093 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5094 return -EINVAL;
5095
5096 v = ffs(mps) - 8;
5097 if (v > dev->pcie_mpss)
5098 return -EINVAL;
5099 v <<= 5;
5100
5101 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5102 PCI_EXP_DEVCTL_PAYLOAD, v);
5103}
5104EXPORT_SYMBOL(pcie_set_mps);
5105
/**
 * pcie_get_minimum_link - determine minimum link settings of a PCI device
 * @dev: PCI device to query
 * @speed: storage for minimum speed
 * @width: storage for minimum width
 *
 * This function will walk up the PCI device chain and determine the minimum
 * link width and speed of the device.
 */
5115int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
5116 enum pcie_link_width *width)
5117{
5118 int ret;
5119
5120 *speed = PCI_SPEED_UNKNOWN;
5121 *width = PCIE_LNK_WIDTH_UNKNOWN;
5122
5123 while (dev) {
5124 u16 lnksta;
5125 enum pci_bus_speed next_speed;
5126 enum pcie_link_width next_width;
5127
5128 ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5129 if (ret)
5130 return ret;
5131
5132 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5133 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5134 PCI_EXP_LNKSTA_NLW_SHIFT;
5135
5136 if (next_speed < *speed)
5137 *speed = next_speed;
5138
5139 if (next_width < *width)
5140 *width = next_width;
5141
5142 dev = dev->bus->self;
5143 }
5144
5145 return 0;
5146}
5147EXPORT_SYMBOL(pcie_get_minimum_link);
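
/*
 * Example (sketch): warning when a device sits behind an under-provisioned
 * slot, as some NIC drivers do at probe time.
 */
#if 0
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (!pcie_get_minimum_link(pdev, &speed, &width) &&
	    speed != PCI_SPEED_UNKNOWN && width != PCIE_LNK_WIDTH_UNKNOWN)
		pci_info(pdev, "bottleneck link: speed %#x, width x%d\n",
			 speed, width);
#endif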
5148
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes a BAR mask from the type of resource.
 */
5156int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5157{
5158 int i, bars = 0;
5159 for (i = 0; i < PCI_NUM_RESOURCES; i++)
5160 if (pci_resource_flags(dev, i) & flags)
5161 bars |= (1 << i);
5162 return bars;
5163}
5164EXPORT_SYMBOL(pci_select_bars);
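
/*
 * Example (sketch): requesting only the memory BARs of a device in one call
 * by combining pci_select_bars() with pci_request_selected_regions();
 * "foo" is a hypothetical owner name.
 */
#if 0
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	err = pci_request_selected_regions(pdev, bars, "foo");
#endif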
5165
5166
5167static arch_set_vga_state_t arch_set_vga_state;
5168
5169void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5170{
5171 arch_set_vga_state = func;
5172}
5173
5174static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5175 unsigned int command_bits, u32 flags)
5176{
5177 if (arch_set_vga_state)
5178 return arch_set_vga_state(dev, decode, command_bits,
5179 flags);
5180 return 0;
5181}
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191int pci_set_vga_state(struct pci_dev *dev, bool decode,
5192 unsigned int command_bits, u32 flags)
5193{
5194 struct pci_bus *bus;
5195 struct pci_dev *bridge;
5196 u16 cmd;
5197 int rc;
5198
5199 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
5200
5201
5202 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5203 if (rc)
5204 return rc;
5205
5206 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5207 pci_read_config_word(dev, PCI_COMMAND, &cmd);
5208 if (decode == true)
5209 cmd |= command_bits;
5210 else
5211 cmd &= ~command_bits;
5212 pci_write_config_word(dev, PCI_COMMAND, cmd);
5213 }
5214
5215 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
5216 return 0;
5217
5218 bus = dev->bus;
5219 while (bus) {
5220 bridge = bus->self;
5221 if (bridge) {
5222 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5223 &cmd);
5224 if (decode == true)
5225 cmd |= PCI_BRIDGE_CTL_VGA;
5226 else
5227 cmd &= ~PCI_BRIDGE_CTL_VGA;
5228 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5229 cmd);
5230 }
5231 bus = bus->parent;
5232 }
5233 return 0;
5234}
5235
/**
 * pci_add_dma_alias - Add a DMA devfn alias for a device
 * @dev: the PCI device for which alias is added
 * @devfn: alias slot and function
 *
 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask.
 * It should be called early, preferably as a PCI fixup header quirk.
 */
5244void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5245{
5246 if (!dev->dma_alias_mask)
5247 dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
5248 sizeof(long), GFP_KERNEL);
5249 if (!dev->dma_alias_mask) {
5250 pci_warn(dev, "Unable to allocate DMA alias mask\n");
5251 return;
5252 }
5253
5254 set_bit(devfn, dev->dma_alias_mask);
5255 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
5256 PCI_SLOT(devfn), PCI_FUNC(devfn));
5257}
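
/*
 * Example (sketch): a header fixup quirk for a device that issues DMA with
 * the requester ID of function 1 while being enumerated as function 0.
 * The vendor/device IDs are placeholders.
 */
#if 0
static void quirk_foo_dma_alias(struct pci_dev *dev)
{
	pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_foo_dma_alias);
#endif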
5258
5259bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5260{
5261 return (dev1->dma_alias_mask &&
5262 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5263 (dev2->dma_alias_mask &&
5264 test_bit(dev1->devfn, dev2->dma_alias_mask));
5265}
5266
5267bool pci_device_is_present(struct pci_dev *pdev)
5268{
5269 u32 v;
5270
5271 if (pci_dev_is_disconnected(pdev))
5272 return false;
5273 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5274}
5275EXPORT_SYMBOL_GPL(pci_device_is_present);
5276
5277void pci_ignore_hotplug(struct pci_dev *dev)
5278{
5279 struct pci_dev *bridge = dev->bus->self;
5280
5281 dev->ignore_hotplug = 1;
5282
5283 if (bridge)
5284 bridge->ignore_hotplug = 1;
5285}
5286EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
5287
5288resource_size_t __weak pcibios_default_alignment(void)
5289{
5290 return 0;
5291}
5292
5293#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
5294static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
5295static DEFINE_SPINLOCK(resource_alignment_lock);
5296
/**
 * pci_specified_resource_alignment - get resource alignment specified by user
 * @dev: the PCI device to get
 * @resize: whether or not to change resources' size when reassigning alignment
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
5305static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5306 bool *resize)
5307{
5308 int seg, bus, slot, func, align_order, count;
5309 unsigned short vendor, device, subsystem_vendor, subsystem_device;
5310 resource_size_t align = pcibios_default_alignment();
5311 char *p;
5312
5313 spin_lock(&resource_alignment_lock);
5314 p = resource_alignment_param;
5315 if (!*p && !align)
5316 goto out;
5317 if (pci_has_flag(PCI_PROBE_ONLY)) {
5318 align = 0;
5319 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5320 goto out;
5321 }
5322
5323 while (*p) {
5324 count = 0;
5325 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5326 p[count] == '@') {
5327 p += count + 1;
5328 } else {
5329 align_order = -1;
5330 }
5331 if (strncmp(p, "pci:", 4) == 0) {
5332
5333 p += 4;
5334 if (sscanf(p, "%hx:%hx:%hx:%hx%n",
5335 &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
5336 if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
5337 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
5338 p);
5339 break;
5340 }
5341 subsystem_vendor = subsystem_device = 0;
5342 }
5343 p += count;
5344 if ((!vendor || (vendor == dev->vendor)) &&
5345 (!device || (device == dev->device)) &&
5346 (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
5347 (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
5348 *resize = true;
5349 if (align_order == -1)
5350 align = PAGE_SIZE;
5351 else
5352 align = 1 << align_order;
5353
5354 break;
5355 }
5356 }
5357 else {
5358 if (sscanf(p, "%x:%x:%x.%x%n",
5359 &seg, &bus, &slot, &func, &count) != 4) {
5360 seg = 0;
5361 if (sscanf(p, "%x:%x.%x%n",
5362 &bus, &slot, &func, &count) != 3) {
5363
5364 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
5365 p);
5366 break;
5367 }
5368 }
5369 p += count;
5370 if (seg == pci_domain_nr(dev->bus) &&
5371 bus == dev->bus->number &&
5372 slot == PCI_SLOT(dev->devfn) &&
5373 func == PCI_FUNC(dev->devfn)) {
5374 *resize = true;
5375 if (align_order == -1)
5376 align = PAGE_SIZE;
5377 else
5378 align = 1 << align_order;
5379
5380 break;
5381 }
5382 }
5383 if (*p != ';' && *p != ',') {
5384
5385 break;
5386 }
5387 p++;
5388 }
5389out:
5390 spin_unlock(&resource_alignment_lock);
5391 return align;
5392}
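
/*
 * Example command-line forms accepted by the parser above.  The optional
 * leading "<order>@" gives log2 of the requested alignment; without it the
 * alignment defaults to PAGE_SIZE.  The IDs below are placeholders:
 *
 *	pci=resource_alignment=12@0000:01:00.0	  4 KB for one device
 *	pci=resource_alignment=pci:8086:10fb	  PAGE_SIZE by vendor:device
 */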
5393
5394static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
5395 resource_size_t align, bool resize)
5396{
5397 struct resource *r = &dev->resource[bar];
5398 resource_size_t size;
5399
5400 if (!(r->flags & IORESOURCE_MEM))
5401 return;
5402
5403 if (r->flags & IORESOURCE_PCI_FIXED) {
5404 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
5405 bar, r, (unsigned long long)align);
5406 return;
5407 }
5408
5409 size = resource_size(r);
5410 if (size >= align)
5411 return;
5412
	/*
	 * Increase the alignment of the resource.  There are two ways we
	 * can do this:
	 *
	 * 1) Increase the size of the resource.  BARs are aligned on their
	 *    size, so when we reallocate space for this resource, we'll
	 *    allocate it with the larger alignment.  This also prevents
	 *    assignment of any other BARs inside the alignment region, so
	 *    if we're requesting page alignment, this means no other BARs
	 *    will share the page.
	 *
	 *    The disadvantage is that this makes the resource larger than
	 *    the hardware BAR, which may break drivers that compute things
	 *    based on the resource size, e.g., to find registers at a
	 *    fixed offset before the end of the BAR.
	 *
	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
	 *    set r->start to the desired alignment.  By itself this
	 *    doesn't prevent other BARs being put inside the alignment
	 *    region, but if we realign *every* resource of every device in
	 *    the system, none of them will share an alignment region.
	 *
	 * When the user has requested alignment for only some devices via
	 * the "pci=resource_alignment" argument, "resize" is true and we
	 * use the first method.  Otherwise we assume we're aligning all
	 * devices and use the second.
	 */
5441 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
5442 bar, r, (unsigned long long)align);
5443
5444 if (resize) {
5445 r->start = 0;
5446 r->end = align - 1;
5447 } else {
5448 r->flags &= ~IORESOURCE_SIZEALIGN;
5449 r->flags |= IORESOURCE_STARTALIGN;
5450 r->start = align;
5451 r->end = r->start + size - 1;
5452 }
5453 r->flags |= IORESOURCE_UNSET;
5454}
5455
/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to the specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
5463void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5464{
5465 int i;
5466 struct resource *r;
5467 resource_size_t align;
5468 u16 command;
5469 bool resize = false;
5470
5471
5472
5473
5474
5475
5476
5477 if (dev->is_virtfn)
5478 return;
5479
5480
5481 align = pci_specified_resource_alignment(dev, &resize);
5482 if (!align)
5483 return;
5484
5485 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5486 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
5487 pci_warn(dev, "Can't reassign resources to host bridge\n");
5488 return;
5489 }
5490
5491 pci_info(dev, "Disabling memory decoding and releasing memory resources\n");
5492 pci_read_config_word(dev, PCI_COMMAND, &command);
5493 command &= ~PCI_COMMAND_MEMORY;
5494 pci_write_config_word(dev, PCI_COMMAND, command);
5495
5496 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
5497 pci_request_resource_alignment(dev, i, align, resize);
5498
	/*
	 * Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
5504 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
5505 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
5506 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
5507 r = &dev->resource[i];
5508 if (!(r->flags & IORESOURCE_MEM))
5509 continue;
5510 r->flags |= IORESOURCE_UNSET;
5511 r->end = resource_size(r) - 1;
5512 r->start = 0;
5513 }
5514 pci_disable_bridge_window(dev);
5515 }
5516}
5517
5518static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
5519{
5520 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
5521 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
5522 spin_lock(&resource_alignment_lock);
5523 strncpy(resource_alignment_param, buf, count);
5524 resource_alignment_param[count] = '\0';
5525 spin_unlock(&resource_alignment_lock);
5526 return count;
5527}
5528
5529static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
5530{
5531 size_t count;
5532 spin_lock(&resource_alignment_lock);
5533 count = snprintf(buf, size, "%s", resource_alignment_param);
5534 spin_unlock(&resource_alignment_lock);
5535 return count;
5536}
5537
5538static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
5539{
5540 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
5541}
5542
5543static ssize_t pci_resource_alignment_store(struct bus_type *bus,
5544 const char *buf, size_t count)
5545{
5546 return pci_set_resource_alignment_param(buf, count);
5547}
5548
5549static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
5550 pci_resource_alignment_store);
5551
5552static int __init pci_resource_alignment_sysfs_init(void)
5553{
5554 return bus_create_file(&pci_bus_type,
5555 &bus_attr_resource_alignment);
5556}
5557late_initcall(pci_resource_alignment_sysfs_init);
5558
5559static void pci_no_domains(void)
5560{
5561#ifdef CONFIG_PCI_DOMAINS
5562 pci_domains_supported = 0;
5563#endif
5564}
5565
5566#ifdef CONFIG_PCI_DOMAINS
5567static atomic_t __domain_nr = ATOMIC_INIT(-1);
5568
5569int pci_get_new_domain_nr(void)
5570{
5571 return atomic_inc_return(&__domain_nr);
5572}
5573
5574#ifdef CONFIG_PCI_DOMAINS_GENERIC
5575static int of_pci_bus_find_domain_nr(struct device *parent)
5576{
5577 static int use_dt_domains = -1;
5578 int domain = -1;
5579
5580 if (parent)
5581 domain = of_get_pci_domain_nr(parent->of_node);
5582
	/*
	 * Check DT domain and use_dt_domains values.
	 *
	 * If DT domain property is valid (domain >= 0) and
	 * use_dt_domains != 0, the DT assignment is valid since this means
	 * we have not previously allocated a domain number by using
	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
	 * 1, to indicate that we have just assigned a domain number from
	 * DT.
	 *
	 * If DT domain property value is not valid (ie domain < 0), and we
	 * have not previously assigned a domain number from DT
	 * (use_dt_domains != 1) we should assign a domain number by
	 * using the:
	 *
	 * pci_get_new_domain_nr()
	 *
	 * API and update the use_dt_domains value to keep track of method we
	 * are using to assign domain numbers (use_dt_domains = 0).
	 *
	 * All other combinations imply we have a platform that is trying
	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
	 * which is a recipe for domain mishandling and it is prevented by
	 * invalidating the domain value (domain = -1) and printing a
	 * corresponding error.
	 */
5608 if (domain >= 0 && use_dt_domains) {
5609 use_dt_domains = 1;
5610 } else if (domain < 0 && use_dt_domains != 1) {
5611 use_dt_domains = 0;
5612 domain = pci_get_new_domain_nr();
5613 } else {
5614 dev_err(parent, "Node %pOF has inconsistent \"linux,pci-domain\" property in DT\n",
5615 parent->of_node);
5616 domain = -1;
5617 }
5618
5619 return domain;
5620}
5621
5622int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5623{
5624 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
5625 acpi_pci_bus_find_domain_nr(bus);
5626}
5627#endif
5628#endif
5629
/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
5637int __weak pci_ext_cfg_avail(void)
5638{
5639 return 1;
5640}
5641
5642void __weak pci_fixup_cardbus(struct pci_bus *bus)
5643{
5644}
5645EXPORT_SYMBOL(pci_fixup_cardbus);
5646
5647static int __init pci_setup(char *str)
5648{
5649 while (str) {
5650 char *k = strchr(str, ',');
5651 if (k)
5652 *k++ = 0;
5653 if (*str && (str = pcibios_setup(str)) && *str) {
5654 if (!strcmp(str, "nomsi")) {
5655 pci_no_msi();
5656 } else if (!strcmp(str, "noaer")) {
5657 pci_no_aer();
5658 } else if (!strncmp(str, "realloc=", 8)) {
5659 pci_realloc_get_opt(str + 8);
5660 } else if (!strncmp(str, "realloc", 7)) {
5661 pci_realloc_get_opt("on");
5662 } else if (!strcmp(str, "nodomains")) {
5663 pci_no_domains();
5664 } else if (!strncmp(str, "noari", 5)) {
5665 pcie_ari_disabled = true;
5666 } else if (!strncmp(str, "cbiosize=", 9)) {
5667 pci_cardbus_io_size = memparse(str + 9, &str);
5668 } else if (!strncmp(str, "cbmemsize=", 10)) {
5669 pci_cardbus_mem_size = memparse(str + 10, &str);
5670 } else if (!strncmp(str, "resource_alignment=", 19)) {
5671 pci_set_resource_alignment_param(str + 19,
5672 strlen(str + 19));
5673 } else if (!strncmp(str, "ecrc=", 5)) {
5674 pcie_ecrc_get_policy(str + 5);
5675 } else if (!strncmp(str, "hpiosize=", 9)) {
5676 pci_hotplug_io_size = memparse(str + 9, &str);
5677 } else if (!strncmp(str, "hpmemsize=", 10)) {
5678 pci_hotplug_mem_size = memparse(str + 10, &str);
5679 } else if (!strncmp(str, "hpbussize=", 10)) {
5680 pci_hotplug_bus_size =
5681 simple_strtoul(str + 10, &str, 0);
5682 if (pci_hotplug_bus_size > 0xff)
5683 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
5684 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
5685 pcie_bus_config = PCIE_BUS_TUNE_OFF;
5686 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
5687 pcie_bus_config = PCIE_BUS_SAFE;
5688 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
5689 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5690 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
5691 pcie_bus_config = PCIE_BUS_PEER2PEER;
5692 } else if (!strncmp(str, "pcie_scan_all", 13)) {
5693 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
5694 } else {
5695 printk(KERN_ERR "PCI: Unknown option `%s'\n",
5696 str);
5697 }
5698 }
5699 str = k;
5700 }
5701 return 0;
5702}
5703early_param("pci", pci_setup);
5704