1
2
3
4
5
6
7
8
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
14#include <linux/pm.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18#include <linux/string.h>
19#include <linux/log2.h>
20#include <linux/pci-aspm.h>
21#include <linux/pm_wakeup.h>
22#include <linux/interrupt.h>
23#include <linux/device.h>
24#include <linux/pm_runtime.h>
25#include <asm/setup.h>
26#include "pci.h"
27
/* Human-readable names for pci_power_t values, indexed by state + 1. */
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

/* Set non-zero by quirks for ISA bridges with broken DMA. */
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

/* Bitmask of chipset problem flags, set by quirks. */
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/* Minimum delay (ms) to observe after a D3hot transition; quirks may raise it. */
unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

/* Devices whose PME# status must be polled (see pci_pme_active()). */
static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

/* One entry on pci_pme_list per polled device. */
struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

/* PME polling interval, in milliseconds. */
#define PME_TIMEOUT 1000
53
54static void pci_dev_d3_sleep(struct pci_dev *dev)
55{
56 unsigned int delay = dev->d3_delay;
57
58 if (delay < pci_pm_d3_delay)
59 delay = pci_pm_d3_delay;
60
61 msleep(delay);
62}
63
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE (256)
#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)

/* Window sizes reserved behind CardBus bridges; overridable via pci= options. */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE (256)
#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)

/* Window sizes reserved for hotplug bridges; overridable via pci= options. */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/* PCIe bus MPS tuning policy; default is to leave MPS settings alone. */
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * Default cache line size, in 32-bit units as the CACHE_LINE_SIZE register
 * expects; used when the arch does not set pci_cache_line_size explicitly.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
90
91
92
93
94
95
96
97
/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus* bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		/* Recurse into every child bus. */
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if(n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
112
113#ifdef CONFIG_HAS_IOMEM
114void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
115{
116
117
118
119 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
120 WARN_ON(1);
121 return NULL;
122 }
123 return ioremap_nocache(pci_resource_start(pdev, bar),
124 pci_resource_len(pdev, bar));
125}
126EXPORT_SYMBOL_GPL(pci_ioremap_bar);
127#endif
128
#if 0
/*
 * pci_max_busnr - returns the highest PCI bus number present in the system.
 *
 * Walks the global list of root buses via pci_find_next_bus() and takes the
 * maximum over pci_bus_max_busnr().  Currently compiled out (no callers).
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if(n > max)
			max = n;
	}
	return max;
}

#endif
152
/* Bound on capability-list links followed, so a malformed (looping) list
 * cannot hang the walk. */
#define PCI_FIND_CAP_TTL 48

/*
 * Walk the capability list starting at the pointer stored at @pos, looking
 * for capability ID @cap.  @ttl is decremented per link and shared across
 * chained searches.  Returns the config-space offset of the matching
 * capability, or 0 if none is found.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)		/* pointers below 0x40 are invalid */
			break;
		pos &= ~3;		/* capability pointers are dword aligned */
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)		/* 0xff: config read failed */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
175
176static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
177 u8 pos, int cap)
178{
179 int ttl = PCI_FIND_CAP_TTL;
180
181 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
182}
183
/**
 * pci_find_next_capability - Find the next capability after @pos
 * @dev: the PCI device to query
 * @pos: offset of the capability to continue searching from
 * @cap: capability code to look for
 *
 * Continues a capability-list search started by pci_find_capability();
 * returns the offset of the next capability of type @cap, or 0.
 */
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
190
191static int __pci_bus_find_cap_start(struct pci_bus *bus,
192 unsigned int devfn, u8 hdr_type)
193{
194 u16 status;
195
196 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
197 if (!(status & PCI_STATUS_CAP_LIST))
198 return 0;
199
200 switch (hdr_type) {
201 case PCI_HEADER_TYPE_NORMAL:
202 case PCI_HEADER_TYPE_BRIDGE:
203 return PCI_CAPABILITY_LIST;
204 case PCI_HEADER_TYPE_CARDBUS:
205 return PCI_CB_CAPABILITY_LIST;
206 default:
207 return 0;
208 }
209
210 return 0;
211}
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code (e.g. PCI_CAP_ID_PM, PCI_CAP_ID_EXP, ...)
 *
 * Tell if a device supports a given PCI capability.  Returns the address
 * of the requested capability structure within the device's PCI
 * configuration space, or 0 in case the device does not support it.
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
257{
258 int pos;
259 u8 hdr_type;
260
261 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
262
263 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
264 if (pos)
265 pos = __pci_find_next_cap(bus, devfn, pos, cap);
266
267 return pos;
268}
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: extended capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space, or 0 if the device does
 * not support it.
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * Having no extended capabilities is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		/* Next pointers below 0x100 terminate the list. */
		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
336 int cap)
337{
338 u32 header;
339 int ttl;
340 int pos = PCI_CFG_SPACE_SIZE;
341
342
343 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
344
345 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
346 return 0;
347 if (header == 0xffffffff || header == 0)
348 return 0;
349
350 while (ttl-- > 0) {
351 if (PCI_EXT_CAP_ID(header) == cap)
352 return pos;
353
354 pos = PCI_EXT_CAP_NEXT(header);
355 if (pos < PCI_CFG_SPACE_SIZE)
356 break;
357
358 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
359 break;
360 }
361
362 return 0;
363}
364
/*
 * Find the next HyperTransport capability of subtype @ht_cap at or after
 * @pos.  SLAVE/HOST subtypes are matched on 3 bits of the capability type
 * field, all other subtypes on 5 bits.  A single TTL bounds the whole walk.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		/* Byte 3 of an HT capability holds the capability type. */
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
392
393
394
395
396
397
398
399
400
401
402
403
404
/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap.  @pos should always be a value
 * returned from pci_find_ht_capability().
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
410
411
412
413
414
415
416
417
418
419
420
421
422int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
423{
424 int pos;
425
426 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
427 if (pos)
428 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
429
430 return pos;
431}
432EXPORT_SYMBOL_GPL(pci_find_ht_capability);
433
434
435
436
437
438
439
440
441
442
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For the given resource region of the given device, return the resource
 * region of the parent bus the given region is contained in, or where it
 * should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		/* Skip windows that don't contain the child region. */
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;
		/* Skip windows of a different type (I/O vs memory). */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;
		/* Matching prefetchability: exact match, take it. */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;
		/* A non-prefetchable child cannot live in a prefetchable window. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* ... but a prefetchable child may fall back to a
		 * non-prefetchable window; remember the first such one. */
		if (!best)
			best = r;
	}
	return best;
}
468
469
470
471
472
473
474
475
476static void
477pci_restore_bars(struct pci_dev *dev)
478{
479 int i;
480
481 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
482 pci_update_resource(dev, i);
483}
484
/* Platform (e.g. firmware) power-management backend; at most one registers. */
static struct pci_platform_pm_ops *pci_platform_pm;

/*
 * Register the platform PM backend.  Rejects an ops table missing any of
 * the mandatory callbacks.
 * NOTE(review): run_wake is not validated here although
 * platform_pci_run_wake() calls it — presumably every current backend
 * provides it; confirm before relying on that.
 */
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}
495
/* Thin wrappers around the registered platform PM backend.  Each returns a
 * safe default (false / -ENOSYS / -ENODEV / PCI_POWER_ERROR) when no
 * backend is registered. */

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
		pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
		pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
		pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
529
530
531
532
533
534
535
536
537
538
539
540
541
542
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or the requested state.
 * 0 if device already is in the requested state, or if its power state
 * has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state:  we can enter D0 from any state, but we
	 * can only go deeper to sleep if we're already in a low power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* Check if this device supports the desired state. */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force the entire control word to 0:
	 * this doesn't affect PME_Status, disables PME_En and sets the
	 * PowerState field to D0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter the specified state. */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays; see the PCI PM
	 * specification for the required D3hot and D2 settle times. */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Read back what the device actually accepted. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * In that case (NO_SOFT_RESET clear, see need_restore above)
	 * re-program the BARs from the saved state.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
635
636
637
638
639
640
641
642void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
643{
644 if (dev->pm_cap) {
645 u16 pmcsr;
646
647 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
648 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
649 } else {
650 dev->current_state = state;
651 }
652}
653
654
655
656
657
658
659static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
660{
661 int error;
662
663 if (platform_pci_power_manageable(dev)) {
664 error = platform_pci_set_power_state(dev, state);
665 if (!error)
666 pci_update_current_state(dev, state);
667
668 if (!dev->pm_cap)
669 dev->current_state = PCI_D0;
670 } else {
671 error = -ENODEV;
672
673 if (!dev->pm_cap)
674 dev->current_state = PCI_D0;
675 }
676
677 return error;
678}
679
680
681
682
683
684
685static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
686{
687 if (state == PCI_D0)
688 pci_platform_power_transition(dev, PCI_D0);
689}
690
691
692
693
694
695
696
697
698int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
699{
700 return state >= PCI_D0 ?
701 pci_platform_power_transition(dev, state) : -EINVAL;
702}
703EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware
 * and/or the device's PCI PM registers.  Returns 0 on success or a
 * negative error code.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering to the D0..D3hot range. */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or its parent bridge can't do D1/D2,
		 * silently succeed without changing anything.
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	/* A successful platform transition overrides a native failure. */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	/*
	 * Give the upstream link a chance to reconfigure ASPM for the
	 * new device state.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
757
758
759
760
761
762
763
764
765
766
767
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system
 *
 * Returns PCI power state suitable for @dev and @state, preferring
 * whatever the platform firmware suggests.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	/* No PM capability: the only state the device knows is D0. */
	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* NOTE(review): freeze/pre-thaw deliberately share the
		 * suspend/hibernate path here and map to D3hot. */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
797
/* Number of PCIe control registers pci_save_pcie_state() may snapshot. */
#define PCI_EXP_SAVE_REGS 7

/*
 * The pcie_cap_has_*() predicates decide, from the device/port type and
 * the capability-version field in PCI_EXP_FLAGS, which control registers
 * exist on this device.  Version-2 capabilities have all of them.
 */
#define pcie_cap_has_devctl(type, flags) 1
#define pcie_cap_has_lnkctl(type, flags) \
	((flags & PCI_EXP_FLAGS_VERS) > 1 || \
	 (type == PCI_EXP_TYPE_ROOT_PORT || \
	  type == PCI_EXP_TYPE_ENDPOINT || \
	  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags) \
	((flags & PCI_EXP_FLAGS_VERS) > 1 || \
	 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
	  (type == PCI_EXP_TYPE_DOWNSTREAM && \
	   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags) \
	((flags & PCI_EXP_FLAGS_VERS) > 1 || \
	 (type == PCI_EXP_TYPE_ROOT_PORT || \
	  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags) \
	((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags) \
	((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags) \
	((flags & PCI_EXP_FLAGS_VERS) > 1)
821
/*
 * Snapshot the PCIe capability control registers into the device's
 * pre-allocated save buffer.  The registers are read in a fixed order;
 * pci_restore_pcie_state() must write them back in the same order.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;	/* not a PCIe device: nothing to save */

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	/* Only save registers the capability version/port type provides. */
	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
859
/*
 * Write the PCIe control registers back from the save buffer filled by
 * pci_save_pcie_state(), in the same fixed order.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	/* Same predicates and order as pci_save_pcie_state(). */
	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
890
891
892static int pci_save_pcix_state(struct pci_dev *dev)
893{
894 int pos;
895 struct pci_cap_saved_state *save_state;
896
897 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
898 if (pos <= 0)
899 return 0;
900
901 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
902 if (!save_state) {
903 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
904 return -ENOMEM;
905 }
906
907 pci_read_config_word(dev, pos + PCI_X_CMD,
908 (u16 *)save_state->cap.data);
909
910 return 0;
911}
912
913static void pci_restore_pcix_state(struct pci_dev *dev)
914{
915 int i = 0, pos;
916 struct pci_cap_saved_state *save_state;
917 u16 *cap;
918
919 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
920 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
921 if (!save_state || pos <= 0)
922 return;
923 cap = (u16 *)&save_state->cap.data[0];
924
925 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
926}
927
928
929
930
931
932
933int
934pci_save_state(struct pci_dev *dev)
935{
936 int i;
937
938 for (i = 0; i < 16; i++)
939 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
940 dev->state_saved = true;
941 if ((i = pci_save_pcie_state(dev)) != 0)
942 return i;
943 if ((i = pci_save_pcix_state(dev)) != 0)
944 return i;
945 return 0;
946}
947
948
949
950
951
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	if (!dev->state_saved)
		return;

	/* PCI Express registers must be restored first. */
	pci_restore_pcie_state(dev);

	/*
	 * Restore the header back-to-front so the BARs are programmed
	 * before the command register re-enables decoding.
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev,i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	/* The snapshot is consumed: a new save is needed before next restore. */
	dev->state_saved = false;
}
983
/*
 * Opaque snapshot returned by pci_store_saved_state(): the 64-byte config
 * header followed by a flat, size-terminated sequence of capability blobs.
 */
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};
988
989
990
991
992
993
994
995
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *                         the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Returns NULL if no state was saved or on allocation failure.  The caller
 * owns the returned buffer (kfree() it, or hand it to
 * pci_load_and_free_saved_state()).
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	/* One extra pci_cap_saved_data acts as the zero-size terminator. */
	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	/* Flatten each saved capability (header + data) into the buffer. */
	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* The terminator is already zeroed by kzalloc(). */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
1030
1031
1032
1033
1034
1035
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 *
 * Returns 0 on success, -EINVAL if @state doesn't match the device's
 * pre-allocated capability save buffers.
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	/* Walk the size-terminated capability blobs. */
	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;	/* no matching buffer on this device */

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
1065
1066
1067
1068
1069
1070
1071
1072int pci_load_and_free_saved_state(struct pci_dev *dev,
1073 struct pci_saved_state **state)
1074{
1075 int ret = pci_load_saved_state(dev, *state);
1076 kfree(*state);
1077 *state = NULL;
1078 return ret;
1079}
1080EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1081
1082static int do_pci_enable_device(struct pci_dev *dev, int bars)
1083{
1084 int err;
1085
1086 err = pci_set_power_state(dev, PCI_D0);
1087 if (err < 0 && err != -EIO)
1088 return err;
1089 err = pcibios_enable_device(dev, bars);
1090 if (err < 0)
1091 return err;
1092 pci_fixup_device(pci_fixup_enable, dev);
1093
1094 return 0;
1095}
1096
1097
1098
1099
1100
1101
1102
1103
1104int pci_reenable_device(struct pci_dev *dev)
1105{
1106 if (pci_is_enabled(dev))
1107 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1108 return 0;
1109}
1110
/*
 * Common implementation of pci_enable_device{,_io,_mem}(): refresh the
 * cached power state, bump the enable count and, on the first enable,
 * actually enable the resources selected by @flags.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * The power state could be unknown at this point (fresh boot or
	 * after device removal), so read the current state from the PM
	 * control register to keep dev->current_state accurate.
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	/* Already enabled by someone else: just count the reference. */
	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;

	/* Collect standard BARs/ROM and bridge windows matching @flags. */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);	/* roll back the reference */
	return err;
}
1145
1146
1147
1148
1149
1150
1151
1152
1153
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O resources.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}
1158
1159
1160
1161
1162
1163
1164
1165
1166
/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable Memory resources.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O and memory.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
1187
1188
1189
1190
1191
1192
1193
/*
 * Managed PCI resources.  Tracks what the pcim_* helpers did for a device
 * so pcim_release() can undo it on driver detach.
 */
struct pci_devres {
	unsigned int enabled:1;		/* device enabled via pcim_enable_device() */
	unsigned int pinned:1;		/* keep enabled after release */
	unsigned int orig_intx:1;	/* saved INTx state */
	unsigned int restore_intx:1;	/* restore orig_intx on release */
	u32 region_mask;		/* BAR regions claimed via pcim helpers */
};
1201
/*
 * Devres release callback: undo everything recorded in the pci_devres —
 * disable MSI/MSI-X, release claimed regions, restore INTx, and disable
 * the device unless it was pinned.
 */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
1223
1224static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1225{
1226 struct pci_devres *dr, *new_dr;
1227
1228 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1229 if (dr)
1230 return dr;
1231
1232 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1233 if (!new_dr)
1234 return NULL;
1235 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1236}
1237
1238static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1239{
1240 if (pci_is_managed(pdev))
1241 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1242 return NULL;
1243}
1244
1245
1246
1247
1248
1249
1250
1251int pcim_enable_device(struct pci_dev *pdev)
1252{
1253 struct pci_devres *dr;
1254 int rc;
1255
1256 dr = get_pci_dr(pdev);
1257 if (unlikely(!dr))
1258 return -ENOMEM;
1259 if (dr->enabled)
1260 return 0;
1261
1262 rc = pci_enable_device(pdev);
1263 if (!rc) {
1264 pdev->is_managed = 1;
1265 dr->enabled = 1;
1266 }
1267 return rc;
1268}
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278void pcim_pin_device(struct pci_dev *pdev)
1279{
1280 struct pci_devres *dr;
1281
1282 dr = find_pci_dr(pdev);
1283 WARN_ON(!dr || !dr->enabled);
1284 if (dr)
1285 dr->pinned = 1;
1286}
1287
1288
1289
1290
1291
1292
1293
1294
1295
/* Weak default: architectures override this to do arch-specific disable work. */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1297
1298static void do_pci_disable_device(struct pci_dev *dev)
1299{
1300 u16 pci_command;
1301
1302 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1303 if (pci_command & PCI_COMMAND_MASTER) {
1304 pci_command &= ~PCI_COMMAND_MASTER;
1305 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1306 }
1307
1308 pcibios_disable_device(dev);
1309}
1310
1311
1312
1313
1314
1315
1316
1317
/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (!pci_is_enabled(dev))
		return;

	do_pci_disable_device(dev);
}
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	/* Clear the managed "enabled" flag so pcim_release() won't
	 * disable a second time. */
	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	/* Only the last disable actually turns the device off. */
	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
/*
 * Weak default: architectures able to drive PCIe reset states override
 * this; otherwise the operation is unsupported.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}
1365
1366
1367
1368
1369
1370
1371
1372
1373
/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device, delegating to the
 * architecture hook.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
1378
1379
1380
1381
1382
1383
1384
1385
1386
/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device; if set, clear it and also clear
 * PME enable (if set).  Return 'true' if PME status and PME enable were
 * both set, 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* PME status is write-1-to-clear. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid an interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1423{
1424 if (pme_poll_reset && dev->pme_poll)
1425 dev->pme_poll = false;
1426
1427 if (pci_check_pme_status(dev)) {
1428 pci_wakeup_event(dev);
1429 pm_request_resume(&dev->dev);
1430 }
1431 return 0;
1432}
1433
1434
1435
1436
1437
1438void pci_pme_wakeup_bus(struct pci_bus *bus)
1439{
1440 if (bus)
1441 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1442}
1443
1444
1445
1446
1447
1448
1449bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1450{
1451 if (!dev->pm_cap)
1452 return false;
1453
1454 return !!(dev->pme_support & (1 << state));
1455}
1456
/*
 * Delayed work: poll PME status of every device on pci_pme_list, prune
 * entries that no longer need polling, and re-arm while any remain.
 */
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				/* Device no longer polled: drop it. */
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
1477
1478
1479
1480
1481
1482
1483
1484
1485
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME#
 * before calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status (write-1-to-clear) and set PME_En. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Some hardware never delivers PME# interrupts even though the
	 * status bit gets set.  For such devices (pme_poll set) we fall
	 * back to periodic polling: add the device to pci_pme_list and
	 * let pci_pme_list_scan() check its PME status regularly.
	 */
	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			/* First entry: kick off the polling work. */
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
			enable ? "enabled" : "disabled");
}
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 *
 * RETURN VALUE:
 * 0 is returned on success.
 * -EINVAL is returned if device is not supposed to wake up the system.
 * Error code depending on the platform is returned if both the platform
 * and the native mechanism fail to enable the generation of wake-up events.
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * Enable: arm native PME# first (if possible), then the platform
	 * hook; the platform result only matters when PME# is unavailable.
	 * Disable: platform first, then PME#, for symmetry.
	 */
	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1622{
1623 return pci_pme_capable(dev, PCI_D3cold) ?
1624 pci_enable_wake(dev, PCI_D3cold, enable) :
1625 pci_enable_wake(dev, PCI_D3hot, enable);
1626}
1627
1628
1629
1630
1631
1632
1633
1634
1635
/**
 * pci_target_state - find an appropriate low power state for a given device
 * @dev: PCI device in question
 *
 * Use the platform's preference when it manages this device's power,
 * otherwise default to D3hot (or D0 when the device has no PM capability),
 * limited to states the device can generate wake-up events from if
 * wake-up is allowed for it.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fall through - D1/D2 are usable on this device */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events (pme_support is a bitmask indexed by state).
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684int pci_prepare_to_sleep(struct pci_dev *dev)
1685{
1686 pci_power_t target_state = pci_target_state(dev);
1687 int error;
1688
1689 if (target_state == PCI_POWER_ERROR)
1690 return -EIO;
1691
1692 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1693
1694 error = pci_set_power_state(dev, target_state);
1695
1696 if (error)
1697 pci_enable_wake(dev, target_state, false);
1698
1699 return error;
1700}
1701
1702
1703
1704
1705
1706
1707
/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition
 *			 into the working state
 * @dev: Device to handle.
 *
 * Disable the device's wake-up mechanism and put it back into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
1713
1714
1715
1716
1717
1718
1719
1720
1721int pci_finish_runtime_suspend(struct pci_dev *dev)
1722{
1723 pci_power_t target_state = pci_target_state(dev);
1724 int error;
1725
1726 if (target_state == PCI_POWER_ERROR)
1727 return -EIO;
1728
1729 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1730
1731 error = pci_set_power_state(dev, target_state);
1732
1733 if (error)
1734 __pci_enable_wake(dev, target_state, true, false);
1735
1736 return error;
1737}
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747bool pci_dev_run_wake(struct pci_dev *dev)
1748{
1749 struct pci_bus *bus = dev->bus;
1750
1751 if (device_run_wake(&dev->dev))
1752 return true;
1753
1754 if (!dev->pme_support)
1755 return false;
1756
1757 while (bus->parent) {
1758 struct pci_dev *bridge = bus->self;
1759
1760 if (device_run_wake(&bridge->dev))
1761 return true;
1762
1763 bus = bus->parent;
1764 }
1765
1766
1767 if (bus->bridge)
1768 return device_run_wake(bus->bridge);
1769
1770 return false;
1771}
1772EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1773
1774
1775
1776
1777
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events).  If @dev supports
 * platform wakeup events, set the device flag to indicate as much.  This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	platform_pci_sleep_wake(dev, false);
}
1860
1861
1862
1863
1864
1865
1866
1867static int pci_add_cap_save_buffer(
1868 struct pci_dev *dev, char cap, unsigned int size)
1869{
1870 int pos;
1871 struct pci_cap_saved_state *save_state;
1872
1873 pos = pci_find_capability(dev, cap);
1874 if (pos <= 0)
1875 return 0;
1876
1877 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1878 if (!save_state)
1879 return -ENOMEM;
1880
1881 save_state->cap.cap_nr = cap;
1882 save_state->cap.size = size;
1883 pci_add_saved_cap(dev, save_state);
1884
1885 return 0;
1886}
1887
1888
1889
1890
1891
1892void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1893{
1894 int error;
1895
1896 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1897 PCI_EXP_SAVE_REGS * sizeof(u16));
1898 if (error)
1899 dev_err(&dev->dev,
1900 "unable to preallocate PCI Express save buffer\n");
1901
1902 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1903 if (error)
1904 dev_err(&dev->dev,
1905 "unable to preallocate PCI-X save buffer\n");
1906}
1907
1908
1909
1910
1911
/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 *
 * ARI forwarding is enabled in the *upstream bridge*, on behalf of the
 * device below it.  Only function 0 needs to trigger this.
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 flags, ctrl;
	struct pci_dev *bridge;

	/* Only PCIe function 0 devices carry the ARI extended capability. */
	if (!pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !pci_is_pcie(bridge))
		return;

	pos = pci_pcie_cap(bridge);
	if (!pos)
		return;

	/* ARI is a PCIe v2 feature; Device Capabilities 2 only exists there. */
	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959void pci_enable_ido(struct pci_dev *dev, unsigned long type)
1960{
1961 int pos;
1962 u16 ctrl;
1963
1964 pos = pci_pcie_cap(dev);
1965 if (!pos)
1966 return;
1967
1968 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1969 if (type & PCI_EXP_IDO_REQUEST)
1970 ctrl |= PCI_EXP_IDO_REQ_EN;
1971 if (type & PCI_EXP_IDO_COMPLETION)
1972 ctrl |= PCI_EXP_IDO_CMP_EN;
1973 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1974}
1975EXPORT_SYMBOL(pci_enable_ido);
1976
1977
1978
1979
1980
1981
1982void pci_disable_ido(struct pci_dev *dev, unsigned long type)
1983{
1984 int pos;
1985 u16 ctrl;
1986
1987 if (!pci_is_pcie(dev))
1988 return;
1989
1990 pos = pci_pcie_cap(dev);
1991 if (!pos)
1992 return;
1993
1994 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1995 if (type & PCI_EXP_IDO_REQUEST)
1996 ctrl &= ~PCI_EXP_IDO_REQ_EN;
1997 if (type & PCI_EXP_IDO_COMPLETION)
1998 ctrl &= ~PCI_EXP_IDO_CMP_EN;
1999 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2000}
2001EXPORT_SYMBOL(pci_disable_ido);
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2023{
2024 int pos;
2025 u32 cap;
2026 u16 ctrl;
2027 int ret;
2028
2029 if (!pci_is_pcie(dev))
2030 return -ENOTSUPP;
2031
2032 pos = pci_pcie_cap(dev);
2033 if (!pos)
2034 return -ENOTSUPP;
2035
2036 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2037 if (!(cap & PCI_EXP_OBFF_MASK))
2038 return -ENOTSUPP;
2039
2040
2041 if (dev->bus) {
2042 ret = pci_enable_obff(dev->bus->self, type);
2043 if (ret)
2044 return ret;
2045 }
2046
2047 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2048 if (cap & PCI_EXP_OBFF_WAKE)
2049 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2050 else {
2051 switch (type) {
2052 case PCI_EXP_OBFF_SIGNAL_L0:
2053 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2054 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2055 break;
2056 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2057 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2058 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2059 break;
2060 default:
2061 WARN(1, "bad OBFF signal type\n");
2062 return -ENOTSUPP;
2063 }
2064 }
2065 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2066
2067 return 0;
2068}
2069EXPORT_SYMBOL(pci_enable_obff);
2070
2071
2072
2073
2074
2075
2076
2077void pci_disable_obff(struct pci_dev *dev)
2078{
2079 int pos;
2080 u16 ctrl;
2081
2082 if (!pci_is_pcie(dev))
2083 return;
2084
2085 pos = pci_pcie_cap(dev);
2086 if (!pos)
2087 return;
2088
2089 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2090 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2091 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2092}
2093EXPORT_SYMBOL(pci_disable_obff);
2094
2095
2096
2097
2098
2099
2100
2101
2102bool pci_ltr_supported(struct pci_dev *dev)
2103{
2104 int pos;
2105 u32 cap;
2106
2107 if (!pci_is_pcie(dev))
2108 return false;
2109
2110 pos = pci_pcie_cap(dev);
2111 if (!pos)
2112 return false;
2113
2114 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2115
2116 return cap & PCI_EXP_DEVCAP2_LTR;
2117}
2118EXPORT_SYMBOL(pci_ltr_supported);
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130int pci_enable_ltr(struct pci_dev *dev)
2131{
2132 int pos;
2133 u16 ctrl;
2134 int ret;
2135
2136 if (!pci_ltr_supported(dev))
2137 return -ENOTSUPP;
2138
2139 pos = pci_pcie_cap(dev);
2140 if (!pos)
2141 return -ENOTSUPP;
2142
2143
2144 if (PCI_FUNC(dev->devfn) != 0)
2145 return -EINVAL;
2146
2147
2148 if (dev->bus) {
2149 ret = pci_enable_ltr(dev->bus->self);
2150 if (ret)
2151 return ret;
2152 }
2153
2154 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2155 ctrl |= PCI_EXP_LTR_EN;
2156 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2157
2158 return 0;
2159}
2160EXPORT_SYMBOL(pci_enable_ltr);
2161
2162
2163
2164
2165
2166void pci_disable_ltr(struct pci_dev *dev)
2167{
2168 int pos;
2169 u16 ctrl;
2170
2171 if (!pci_ltr_supported(dev))
2172 return;
2173
2174 pos = pci_pcie_cap(dev);
2175 if (!pos)
2176 return;
2177
2178
2179 if (PCI_FUNC(dev->devfn) != 0)
2180 return;
2181
2182 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2183 ctrl &= ~PCI_EXP_LTR_EN;
2184 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2185}
2186EXPORT_SYMBOL(pci_disable_ltr);
2187
/*
 * Reduce *val to the LTR (value, scale) encoding: repeatedly divide by 32
 * (rounding up) until it fits in the 10-bit value field, and return the
 * number of divisions performed (the scale exponent).
 */
static int __pci_ltr_scale(int *val)
{
	int scale;

	for (scale = 0; *val > 1023; scale++)
		*val = (*val + 31) / 32;

	return scale;
}
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2208{
2209 int pos, ret, snoop_scale, nosnoop_scale;
2210 u16 val;
2211
2212 if (!pci_ltr_supported(dev))
2213 return -ENOTSUPP;
2214
2215 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2216 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2217
2218 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2219 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2220 return -EINVAL;
2221
2222 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2223 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2224 return -EINVAL;
2225
2226 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2227 if (!pos)
2228 return -ENOTSUPP;
2229
2230 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2231 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2232 if (ret != 4)
2233 return -EIO;
2234
2235 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2236 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2237 if (ret != 4)
2238 return -EIO;
2239
2240 return 0;
2241}
2242EXPORT_SYMBOL(pci_set_ltr);
2243
2244static int pci_acs_enable;
2245
2246
2247
2248
/**
 * pci_request_acs - ask for ACS to be enabled if supported
 *
 * Sets a flag consulted by pci_enable_acs() during device setup.
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
2253
2254
2255
2256
2257
2258void pci_enable_acs(struct pci_dev *dev)
2259{
2260 int pos;
2261 u16 cap;
2262 u16 ctrl;
2263
2264 if (!pci_acs_enable)
2265 return;
2266
2267 if (!pci_is_pcie(dev))
2268 return;
2269
2270 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2271 if (!pos)
2272 return;
2273
2274 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2275 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2276
2277
2278 ctrl |= (cap & PCI_ACS_SV);
2279
2280
2281 ctrl |= (cap & PCI_ACS_RR);
2282
2283
2284 ctrl |= (cap & PCI_ACS_CR);
2285
2286
2287 ctrl |= (cap & PCI_ACS_UF);
2288
2289 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2290}
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2304{
2305 int slot;
2306
2307 if (pci_ari_enabled(dev->bus))
2308 slot = 0;
2309 else
2310 slot = PCI_SLOT(dev->devfn);
2311
2312 return (((pin - 1) + slot) % 4) + 1;
2313}
2314
2315int
2316pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2317{
2318 u8 pin;
2319
2320 pin = dev->pin;
2321 if (!pin)
2322 return -1;
2323
2324 while (!pci_is_root_bus(dev->bus)) {
2325 pin = pci_swizzle_interrupt_pin(dev, pin);
2326 dev = dev->bus->self;
2327 }
2328 *bridge = dev;
2329 return pin;
2330}
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2341{
2342 u8 pin = *pinp;
2343
2344 while (!pci_is_root_bus(dev->bus)) {
2345 pin = pci_swizzle_interrupt_pin(dev, pin);
2346 dev = dev->bus->self;
2347 }
2348 *pinp = pin;
2349 return PCI_SLOT(dev->devfn);
2350}
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	/* Keep the managed-device bookkeeping in sync. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any address inside
 * the PCI regions unless this call returns successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or sysfs.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
									int exclusive)
{
	struct pci_devres *dr;

	/* Unimplemented BARs always "succeed". */
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	/* Record the reservation so devres can release it automatically. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Non-exclusive variant of __pci_request_region().  Returns 0 on
 * success, or %EBUSY on error.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
/**
 * pci_request_region_exclusive - Reserved PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Like pci_request_region(), but also marks the region so that userspace
 * is explicitly not allowed to map it via /dev/mem or sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
2471
2472
2473
2474
2475
2476
2477
2478
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int bar;

	for (bar = 0; bar < 6; bar++) {
		if (bars & (1 << bar))
			pci_release_region(pdev, bar);
	}
}
2487
2488int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2489 const char *res_name, int excl)
2490{
2491 int i;
2492
2493 for (i = 0; i < 6; i++)
2494 if (bars & (1 << i))
2495 if (__pci_request_region(pdev, i, res_name, excl))
2496 goto err_out;
2497 return 0;
2498
2499err_out:
2500 while(--i >= 0)
2501 if (bars & (1 << i))
2502 pci_release_region(pdev, i);
2503
2504 return -EBUSY;
2505}
2506
2507
2508
2509
2510
2511
2512
2513
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
2519
/*
 * Exclusive variant of pci_request_selected_regions(): the regions are
 * additionally protected against userspace mapping via /dev/mem or sysfs.
 */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
		int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all six standard PCI BARs.  Call this function only after all
 * use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
/**
 * pci_request_regions - Reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as being reserved
 * by owner @res_name.  Do not access any address inside the PCI regions
 * unless this call returns successfully.
 *
 * Returns 0 on success, or %EBUSY on error.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
/**
 * pci_request_regions_exclusive - Reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Like pci_request_regions(), but additionally marks all regions so that
 * userspace is explicitly not allowed to map them via /dev/mem or sysfs.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
2580
2581static void __pci_set_master(struct pci_dev *dev, bool enable)
2582{
2583 u16 old_cmd, cmd;
2584
2585 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2586 if (enable)
2587 cmd = old_cmd | PCI_COMMAND_MASTER;
2588 else
2589 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2590 if (cmd != old_cmd) {
2591 dev_dbg(&dev->dev, "%s bus mastering\n",
2592 enable ? "enabling" : "disabling");
2593 pci_write_config_word(dev, PCI_COMMAND, cmd);
2594 }
2595 dev->is_busmaster = enable;
2596}
2597
2598
2599
2600
2601
2602
2603
2604
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}
2610
2611
2612
2613
2614
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back; some devices hard-wire the register to zero. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2657
2658#ifdef PCI_DISABLE_MWI
/* MWI is disabled on this platform (PCI_DISABLE_MWI): report success
 * without touching the device. */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}
2663
/* MWI disabled (PCI_DISABLE_MWI): best-effort enable trivially succeeds. */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}
2668
/* MWI disabled (PCI_DISABLE_MWI): nothing to clear. */
void pci_clear_mwi(struct pci_dev *dev)
{
}
2672
2673#else
2674
2675
2676
2677
2678
2679
2680
2681
2682
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * The cache line size must be programmed first (see
 * pci_set_cacheline_size()) for MWI to be effective.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	
	return 0;
}
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return pci_set_mwi(dev);
}
2717
2718
2719
2720
2721
2722
2723
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device.
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
2735#endif
2736
2737
2738
2739
2740
2741
2742
2743
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		/* For managed devices, remember the original INTx state so
		 * devres can restore it on driver detach. */
		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}
2769
2770
2771
2772
2773
2774
2775
2776
2777
/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.  It operates directly on config
 * space, so it works even before the MSI driver state is set up.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}
EXPORT_SYMBOL_GPL(pci_msi_off);
2797
/* Thin wrapper: set the device's maximum DMA segment size via the DMA core. */
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2803
/* Thin wrapper: set the device's DMA segment boundary mask via the DMA core. */
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2809
/*
 * pcie_flr - perform a PCIe Function Level Reset
 * @dev: device to reset
 * @probe: non-zero to only check whether FLR is possible
 *
 * Returns -ENOTTY when FLR is not supported, 0 otherwise.  Waits up to
 * 100+200+400 ms for pending transactions before resetting anyway, then
 * sleeps 100 ms after the reset as required by the PCIe spec.
 */
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status, control;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean, with exponential backoff. */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
	control |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

	msleep(100);

	return 0;
}
2850
/*
 * pci_af_flr - perform an Advanced Features (AF) Function Level Reset
 * @dev: device to reset
 * @probe: non-zero to only check whether AF FLR is possible
 *
 * Conventional-PCI analogue of pcie_flr(); same wait/backoff strategy.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	/* Both Transaction Pending and FLR must be advertised. */
	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean, with exponential backoff. */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If the device supports PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in the PCI_D0 state
 * before, it is not possible to reset it this way.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}
2933
/*
 * pci_parent_bus_reset - reset a device via its parent bridge's secondary
 * bus reset.  Only usable when @dev is the sole device on its bus (the
 * reset affects every device behind the bridge) and is not itself a bridge.
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	/* Bail out if any sibling shares the bus. */
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	/* Pulse the Secondary Bus Reset bit in the bridge control register. */
	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}
2960
/*
 * pci_dev_reset - try each reset method in order of preference
 * @dev: device to reset
 * @probe: non-zero to only probe whether a reset is possible
 *
 * Tries, in order: device-specific quirk reset, PCIe FLR, AF FLR,
 * PM D3hot/D0 cycle, parent secondary bus reset.  For a real reset
 * (!probe), userspace config access is blocked and the device lock held
 * for the duration so nothing touches the device mid-reset.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	if (!probe) {
		pci_block_user_cfg_access(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	if (!probe) {
		device_unlock(&dev->dev);
		pci_unblock_user_cfg_access(dev);
	}

	return rc;
}
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Performs a function reset without saving/restoring config state or
 * stopping the driver first; the caller is responsible for that.
 * Returns 0 on success, or a negative error (-ENOTTY when no reset
 * method applies).
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Probe-only: returns 0 if some reset method is available, without
 * actually resetting anything.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Probes first, then saves config state, masks INTx, performs the reset
 * and restores the saved state, so the device comes back usable.
 * Returns 0 on success, or a negative error code.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	/* Make sure a reset method exists before disturbing the device. */
	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	pci_restore_state(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
3077
3078
3079
3080
3081
3082
3083
3084
3085int pcix_get_max_mmrbc(struct pci_dev *dev)
3086{
3087 int cap;
3088 u32 stat;
3089
3090 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3091 if (!cap)
3092 return -EINVAL;
3093
3094 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3095 return -EINVAL;
3096
3097 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3098}
3099EXPORT_SYMBOL(pcix_get_max_mmrbc);
3100
3101
3102
3103
3104
3105
3106
3107
3108int pcix_get_mmrbc(struct pci_dev *dev)
3109{
3110 int cap;
3111 u16 cmd;
3112
3113 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3114 if (!cap)
3115 return -EINVAL;
3116
3117 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3118 return -EINVAL;
3119
3120 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3121}
3122EXPORT_SYMBOL(pcix_get_mmrbc);
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have errata
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	/* 512 = 2^9 -> ffs = 10, so the encoded field value starts at 0. */
	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Requested count must not exceed the designed maximum. */
	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		/* Some buses are flagged as unable to raise mmrbc (errata). */
		if (v > o && dev->bus &&
		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
3171
3172
3173
3174
3175
3176
3177
3178
3179int pcie_get_readrq(struct pci_dev *dev)
3180{
3181 int ret, cap;
3182 u16 ctl;
3183
3184 cap = pci_pcie_cap(dev);
3185 if (!cap)
3186 return -EINVAL;
3187
3188 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3189 if (!ret)
3190 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3191
3192 return ret;
3193}
3194EXPORT_SYMBOL(pcie_get_readrq);
3195
3196
3197
3198
3199
3200
3201
3202
3203
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		goto out;

	cap = pci_pcie_cap(dev);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;
	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with.
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < 0)
			return mps;
		if (mps < rq)
			rq = mps;
	}

	/* 128 = 2^7 -> ffs = 8; field lives in bits 14:12. */
	v = (ffs(rq) - 8) << 12;

	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= v;
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}

out:
	return err;
}
EXPORT_SYMBOL(pcie_set_readrq);
3246
3247
3248
3249
3250
3251
3252
3253
3254int pcie_get_mps(struct pci_dev *dev)
3255{
3256 int ret, cap;
3257 u16 ctl;
3258
3259 cap = pci_pcie_cap(dev);
3260 if (!cap)
3261 return -EINVAL;
3262
3263 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3264 if (!ret)
3265 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3266
3267 return ret;
3268}
3269
3270
3271
3272
3273
3274
3275
3276
3277
/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		goto out;

	/* 128 = 2^7 -> ffs = 8; encoded value must not exceed the device's
	 * advertised maximum payload size supported (pcie_mpss). */
	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss) 
		goto out;
	v <<= 5;

	cap = pci_pcie_cap(dev);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;

	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
		ctl |= v;
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}
out:
	return err;
}
3307
3308
3309
3310
3311
3312
3313
3314
3315int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3316{
3317 int i, bars = 0;
3318 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3319 if (pci_resource_flags(dev, i) & flags)
3320 bars |= (1 << i);
3321 return bars;
3322}
3323
3324
3325
3326
3327
3328
3329
3330
3331
/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource (e.g. SR-IOV VF BARs) */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}
3352
3353
/* Optional architecture hook for VGA routing; registered once at boot. */
static arch_set_vga_state_t arch_set_vga_state;

/* Install the architecture-specific VGA state handler (boot-time only). */
void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;
}

/* Invoke the arch hook if one was registered; otherwise report success. */
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
						flags);
	return 0;
}
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378int pci_set_vga_state(struct pci_dev *dev, bool decode,
3379 unsigned int command_bits, u32 flags)
3380{
3381 struct pci_bus *bus;
3382 struct pci_dev *bridge;
3383 u16 cmd;
3384 int rc;
3385
3386 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3387
3388
3389 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3390 if (rc)
3391 return rc;
3392
3393 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3394 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3395 if (decode == true)
3396 cmd |= command_bits;
3397 else
3398 cmd &= ~command_bits;
3399 pci_write_config_word(dev, PCI_COMMAND, cmd);
3400 }
3401
3402 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3403 return 0;
3404
3405 bus = dev->bus;
3406 while (bus) {
3407 bridge = bus->self;
3408 if (bridge) {
3409 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3410 &cmd);
3411 if (decode == true)
3412 cmd |= PCI_BRIDGE_CTL_VGA;
3413 else
3414 cmd &= ~PCI_BRIDGE_CTL_VGA;
3415 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3416 cmd);
3417 }
3418 bus = bus->parent;
3419 }
3420 return 0;
3421}
3422
/* Storage for the "pci=resource_alignment=" boot/sysfs parameter string. */
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
3426
3427
3428
3429
3430
3431
3432
3433
/**
 * pci_specified_resource_alignment - get user-specified resource alignment
 * @dev: the PCI device to look up
 *
 * Scans the resource_alignment parameter string for an entry matching
 * @dev.  Entries are separated by ';' or ',' and have the form
 * [<order>@][<segment>:]<bus>:<slot>.<func>; when no order is given the
 * alignment defaults to PAGE_SIZE.
 *
 * RETURNS: the requested alignment in bytes, or 0 if @dev is not listed.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		/* Optional "<order>@" prefix selects the alignment order. */
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
			p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1; /* no order given: use PAGE_SIZE */
		}
		/* Try "seg:bus:slot.func" first, then "bus:slot.func". */
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
				&bus, &slot, &func, &count) != 3) {
				/* Invalid format: give up on the whole list. */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found a matching entry; stop searching. */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of parameter string or unexpected separator. */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
3483
3484
3485
3486
3487
3488
3489
3490
3491int pci_is_reassigndev(struct pci_dev *dev)
3492{
3493 return (pci_specified_resource_alignment(dev) != 0);
3494}
3495
3496ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3497{
3498 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3499 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3500 spin_lock(&resource_alignment_lock);
3501 strncpy(resource_alignment_param, buf, count);
3502 resource_alignment_param[count] = '\0';
3503 spin_unlock(&resource_alignment_lock);
3504 return count;
3505}
3506
3507ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3508{
3509 size_t count;
3510 spin_lock(&resource_alignment_lock);
3511 count = snprintf(buf, size, "%s", resource_alignment_param);
3512 spin_unlock(&resource_alignment_lock);
3513 return count;
3514}
3515
/* sysfs show method for /sys/bus/pci/resource_alignment. */
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}
3520
/* sysfs store method for /sys/bus/pci/resource_alignment. */
static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}
3526
/* Expose /sys/bus/pci/resource_alignment (world-readable, root-writable). */
BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
	pci_resource_alignment_store);

/* Register the attribute; late_initcall ensures pci_bus_type exists. */
static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
3537
/*
 * Disable PCI domain (segment) support; invoked for the
 * "pci=nodomains" boot option.  No-op when CONFIG_PCI_DOMAINS is off.
 */
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3554{
3555 return 1;
3556}
3557
/* Weak no-op stub; architectures may override to apply CardBus fixups. */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
3562
/*
 * Parse the "pci=" kernel boot parameter.  Each comma-separated option
 * is first offered to the architecture via pcibios_setup(); whatever
 * that leaves unconsumed is matched against the generic options below.
 * Always returns 0 so the parameter is never reported as malformed.
 */
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		/* pcibios_setup() returns the option if it did not consume it. */
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
3609
/* Symbols exported to modules: device enable/disable, region management,
 * capability lookup, bus mastering and MWI control, resource assignment. */
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

/* Power-management related exports. */
EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3648