1
2
3
4
5
6
7
8
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
14#include <linux/pm.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18#include <linux/string.h>
19#include <linux/log2.h>
20#include <linux/pci-aspm.h>
21#include <linux/pm_wakeup.h>
22#include <linux/interrupt.h>
23#include <linux/device.h>
24#include <linux/pm_runtime.h>
25#include <asm/setup.h>
26#include "pci.h"
27
/* Human-readable names for pci_power_t values, indexed by state + 1
 * (PCI_POWER_ERROR is -1, hence the leading "error" slot). */
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

/* Set by quirks for chipsets whose ISA DMA bridge is broken. */
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

/* Bitmask of PCIPCI_* chipset problem flags set by quirks. */
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/* Global lower bound (ms) for the delay after a D3hot transition;
 * may be raised by platform code.  Per-device d3_delay can exceed it. */
unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

/* Devices whose PME# must be polled (no usable interrupt delivery);
 * protected by pci_pme_list_mutex and serviced by pci_pme_work. */
static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

/* One entry on pci_pme_list per polled device. */
struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

/* Polling interval for the PME work item, in milliseconds. */
#define PME_TIMEOUT 1000
53
54static void pci_dev_d3_sleep(struct pci_dev *dev)
55{
56 unsigned int delay = dev->d3_delay;
57
58 if (delay < pci_pm_d3_delay)
59 delay = pci_pm_d3_delay;
60
61 msleep(delay);
62}
63
#ifdef CONFIG_PCI_DOMAINS
/* Nonzero when the arch supports multiple PCI domains (segments). */
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* Window sizes reserved behind CardBus bridges; overridable via the
 * pci= command line. */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* Extra window space reserved behind hotplug bridges; overridable via
 * the pci= command line. */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/*
 * Default cache line size programmed into devices, in units of
 * 32-bit dwords (hence the >> 2).  Arch or quirk code may override
 * pci_cache_line_size at runtime.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
88
89
90
91
92
93
94
95
96unsigned char pci_bus_max_busnr(struct pci_bus* bus)
97{
98 struct list_head *tmp;
99 unsigned char max, n;
100
101 max = bus->subordinate;
102 list_for_each(tmp, &bus->children) {
103 n = pci_bus_max_busnr(pci_bus_b(tmp));
104 if(n > max)
105 max = n;
106 }
107 return max;
108}
109EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
110
111#ifdef CONFIG_HAS_IOMEM
112void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
113{
114
115
116
117 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
118 WARN_ON(1);
119 return NULL;
120 }
121 return ioremap_nocache(pci_resource_start(pdev, bar),
122 pci_resource_len(pdev, bar));
123}
124EXPORT_SYMBOL_GPL(pci_ioremap_bar);
125#endif
126
127#if 0
128
129
130
131
132
133
/*
 * NOTE: dead code — this whole definition sits inside "#if 0" and is
 * never compiled.  Kept for reference only.
 *
 * Returns the highest bus number present in the system by scanning
 * every root bus with pci_bus_max_busnr().
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if(n > max)
			max = n;
	}
	return max;
}
148
149#endif
150
151#define PCI_FIND_CAP_TTL 48
152
/*
 * Walk the conventional capability list looking for capability ID @cap.
 * @pos is the config-space offset of a "next capability" pointer byte.
 * @ttl bounds the number of entries examined so a malformed (cyclic)
 * list cannot loop forever; it is shared so callers can chain walks.
 *
 * Returns the offset of the matching capability header, or 0.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		/* follow the next pointer stored at @pos */
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)		/* pointers below 0x40 terminate the list */
			break;
		pos &= ~3;		/* capability headers are dword aligned */
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)		/* no device / end of list */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
173
174static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
175 u8 pos, int cap)
176{
177 int ttl = PCI_FIND_CAP_TTL;
178
179 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
180}
181
/**
 * pci_find_next_capability - find the next occurrence of capability @cap
 * @dev: PCI device
 * @pos: offset of the capability header to search *after*
 * @cap: capability ID (PCI_CAP_ID_*)
 *
 * Resumes the list walk from @pos's "next" pointer; returns the offset
 * of the next matching capability, or 0 if there is none.
 */
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);
188
189static int __pci_bus_find_cap_start(struct pci_bus *bus,
190 unsigned int devfn, u8 hdr_type)
191{
192 u16 status;
193
194 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
195 if (!(status & PCI_STATUS_CAP_LIST))
196 return 0;
197
198 switch (hdr_type) {
199 case PCI_HEADER_TYPE_NORMAL:
200 case PCI_HEADER_TYPE_BRIDGE:
201 return PCI_CAPABILITY_LIST;
202 case PCI_HEADER_TYPE_CARDBUS:
203 return PCI_CB_CAPABILITY_LIST;
204 default:
205 return 0;
206 }
207
208 return 0;
209}
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
/**
 * pci_find_capability - query for a device's capability
 * @dev: PCI device
 * @cap: capability ID (PCI_CAP_ID_*)
 *
 * Returns the config-space offset of the first capability matching
 * @cap, or 0 if the device does not support it.
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
255{
256 int pos;
257 u8 hdr_type;
258
259 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
260
261 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
262 if (pos)
263 pos = __pci_find_next_cap(bus, devfn, pos, cap);
264
265 return pos;
266}
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
/**
 * pci_find_ext_capability - find a PCI Express extended capability
 * @dev: PCI device
 * @cap: extended capability ID (PCI_EXT_CAP_ID_*)
 *
 * Walks the extended capability list that starts at offset 0x100 in
 * PCIe extended config space.  Returns the offset of the matching
 * capability header, or 0 if not found or the device has no extended
 * config space.
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability bounds the walk */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * An all-zero header (ID, version and next pointer all 0) means
	 * the device has no extended capabilities at all.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		/* next pointers below 0x100 terminate the list */
		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
334 int cap)
335{
336 u32 header;
337 int ttl;
338 int pos = PCI_CFG_SPACE_SIZE;
339
340
341 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
342
343 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
344 return 0;
345 if (header == 0xffffffff || header == 0)
346 return 0;
347
348 while (ttl-- > 0) {
349 if (PCI_EXT_CAP_ID(header) == cap)
350 return pos;
351
352 pos = PCI_EXT_CAP_NEXT(header);
353 if (pos < PCI_CFG_SPACE_SIZE)
354 break;
355
356 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
357 break;
358 }
359
360 return 0;
361}
362
/*
 * Find a HyperTransport capability of subtype @ht_cap, starting the
 * walk at next-pointer offset @pos.  HT capabilities share the single
 * PCI_CAP_ID_HT capability ID and are distinguished by a subtype field
 * in byte 3 of the header; host/slave subtypes use a 3-bit field,
 * all others a 5-bit field.  The shared @ttl bounds the whole walk.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		/* wrong subtype — continue to the next HT capability */
		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
390
391
392
393
394
395
396
397
398
399
400
401
402
/**
 * pci_find_next_ht_capability - find the next HT capability after @pos
 * @dev: PCI device
 * @pos: offset of the HT capability to search after
 * @ht_cap: HyperTransport capability subtype (HT_CAPTYPE_*)
 *
 * Returns the offset of the next matching HT capability, or 0.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
408
409
410
411
412
413
414
415
416
417
418
419
420int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
421{
422 int pos;
423
424 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
425 if (pos)
426 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
427
428 return pos;
429}
430EXPORT_SYMBOL_GPL(pci_find_ht_capability);
431
432
433
434
435
436
437
438
439
440
/**
 * pci_find_parent_resource - return the parent bus resource containing @res
 * @dev: PCI device whose resource is being matched
 * @res: the resource to find a parent window for
 *
 * Scans the resources of @dev's bus for a window that contains @res and
 * has the same type (I/O vs memory).  An exact prefetchability match is
 * returned immediately; otherwise a non-prefetchable window is kept as
 * a fallback (a prefetchable window must never host a non-prefetchable
 * resource, but the reverse is allowed).
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		/* containment check; skipped for zero-based resources */
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* type mismatch */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* exact prefetch match — done */

		/* prefetchable window can't host non-prefetchable res */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;

		/* remember the first acceptable fallback */
		if (!best)
			best = r;
	}
	return best;
}
466
467
468
469
470
471
472
473
474static void
475pci_restore_bars(struct pci_dev *dev)
476{
477 int i;
478
479 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
480 pci_update_resource(dev, i);
481}
482
/* Hook table for platform (e.g. ACPI) power-management callbacks. */
static struct pci_platform_pm_ops *pci_platform_pm;

/* Install the platform PM callbacks; every mandatory op must be set. */
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

/* Can the platform change this device's power state? */
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

/* Ask the platform to put @dev into power state @t. */
static inline int platform_pci_set_power_state(struct pci_dev *dev,
						pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

/* Platform's preferred low-power state for @dev (or PCI_POWER_ERROR). */
static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

/* Can the platform wake the system up on behalf of @dev? */
static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

/* Enable/disable platform-side system wakeup for @dev. */
static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

/* Enable/disable platform-side runtime wakeup for @dev. */
static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
527
528
529
530
531
532
533
534
535
536
537
538
539
540
/**
 * pci_raw_set_power_state - set the power state of a device via its PMCSR
 * @dev: PCI device to handle
 * @state: target state (PCI_D0..PCI_D3hot)
 *
 * Writes the PM control/status register directly, waits the mandated
 * transition delay, then reads the state back to see whether the device
 * accepted it.  Returns 0 on success (including "already there"),
 * -EIO if the device has no PM capability or doesn't support @state,
 * -EINVAL for an illegal transition.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Nothing to do if we are already in the requested state. */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * D0 can be entered from any state, but deeper states may only
	 * be entered from shallower ones (e.g. no D2 -> D1).
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* Reject optional states the device didn't advertise. */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * Build the new PMCSR value.  Coming out of D3 (or an unknown
	 * state) the register contents are unreliable, so force the
	 * whole word to the target state instead of read-modify-write.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN:
		/*
		 * Leaving D3hot without the No_Soft_Reset bit means the
		 * device performs an internal reset — its BARs must be
		 * restored afterwards.
		 */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* fall through — force the register to the target state */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter the requested state. */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory transition delays: 10 ms for D3hot, 200 us for D2
	 * (entering or leaving either state). */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Read back to learn what state the device actually entered. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * Restore the BARs if the device was soft-reset by the D3hot
	 * exit (see need_restore above).
	 */
	if (need_restore)
		pci_restore_bars(dev);

	/* Let the upstream link's ASPM state track the change. */
	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
633
634
635
636
637
638
639
640void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
641{
642 if (dev->pm_cap) {
643 u16 pmcsr;
644
645 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
646 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
647 } else {
648 dev->current_state = state;
649 }
650}
651
652
653
654
655
656
657static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
658{
659 int error;
660
661 if (platform_pci_power_manageable(dev)) {
662 error = platform_pci_set_power_state(dev, state);
663 if (!error)
664 pci_update_current_state(dev, state);
665 } else {
666 error = -ENODEV;
667
668 if (!dev->pm_cap)
669 dev->current_state = PCI_D0;
670 }
671
672 return error;
673}
674
675
676
677
678
679
/*
 * Pre-transition hook: when powering *up* to D0, give the platform a
 * chance to act first (e.g. to supply power) before the PMCSR write.
 * Transitions to lower states need no platform work up front.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}
685
686
687
688
689
690
691
692
693int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
694{
695 return state >= PCI_D0 ?
696 pci_platform_power_transition(dev, state) : -EINVAL;
697}
698EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
/**
 * pci_set_power_state - set a device's power state
 * @dev: PCI device to handle
 * @state: requested state (clamped to the D0..D3hot range)
 *
 * Coordinates the platform hooks and the raw PMCSR write.  Requests for
 * D1/D2 on devices (or behind bridges) that don't support them succeed
 * as no-ops, as do D3 requests for devices quirked with
 * PCI_DEV_FLAGS_NO_D3.  Returns 0 on success or a negative errno.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Clamp the request into the range the PMCSR can express. */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * D1/D2 is blacklisted for this device or a parent
		 * bridge — silently succeed without touching hardware.
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* Quirked devices must never be put into D3. */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	/* A successful platform completion overrides a raw-write error. */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
746
747
748
749
750
751
752
753
754
755
756
/**
 * pci_choose_state - pick a suitable low-power state for a PM event
 * @dev: PCI device to handle
 * @state: the PM message (suspend/freeze/hibernate/...)
 *
 * Devices without a PM capability can only be in D0.  The platform's
 * preference wins when it has one; otherwise suspend-class events map
 * to D3hot.  Unrecognized events are a kernel bug.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
786
/* Number of 16-bit PCIe control registers saved per device. */
#define PCI_EXP_SAVE_REGS	7

/*
 * Predicates telling whether a PCIe capability of a given port @type
 * and FLAGS register value actually implements each control register.
 * v2 capabilities (FLAGS version > 1) implement them all.
 */
#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

/*
 * Save the PCIe capability control registers into the pre-allocated
 * save buffer.  NOTE: the order of the conditional reads here must
 * match the writes in pci_restore_pcie_state() exactly, since both
 * sides advance the same implicit index i.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
848
/*
 * Restore the PCIe capability control registers saved by
 * pci_save_pcie_state().  The sequence of conditional writes must
 * mirror the reads on the save side one-for-one — both walk the save
 * buffer with the same implicit index i.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
879
880
881static int pci_save_pcix_state(struct pci_dev *dev)
882{
883 int pos;
884 struct pci_cap_saved_state *save_state;
885
886 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
887 if (pos <= 0)
888 return 0;
889
890 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
891 if (!save_state) {
892 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
893 return -ENOMEM;
894 }
895
896 pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
897
898 return 0;
899}
900
901static void pci_restore_pcix_state(struct pci_dev *dev)
902{
903 int i = 0, pos;
904 struct pci_cap_saved_state *save_state;
905 u16 *cap;
906
907 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
908 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
909 if (!save_state || pos <= 0)
910 return;
911 cap = (u16 *)&save_state->data[0];
912
913 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
914}
915
916
917
918
919
920
921int
922pci_save_state(struct pci_dev *dev)
923{
924 int i;
925
926 for (i = 0; i < 16; i++)
927 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
928 dev->state_saved = true;
929 if ((i = pci_save_pcie_state(dev)) != 0)
930 return i;
931 if ((i = pci_save_pcix_state(dev)) != 0)
932 return i;
933 return 0;
934}
935
936
937
938
939
/**
 * pci_restore_state - restore the configuration saved by pci_save_state()
 * @dev: PCI device to handle
 *
 * A no-op unless state was previously saved.  Consumes the saved state
 * (state_saved is cleared on exit).
 */
void pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	if (!dev->state_saved)
		return;

	/* PCIe capability registers must be restored first. */
	pci_restore_pcie_state(dev);

	/*
	 * The conventional header is restored back-to-front so that the
	 * command register (offset 4) is written after the BARs — the
	 * device stays decoupled from the bus until its addresses are
	 * back in place.  Only registers that actually changed are
	 * rewritten.
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev,i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
971
972static int do_pci_enable_device(struct pci_dev *dev, int bars)
973{
974 int err;
975
976 err = pci_set_power_state(dev, PCI_D0);
977 if (err < 0 && err != -EIO)
978 return err;
979 err = pcibios_enable_device(dev, bars);
980 if (err < 0)
981 return err;
982 pci_fixup_device(pci_fixup_enable, dev);
983
984 return 0;
985}
986
987
988
989
990
991
992
993
994int pci_reenable_device(struct pci_dev *dev)
995{
996 if (pci_is_enabled(dev))
997 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
998 return 0;
999}
1000
/*
 * Common implementation behind pci_enable_device{,_io,_mem}(): enable
 * decoding for every BAR whose resource flags intersect @flags.
 * Enabling is reference counted — only the first caller does real work.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Resynchronize current_state with the hardware before enabling:
	 * firmware or an earlier suspend may have left the device in a
	 * state we don't know about.
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	/* Already enabled by someone else — just bump the refcount. */
	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;

	/* Build the bitmask of BARs matching the requested types. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);	/* roll back on failure */
	return err;
}
1031
1032
1033
1034
1035
1036
1037
1038
1039
/**
 * pci_enable_device_io - enable a device, I/O resources only
 * @dev: PCI device to be enabled
 *
 * Like pci_enable_device(), but asks the arch to enable only the
 * device's I/O port BARs.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - enable a device, memory resources only
 * @dev: PCI device to be enabled
 *
 * Like pci_enable_device(), but asks the arch to enable only the
 * device's memory BARs.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - enable a device (both I/O and memory resources)
 * @dev: PCI device to be enabled
 *
 * Brings the device to D0 and enables decoding for all of its BARs.
 * Reference counted: a device enabled n times must be disabled n times
 * before it is really disabled.
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
1073
1074
1075
1076
1077
1078
1079
/*
 * Managed-device (devres) bookkeeping for pcim_* helpers.  One of these
 * is attached to the struct device and released automatically on driver
 * detach.
 */
struct pci_devres {
	unsigned int enabled:1;		/* pcim_enable_device() succeeded */
	unsigned int pinned:1;		/* don't disable on release */
	unsigned int orig_intx:1;	/* INTx state to restore */
	unsigned int restore_intx:1;	/* restore INTx on release? */
	u32 region_mask;		/* BARs claimed via managed regions */
};
1087
/*
 * devres release callback: undo everything the pcim_* helpers did —
 * MSI/MSI-X, claimed regions, INTx state and, unless the device was
 * pinned, the enable itself.
 */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	/* release only the regions this devres actually claimed */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
1109
/* Find the device's pci_devres record, allocating one if necessary. */
static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	/* devres_get() resolves the race with a concurrent allocator */
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

/* Look up the pci_devres record, but only for managed devices. */
static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}
1130
1131
1132
1133
1134
1135
1136
1137int pcim_enable_device(struct pci_dev *pdev)
1138{
1139 struct pci_devres *dr;
1140 int rc;
1141
1142 dr = get_pci_dr(pdev);
1143 if (unlikely(!dr))
1144 return -ENOMEM;
1145 if (dr->enabled)
1146 return 0;
1147
1148 rc = pci_enable_device(pdev);
1149 if (!rc) {
1150 pdev->is_managed = 1;
1151 dr->enabled = 1;
1152 }
1153 return rc;
1154}
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164void pcim_pin_device(struct pci_dev *pdev)
1165{
1166 struct pci_devres *dr;
1167
1168 dr = find_pci_dr(pdev);
1169 WARN_ON(!dr || !dr->enabled);
1170 if (dr)
1171 dr->pinned = 1;
1172}
1173
1174
1175
1176
1177
1178
1179
1180
1181
/* Weak default arch hook for device disable; archs may override. */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1183
1184static void do_pci_disable_device(struct pci_dev *dev)
1185{
1186 u16 pci_command;
1187
1188 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1189 if (pci_command & PCI_COMMAND_MASTER) {
1190 pci_command &= ~PCI_COMMAND_MASTER;
1191 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1192 }
1193
1194 pcibios_disable_device(dev);
1195}
1196
1197
1198
1199
1200
1201
1202
1203
/**
 * pci_disable_enabled_device - disable a device without touching its
 * enable refcount
 * @dev: PCI device to disable
 *
 * Used on paths (e.g. suspend) where the logical enable state must be
 * preserved while the hardware is quiesced.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
/**
 * pci_disable_device - drop one enable reference, disabling on the last
 * @dev: PCI device to disable
 *
 * Counterpart of pci_enable_device(): decrements the enable count and
 * only disables the hardware when the count reaches zero.  Also clears
 * the managed-devres "enabled" flag so pcim_release() won't disable a
 * second time.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	/* still enabled by other callers — just drop the reference */
	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
/*
 * Weak default arch hook for PCIe reset-state control; archs that can
 * drive PERST# (etc.) override it.  The default reports "unsupported".
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set the reset state of a PCIe device
 * @dev: PCI device to handle
 * @state: requested reset state
 *
 * Thin wrapper delegating to the arch hook above.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
1264
1265
1266
1267
1268
1269
1270
1271
1272
/**
 * pci_check_pme_status - check and clear a device's PME status
 * @dev: PCI device to handle
 *
 * Returns true only when the device both signalled PME (status bit set)
 * and had PME generation enabled; in that case PME is also disabled.
 * The status bit is cleared in every case where it was found set —
 * PME_Status is write-one-to-clear per the PCI PM spec, hence writing
 * the register back with that bit set.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status (W1C). */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid an interrupt storm. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}
1299
1300
1301
1302
1303
1304
1305
1306
1307
/*
 * pci_walk_bus() callback: if @dev asserted PME, report a wakeup event
 * and request a runtime resume of the device.  Always returns 0 so the
 * bus walk continues.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
{
	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - wake up all PME-signalling devices on a bus tree
 * @bus: top bus to walk (may be NULL, in which case nothing happens)
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, NULL);
}
1326
1327
1328
1329
1330
1331
1332bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1333{
1334 if (!dev->pm_cap)
1335 return false;
1336
1337 return !!(dev->pme_support & (1 << state));
1338}
1339
/*
 * Delayed-work handler: poll every device on pci_pme_list for a pending
 * PME and reschedule itself while the list is non-empty.  The list and
 * the rescheduling decision are both protected by pci_pme_list_mutex.
 */
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry(pme_dev, &pci_pme_list, list)
			pci_pme_wakeup(pme_dev->dev, NULL);
		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
1352
1353
1354
1355
1356
1357
1358
1359static bool pci_external_pme(struct pci_dev *dev)
1360{
1361 if (pci_is_pcie(dev) || dev->bus->number == 0)
1362 return false;
1363 return true;
1364}
1365
1366
1367
1368
1369
1370
1371
1372
1373
/**
 * pci_pme_active - enable or disable a device's PME# generation
 * @dev: PCI device to handle
 * @enable: true to enable PME#, false to disable it
 *
 * Updates PME_En in the PMCSR (always clearing any pending PME status,
 * which is write-one-to-clear) and, for devices whose PME# may not be
 * wired to an interrupt (see pci_external_pme()), adds/removes them
 * from the periodic polling list.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status (W1C) and set PME_En... */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;	/* ...unless disabling */

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Devices whose PME# delivery we can't rely on are polled by
	 * pci_pme_list_scan(); the work item is (re)armed only when the
	 * list transitions from empty to non-empty, and keeps itself
	 * scheduled after that.
	 */
	if (pci_external_pme(dev)) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;	/* PME stays enabled, just unpolled */
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
			enable ? "enabled" : "disabled");
}
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
/**
 * __pci_enable_wake - enable/disable wakeup from @state for a device
 * @dev: PCI device to handle
 * @state: power state the device will be in while wakeup is armed
 * @runtime: true for runtime wakeup, false for system sleep wakeup
 * @enable: true to arm wakeup, false to disarm it
 *
 * Arms the device's own PME# (when usable from @state) and the
 * platform-side wakeup machinery.  Returns 0 on success, 1 when PME#
 * is unusable and the platform had to do all the work but reported an
 * informational status, or a negative errno.
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	/* System-sleep wakeup requires userspace/driver permission. */
	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't repeat work we've already done. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * Both the device's own PME# and the platform mechanism are
	 * configured; when PME# cannot be generated from @state, the
	 * platform's status becomes the overall result.
	 */
	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;	/* PME# not usable from @state */
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;	/* platform result decides */
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1510{
1511 return pci_pme_capable(dev, PCI_D3cold) ?
1512 pci_enable_wake(dev, PCI_D3cold, enable) :
1513 pci_enable_wake(dev, PCI_D3hot, enable);
1514}
1515
1516
1517
1518
1519
1520
1521
1522
1523
/**
 * pci_target_state - pick the deepest suitable low-power state
 * @dev: PCI device to handle
 *
 * Priority: the platform's choice (when it manages the device and the
 * choice is valid and not blacklisted), then D0 for PM-less devices,
 * then — for wakeup-capable devices — the deepest state PME# can be
 * signalled from.  Defaults to D3hot.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Use the platform's preference unless it is invalid or
		 * a blacklisted D1/D2 request.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fall through — accept the platform's choice */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * The device must stay shallow enough to still generate
		 * PME#; walk back from D3hot until a supported state is
		 * found (D0 always qualifies as the loop floor).
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572int pci_prepare_to_sleep(struct pci_dev *dev)
1573{
1574 pci_power_t target_state = pci_target_state(dev);
1575 int error;
1576
1577 if (target_state == PCI_POWER_ERROR)
1578 return -EIO;
1579
1580 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1581
1582 error = pci_set_power_state(dev, target_state);
1583
1584 if (error)
1585 pci_enable_wake(dev, target_state, false);
1586
1587 return error;
1588}
1589
1590
1591
1592
1593
1594
1595
/**
 * pci_back_from_sleep - return a device to full power
 * @dev: PCI device to handle
 *
 * Disarms wakeup and transitions the device back to D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
1601
1602
1603
1604
1605
1606
1607
1608
1609int pci_finish_runtime_suspend(struct pci_dev *dev)
1610{
1611 pci_power_t target_state = pci_target_state(dev);
1612 int error;
1613
1614 if (target_state == PCI_POWER_ERROR)
1615 return -EIO;
1616
1617 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1618
1619 error = pci_set_power_state(dev, target_state);
1620
1621 if (error)
1622 __pci_enable_wake(dev, target_state, true, false);
1623
1624 return error;
1625}
1626
1627
1628
1629
1630
1631
1632
1633
1634
/**
 * pci_dev_run_wake - can the device generate run-time wakeup events?
 * @dev: PCI device to check
 *
 * True when the device itself, any bridge on the path to the root, or
 * the root bus's bridge device is capable of runtime wakeup — provided
 * the device supports PME at all (except for the device's own run-wake
 * flag, which suffices on its own).
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	/* Without PME support the bridges can't relay a wakeup. */
	if (!dev->pme_support)
		return false;

	/* Walk every bridge between the device and the root bus. */
	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* Finally, the root bus's own bridge device. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1661
1662
1663
1664
1665
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 *
 * Locate the device's PCI Power Management capability and cache what it
 * advertises (D1/D2 support, PME# reporting states) in @dev.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;

	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		/*
		 * Make the device's PM flags reflect the wake-up capability;
		 * user space may enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality for now. */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * If @dev supports platform wakeup events, mark it wakeup-capable and
 * make sure the platform wakeup mechanism starts out disarmed.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	platform_pci_sleep_wake(dev, false);
}
1747
1748
1749
1750
1751
1752
1753
1754static int pci_add_cap_save_buffer(
1755 struct pci_dev *dev, char cap, unsigned int size)
1756{
1757 int pos;
1758 struct pci_cap_saved_state *save_state;
1759
1760 pos = pci_find_capability(dev, cap);
1761 if (pos <= 0)
1762 return 0;
1763
1764 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1765 if (!save_state)
1766 return -ENOMEM;
1767
1768 save_state->cap_nr = cap;
1769 pci_add_saved_cap(dev, save_state);
1770
1771 return 0;
1772}
1773
1774
1775
1776
1777
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 *
 * Preallocate save buffers for the PCI Express and PCI-X capabilities so
 * that their state can be saved later without allocating.
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}
1793
1794
1795
1796
1797
/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 *
 * Enables ARI forwarding on the upstream bridge of @dev when both the
 * device and the bridge advertise ARI support.
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	/* Only function 0 of a PCIe device can carry the ARI capability. */
	if (!pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !pci_is_pcie(bridge))
		return;

	pos = pci_pcie_cap(bridge);
	if (!pos)
		return;

	/* The upstream bridge must advertise ARI forwarding support. */
	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
1830
/* Set when some caller has asked for ACS to be enabled; see pci_enable_acs(). */
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
1840
1841
1842
1843
1844
/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 *
 * Turns on every ACS control bit the device's ACS capability advertises,
 * but only when ACS was requested via pci_request_acs().
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
1891{
1892 int slot;
1893
1894 if (pci_ari_enabled(dev->bus))
1895 slot = 0;
1896 else
1897 slot = PCI_SLOT(dev->devfn);
1898
1899 return (((pin - 1) + slot) % 4) + 1;
1900}
1901
/*
 * pci_get_interrupt_pin - find the pin as seen at the host bridge
 * @dev: the PCI device
 * @bridge: set to the root-bus device that @dev's pin swizzles up to
 *
 * Walks up from @dev to the root bus, applying the bridge swizzle at
 * each hop.  Returns the swizzled pin number, or -1 if @dev uses no
 * interrupt pin.
 */
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
1928{
1929 u8 pin = *pinp;
1930
1931 while (!pci_is_root_bus(dev->bus)) {
1932 pin = pci_swizzle_interrupt_pin(dev, pin);
1933 dev = dev->bus->self;
1934 }
1935 *pinp = pin;
1936 return PCI_SLOT(dev->devfn);
1937}
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	/* Keep the managed-device (devres) bookkeeping in sync. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
/**
 * __pci_request_region - Reserve PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any address inside
 * the PCI region unless this call returns successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or -EBUSY on error.  A warning message is
 * also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
									int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	/* Record the reservation for managed (pcim_*) devices. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Non-exclusive variant of __pci_request_region().
 * Returns 0 on success, or %EBUSY on error.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Like pci_request_region(), but the region is also marked
 * IORESOURCE_EXCLUSIVE so userspace cannot map it via /dev/mem or sysfs.
 * Returns 0 on success, or %EBUSY on error.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
2058
2059
2060
2061
2062
2063
2064
2065
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++) {
		if (!(bars & (1 << i)))
			continue;
		pci_release_region(pdev, i);
	}
}
2074
/*
 * __pci_request_selected_regions - reserve the BARs selected in @bars
 *
 * On failure every region reserved so far is released again before
 * returning -EBUSY, so the call is all-or-nothing.
 */
int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	/* Unwind the regions already reserved, in reverse order. */
	while(--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}
2093
2094
2095
2096
2097
2098
2099
2100
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

/* Exclusive variant: also blocks userspace mmap of the regions. */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	/* (1 << 6) - 1 selects all six standard BARs. */
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Like pci_request_regions(), but additionally marks all regions
 * IORESOURCE_EXCLUSIVE so userspace cannot map them via /dev/mem or
 * sysfs.  Returns 0 on success, or %EBUSY on error.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
2167
2168static void __pci_set_master(struct pci_dev *dev, bool enable)
2169{
2170 u16 old_cmd, cmd;
2171
2172 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2173 if (enable)
2174 cmd = old_cmd | PCI_COMMAND_MASTER;
2175 else
2176 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2177 if (cmd != old_cmd) {
2178 dev_dbg(&dev->dev, "%s bus mastering\n",
2179 enable ? "enabling" : "disabling");
2180 pci_write_config_word(dev, PCI_COMMAND, cmd);
2181 }
2182 dev->is_busmaster = enable;
2183}
2184
2185
2186
2187
2188
2189
2190
2191
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value and read it back to verify the device
	   accepted it. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2244
2245#ifdef PCI_DISABLE_MWI
/*
 * PCI_DISABLE_MWI: this platform cannot use Memory-Write-Invalidate,
 * so the MWI helpers are no-ops that report success.
 */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}
2259
2260#else
2261
2262
2263
2264
2265
2266
2267
2268
2269
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	/* MWI requires a correctly programmed cache line size first. */
	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return pci_set_mwi(dev);
}
2304
2305
2306
2307
2308
2309
2310
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device.
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
2322#endif
2323
2324
2325
2326
2327
2328
2329
2330
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		/* For managed devices, remember the original INTx state so
		 * it can be restored when the device is released. */
		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}
2356
2357
2358
2359
2360
2361
2362
2363
2364
/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	/* Clear the enable bit in the MSI capability, if present. */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	/* Likewise for MSI-X. */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}
EXPORT_SYMBOL_GPL(pci_msi_off);
2384
/* Thin PCI wrappers around the generic device DMA segment helpers. */
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);

int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2396
/*
 * pcie_flr - perform a PCIe Function Level Reset
 * @dev: device to reset
 * @probe: if nonzero, only check whether FLR is supported
 *
 * Returns 0 on success, -ENOTTY if the device does not advertise FLR.
 */
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status, control;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean (up to ~800 ms total). */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
	control |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

	/* Give the device time to complete the reset. */
	msleep(100);

	return 0;
}
2437
/*
 * pci_af_flr - perform a reset via the Advanced Features capability
 * @dev: device to reset
 * @probe: if nonzero, only check whether AF FLR is supported
 *
 * Returns 0 on success, -ENOTTY if the device lacks AF FLR support.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	/* Both Transaction Pending reporting and FLR must be supported. */
	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean (up to ~800 ms total). */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	/* Give the device time to complete the reset. */
	msleep(100);

	return 0;
}
2475
/*
 * pci_pm_reset - reset the device by cycling it through D3hot and back to D0
 * @dev: device to reset
 * @probe: if nonzero, only check whether a PM reset is possible
 *
 * Not usable when the device advertises No_Soft_Reset, or when it is
 * not currently in D0.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	/* D0 -> D3hot ... */
	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	/* ... and back to D0, which performs the internal reset. */
	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}
2505
/*
 * pci_parent_bus_reset - reset a device via its parent bridge's
 * secondary bus reset bit
 * @dev: device to reset
 * @probe: if nonzero, only check whether this reset type is applicable
 *
 * Only allowed when @dev is the sole device on its (non-root) bus and
 * has no subordinate bus of its own, since the reset hits the whole bus.
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	/* Bail out if any sibling shares the bus. */
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	/* Assert, then deassert, the secondary bus reset. */
	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}
2532
/*
 * pci_dev_reset - try the available function reset methods in order
 * @dev: device to reset
 * @probe: if nonzero, only check whether any reset method is available
 *
 * Tries device-specific, PCIe FLR, AF FLR, PM and parent-bus resets in
 * turn; the first method that does not return -ENOTTY decides the result.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	if (!probe) {
		pci_block_user_cfg_access(dev);
		/* Block PM suspend, driver probe, etc. while resetting. */
		device_lock(&dev->dev);
	}

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	if (!probe) {
		device_unlock(&dev->dev);
		pci_unblock_user_cfg_access(dev);
	}

	return rc;
}
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Performs the reset without saving/restoring config state; the caller
 * is responsible for any state the device loses across the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if
 * the device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to check
 *
 * Returns 0 if the device function can be reset, or negative if none of
 * the reset methods is available.  No reset is actually performed.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Unlike __pci_reset_function(), this saves the device's config state
 * before the reset and restores it afterwards, so callers get back a
 * device in a usable state.
 *
 * Returns 0 on success, or a negative error code if no reset method is
 * available or the reset failed.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	/* First make sure some reset method is actually available. */
	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * Both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	pci_restore_state(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
2649
2650
2651
2652
2653
2654
2655
2656
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Encoded field (bits 22:21) maps 0..3 -> 512..4096 bytes. */
	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
2672
2673
2674
2675
2676
2677
2678
2679
/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 *    or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	/* Encoded field (bits 3:2) maps 0..3 -> 512..4096 bytes. */
	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have erratas
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	/* 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3 */
	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Cannot exceed what the device was designed for. */
	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		/* Some buses cannot have mmrbc raised due to bridge errata. */
		if (v > o && dev->bus &&
		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
2743
2744
2745
2746
2747
2748
2749
2750
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 *    or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	int ret, cap;
	u16 ctl;

	cap = pci_pcie_cap(dev);
	if (!cap)
		return -EINVAL;

	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (!ret)
		/* Encoded field (bits 14:12) maps 0..5 -> 128..4096 bytes. */
		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	return ret;
}
EXPORT_SYMBOL(pcie_get_readrq);
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776int pcie_set_readrq(struct pci_dev *dev, int rq)
2777{
2778 int cap, err = -EINVAL;
2779 u16 ctl, v;
2780
2781 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
2782 goto out;
2783
2784 v = (ffs(rq) - 8) << 12;
2785
2786 cap = pci_pcie_cap(dev);
2787 if (!cap)
2788 goto out;
2789
2790 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
2791 if (err)
2792 goto out;
2793
2794 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
2795 ctl &= ~PCI_EXP_DEVCTL_READRQ;
2796 ctl |= v;
2797 err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl);
2798 }
2799
2800out:
2801 return err;
2802}
2803EXPORT_SYMBOL(pcie_set_readrq);
2804
2805
2806
2807
2808
2809
2810
2811
2812int pci_select_bars(struct pci_dev *dev, unsigned long flags)
2813{
2814 int i, bars = 0;
2815 for (i = 0; i < PCI_NUM_RESOURCES; i++)
2816 if (pci_resource_flags(dev, i) & flags)
2817 bars |= (1 << i);
2818 return bars;
2819}
2820
2821
2822
2823
2824
2825
2826
2827
2828
/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource (SR-IOV BAR) */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}
2849
2850
/* Architecture-specific hook for pci_set_vga_state(); NULL when unused. */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;
}

/* Delegate to the registered arch hook, if any; otherwise succeed. */
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, bool change_bridge)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
						change_bridge);
	return 0;
}
2866
2867
2868
2869
2870
2871
2872
2873
/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @change_bridge: traverse ancestors and change bridges
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, bool change_bridge)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (decode == true)
		cmd |= command_bits;
	else
		cmd &= ~command_bits;
	pci_write_config_word(dev, PCI_COMMAND, cmd);

	if (change_bridge == false)
		return 0;

	/* Propagate the VGA Enable bit up the bridge chain. */
	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode == true)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
2916
2917#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2918static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2919static DEFINE_SPINLOCK(resource_alignment_lock);
2920
2921
2922
2923
2924
2925
2926
2927
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * Parses the "resource_alignment=" parameter, whose entries look like
 * "[<order>@][<seg>:]<bus>:<slot>.<func>" separated by ';' or ','.
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		/* Optional "<order>@" prefix selects the alignment order. */
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
							p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		/* Accept "seg:bus:slot.func" or the shorter "bus:slot.func". */
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
2977
2978
2979
2980
2981
2982
2983
2984
/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: nonzero when the user asked for a specific resource
 *          alignment for @dev, zero otherwise.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return pci_specified_resource_alignment(dev) != 0;
}
2989
/*
 * pci_set_resource_alignment_param - store a new resource_alignment string
 * @buf: source buffer (need not be NUL-terminated)
 * @count: number of bytes to copy
 *
 * Truncates silently to the parameter buffer size and always leaves the
 * stored string NUL-terminated.  Returns the number of bytes stored.
 */
ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}
3000
/*
 * pci_get_resource_alignment_param - copy the current string into @buf
 * @buf: destination buffer
 * @size: size of @buf
 *
 * Returns the snprintf() result (the length the full string would need,
 * which may exceed @size if truncated).
 */
ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;
	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}
3009
/* sysfs accessors for the bus-level resource_alignment attribute. */
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}
3020
/* Expose /sys/bus/pci/resource_alignment (read-write). */
BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
3031
/* Disable PCI domain support (set from the "pci=nodomains" option). */
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
3038
3039
3040
3041
3042
3043
3044
3045
3046
/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}

/* Weak default; architectures may provide CardBus fixups. */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
3056
/*
 * pci_setup - parse the comma-separated "pci=" kernel command line option
 * @str: option string following "pci="
 *
 * Each token is first offered to pcibios_setup(); whatever the arch code
 * does not consume is matched against the generic options below.
 */
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
3093
3094EXPORT_SYMBOL(pci_reenable_device);
3095EXPORT_SYMBOL(pci_enable_device_io);
3096EXPORT_SYMBOL(pci_enable_device_mem);
3097EXPORT_SYMBOL(pci_enable_device);
3098EXPORT_SYMBOL(pcim_enable_device);
3099EXPORT_SYMBOL(pcim_pin_device);
3100EXPORT_SYMBOL(pci_disable_device);
3101EXPORT_SYMBOL(pci_find_capability);
3102EXPORT_SYMBOL(pci_bus_find_capability);
3103EXPORT_SYMBOL(pci_release_regions);
3104EXPORT_SYMBOL(pci_request_regions);
3105EXPORT_SYMBOL(pci_request_regions_exclusive);
3106EXPORT_SYMBOL(pci_release_region);
3107EXPORT_SYMBOL(pci_request_region);
3108EXPORT_SYMBOL(pci_request_region_exclusive);
3109EXPORT_SYMBOL(pci_release_selected_regions);
3110EXPORT_SYMBOL(pci_request_selected_regions);
3111EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3112EXPORT_SYMBOL(pci_set_master);
3113EXPORT_SYMBOL(pci_clear_master);
3114EXPORT_SYMBOL(pci_set_mwi);
3115EXPORT_SYMBOL(pci_try_set_mwi);
3116EXPORT_SYMBOL(pci_clear_mwi);
3117EXPORT_SYMBOL_GPL(pci_intx);
3118EXPORT_SYMBOL(pci_assign_resource);
3119EXPORT_SYMBOL(pci_find_parent_resource);
3120EXPORT_SYMBOL(pci_select_bars);
3121
3122EXPORT_SYMBOL(pci_set_power_state);
3123EXPORT_SYMBOL(pci_save_state);
3124EXPORT_SYMBOL(pci_restore_state);
3125EXPORT_SYMBOL(pci_pme_capable);
3126EXPORT_SYMBOL(pci_pme_active);
3127EXPORT_SYMBOL(pci_wake_from_d3);
3128EXPORT_SYMBOL(pci_target_state);
3129EXPORT_SYMBOL(pci_prepare_to_sleep);
3130EXPORT_SYMBOL(pci_back_from_sleep);
3131EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3132