1
2
3
4
5
6
7
8
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
14#include <linux/pm.h>
15#include <linux/slab.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18#include <linux/string.h>
19#include <linux/log2.h>
20#include <linux/pci-aspm.h>
21#include <linux/pm_wakeup.h>
22#include <linux/interrupt.h>
23#include <linux/device.h>
24#include <linux/pm_runtime.h>
25#include <asm/setup.h>
26#include "pci.h"
27
/* Human-readable names for pci_power_t values, indexed by state + 1. */
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

/* Set non-zero by quirks when the ISA DMA bridge is known to be broken. */
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

/* Bitmask of chipset problem flags, set by quirks. */
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/* Global minimum delay (ms) applied after a transition out of D3hot. */
unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

/*
 * Devices whose PME# signal is unreliable and therefore must be polled;
 * protected by pci_pme_list_mutex, scanned by the pci_pme_work worker.
 */
static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

/* One entry on pci_pme_list. */
struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

/* Interval (ms) between PME polling passes. */
#define PME_TIMEOUT 1000
53
54static void pci_dev_d3_sleep(struct pci_dev *dev)
55{
56 unsigned int delay = dev->d3_delay;
57
58 if (delay < pci_pm_d3_delay)
59 delay = pci_pm_d3_delay;
60
61 msleep(delay);
62}
63
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE (256)
#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)

/* Default window sizes reserved behind CardBus bridges. */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE (256)
#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)

/* Default window sizes reserved for hotplug bridges. */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * Default cache line size, in 32-bit units (hence the >> 2 on a byte
 * count).  pci_cache_line_size holds the value actually in use; an
 * arch may override either.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/* Upper clamp applied to the latency timer when enabling bus mastering. */
unsigned int pcibios_max_latency = 255;

/* When set, PCIe ARI is not used even if the hardware supports it. */
static bool pcie_ari_disabled;
99
100
101
102
103
104
105
106
107unsigned char pci_bus_max_busnr(struct pci_bus* bus)
108{
109 struct list_head *tmp;
110 unsigned char max, n;
111
112 max = bus->subordinate;
113 list_for_each(tmp, &bus->children) {
114 n = pci_bus_max_busnr(pci_bus_b(tmp));
115 if(n > max)
116 max = n;
117 }
118 return max;
119}
120EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
121
#ifdef CONFIG_HAS_IOMEM
/*
 * pci_ioremap_bar - map a whole memory BAR of a device
 * @pdev: device owning the BAR
 * @bar:  BAR index
 *
 * Only memory BARs can be ioremapped; warns and returns NULL for
 * anything else.
 */
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	if (WARN_ON(!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)))
		return NULL;

	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
137
#if 0
/*
 * NOTE: dead code — compiled out by "#if 0".  Kept for reference only.
 *
 * pci_max_busnr - returns the highest PCI bus number present in the system
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if(n > max)
			max = n;
	}
	return max;
}

#endif
161
/* Upper bound on capability-list links followed, to survive broken lists. */
#define PCI_FIND_CAP_TTL 48

/*
 * Walk the conventional capability list starting at the "next" pointer
 * stored at config offset @pos, looking for capability ID @cap.  *ttl is
 * decremented per hop so malformed (looping) lists terminate.  Returns
 * the config-space offset of the capability, or 0 if not found.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)		/* capabilities live above the header */
			break;
		pos &= ~3;		/* pointers are dword-aligned */
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)		/* all-ones: device not responding */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
184
185static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
186 u8 pos, int cap)
187{
188 int ttl = PCI_FIND_CAP_TTL;
189
190 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
191}
192
193int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
194{
195 return __pci_find_next_cap(dev->bus, dev->devfn,
196 pos + PCI_CAP_LIST_NEXT, cap);
197}
198EXPORT_SYMBOL_GPL(pci_find_next_capability);
199
200static int __pci_bus_find_cap_start(struct pci_bus *bus,
201 unsigned int devfn, u8 hdr_type)
202{
203 u16 status;
204
205 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
206 if (!(status & PCI_STATUS_CAP_LIST))
207 return 0;
208
209 switch (hdr_type) {
210 case PCI_HEADER_TYPE_NORMAL:
211 case PCI_HEADER_TYPE_BRIDGE:
212 return PCI_CAPABILITY_LIST;
213 case PCI_HEADER_TYPE_CARDBUS:
214 return PCI_CB_CAPABILITY_LIST;
215 default:
216 return 0;
217 }
218
219 return 0;
220}
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241int pci_find_capability(struct pci_dev *dev, int cap)
242{
243 int pos;
244
245 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
246 if (pos)
247 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
248
249 return pos;
250}
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
266{
267 int pos;
268 u8 hdr_type;
269
270 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
271
272 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
273 if (pos)
274 pos = __pci_find_next_cap(bus, devfn, pos, cap);
275
276 return pos;
277}
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
/*
 * pci_find_ext_capability - find a PCIe extended capability
 * @dev: device to query
 * @cap: extended capability ID (PCI_EXT_CAP_ID_*)
 *
 * Walks the extended-capability list that starts at offset 0x100 and
 * returns the offset of the matching capability, or 0 if not found or
 * the device has no extended config space.
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * An all-zero header means the capability list is empty (a valid
	 * header always has a non-zero ID or next pointer).
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)	/* next pointer out of range */
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
345 int cap)
346{
347 u32 header;
348 int ttl;
349 int pos = PCI_CFG_SPACE_SIZE;
350
351
352 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
353
354 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
355 return 0;
356 if (header == 0xffffffff || header == 0)
357 return 0;
358
359 while (ttl-- > 0) {
360 if (PCI_EXT_CAP_ID(header) == cap)
361 return pos;
362
363 pos = PCI_EXT_CAP_NEXT(header);
364 if (pos < PCI_CFG_SPACE_SIZE)
365 break;
366
367 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
368 break;
369 }
370
371 return 0;
372}
373
/*
 * Search for a HyperTransport capability of subtype @ht_cap, starting the
 * walk at config offset @pos.  Slave/host subtypes are matched on the
 * 3-bit type field, all others on the 5-bit field.  Returns the offset of
 * the matching HT capability or 0.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		/* Byte 3 of the HT capability holds the subtype field. */
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		/* Not the subtype we want; continue from the next HT cap,
		 * sharing the same TTL budget across the whole search. */
		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
401
402
403
404
405
406
407
408
409
410
411
412
413
414int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
415{
416 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
417}
418EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
419
420
421
422
423
424
425
426
427
428
429
430
431int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
432{
433 int pos;
434
435 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
436 if (pos)
437 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
438
439 return pos;
440}
441EXPORT_SYMBOL_GPL(pci_find_ht_capability);
442
443
444
445
446
447
448
449
450
451
/*
 * pci_find_parent_resource - find the parent bus resource for a region
 * @dev: device whose resource is being placed
 * @res: the resource to find a parent for
 *
 * Scans the resources of @dev's bus for one that contains @res and has a
 * compatible type.  An exact prefetch match wins immediately; otherwise
 * a non-prefetchable window is remembered as fallback ("best").
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		/* If @res has a fixed address it must fit inside r. */
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;
		/* IO vs MEM must match. */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;
		/* Prefetch flags agree: perfect match. */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;

		/* A prefetchable window can't serve a non-prefetchable res. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;

		/* Non-prefetchable window for a prefetchable res: OK as
		 * fallback; keep the first one found. */
		if (!best)
			best = r;
	}
	return best;
}
477
478
479
480
481
482
483
484
485static void
486pci_restore_bars(struct pci_dev *dev)
487{
488 int i;
489
490 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
491 pci_update_resource(dev, i);
492}
493
/* Platform (e.g. ACPI) power-management backend; NULL until registered. */
static struct pci_platform_pm_ops *pci_platform_pm;

/* Register the platform PM backend; all mandatory callbacks must be set.
 * (run_wake is optional and not validated here.) */
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

/*
 * The wrappers below dispatch to the registered backend and fall back to
 * a safe default (false / -ENOSYS / -ENODEV / PCI_POWER_ERROR) when no
 * backend is registered.
 */
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}
538
539
540
541
542
543
544
545
546
547
548
549
550
551
/*
 * pci_raw_set_power_state - set the power state via the PM capability only
 * @dev:   device to transition
 * @state: target state (PCI_D0..PCI_D3hot)
 *
 * Writes the PMCSR register directly, without involving the platform
 * (ACPI) layer.  Returns 0 on success, -EIO if the device has no PM
 * capability or does not support the requested D1/D2 state, -EINVAL for
 * an out-of-range state or an illegal transition.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state: D0 may be entered from anywhere, but a
	 * device may only go *deeper* asleep, never e.g. D3hot -> D1.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* Check if this device supports the desired state. */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * Coming from D0/D1/D2 just update the state bits.  Coming from
	 * (effectively) D3 or an unknown boot-up state, force the whole
	 * PMCSR word to 0, i.e. back to D0 with PME disabled.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		/* Without No_Soft_Reset, leaving D3hot resets the device,
		 * so its BARs will need restoring afterwards. */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter the specified state. */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory transition delays; the D3 delay is the longer msleep,
	 * the D2 delay is short enough for udelay. */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Read back: the device may have refused the transition. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* Restore BARs if the D3hot exit soft-reset the device. */
	if (need_restore)
		pci_restore_bars(dev);

	/* Let ASPM re-evaluate the link state of the upstream bridge. */
	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
644
645
646
647
648
649
650
651void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
652{
653 if (dev->pm_cap) {
654 u16 pmcsr;
655
656 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
657 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
658 } else {
659 dev->current_state = state;
660 }
661}
662
663
664
665
666
667
668static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
669{
670 int error;
671
672 if (platform_pci_power_manageable(dev)) {
673 error = platform_pci_set_power_state(dev, state);
674 if (!error)
675 pci_update_current_state(dev, state);
676
677 if (!dev->pm_cap)
678 dev->current_state = PCI_D0;
679 } else {
680 error = -ENODEV;
681
682 if (!dev->pm_cap)
683 dev->current_state = PCI_D0;
684 }
685
686 return error;
687}
688
689
690
691
692
693
694static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
695{
696 if (state == PCI_D0)
697 pci_platform_power_transition(dev, PCI_D0);
698}
699
700
701
702
703
704
705
706
707int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
708{
709 return state >= PCI_D0 ?
710 pci_platform_power_transition(dev, state) : -EINVAL;
711}
712EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
/*
 * pci_set_power_state - set the power state of a PCI device
 * @dev:   device to transition
 * @state: requested state (clamped to the PCI_D0..PCI_D3hot range)
 *
 * Combines the platform transition hooks with the native PMCSR write.
 * Returns 0 on success or a negative errno.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we set; anything deeper than D3hot maps to it. */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or its parent bridge can't support D1/D2,
		 * ignore the request (not an error — the driver asked for
		 * something optional).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* Quirked devices must never be put into D3. */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	/* A successful platform completion overrides a native failure. */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	/* Give ASPM a chance to apply power-saving link settings. */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
766
767
768
769
770
771
772
773
774
775
776
/*
 * pci_choose_state - pick a suitable low-power state for a suspend event
 * @dev:   the device
 * @state: the suspend event (PM_EVENT_*)
 *
 * Prefers the platform's (e.g. ACPI) recommendation; falls back to a
 * fixed mapping from the event type.  BUG()s on an unknown event.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	/* No PM capability: the only state the device knows is D0. */
	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
806
/* Number of 16-bit PCIe control registers saved per device. */
#define PCI_EXP_SAVE_REGS	7

/*
 * Which PCIe control registers a device implements depends on the
 * capability version (PCI_EXP_FLAGS_VERS field of @flags) and the port
 * @type: v2 capabilities expose everything, v1 exposes a subset chosen
 * by port type.  These predicates guard each save/restore access.
 */
#define pcie_cap_has_devctl(type, flags) 1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
830
/* Look up the saved-state buffer for capability @cap on @pci_dev, or NULL.
 * (Old-style hlist_for_each_entry with an explicit cursor node.) */
static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}
843
/*
 * Save the writable PCIe capability control registers into the
 * pre-allocated save buffer.  The registers are stored in a fixed order
 * gated by the pcie_cap_has_* predicates; pci_restore_pcie_state() must
 * replay them with exactly the same predicates so the indices line up.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;	/* not a PCIe device: nothing to save */

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
881
/*
 * Write the saved PCIe control registers back to the device.  The
 * predicates and their order MUST mirror pci_save_pcie_state() so each
 * cap[i] lands in the register it was read from.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
912
913
914static int pci_save_pcix_state(struct pci_dev *dev)
915{
916 int pos;
917 struct pci_cap_saved_state *save_state;
918
919 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
920 if (pos <= 0)
921 return 0;
922
923 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
924 if (!save_state) {
925 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
926 return -ENOMEM;
927 }
928
929 pci_read_config_word(dev, pos + PCI_X_CMD,
930 (u16 *)save_state->cap.data);
931
932 return 0;
933}
934
935static void pci_restore_pcix_state(struct pci_dev *dev)
936{
937 int i = 0, pos;
938 struct pci_cap_saved_state *save_state;
939 u16 *cap;
940
941 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
942 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
943 if (!save_state || pos <= 0)
944 return;
945 cap = (u16 *)&save_state->cap.data[0];
946
947 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
948}
949
950
951
952
953
954
955int
956pci_save_state(struct pci_dev *dev)
957{
958 int i;
959
960 for (i = 0; i < 16; i++)
961 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
962 dev->state_saved = true;
963 if ((i = pci_save_pcie_state(dev)) != 0)
964 return i;
965 if ((i = pci_save_pcix_state(dev)) != 0)
966 return i;
967 return 0;
968}
969
/*
 * Write @saved_val to config @offset if it differs from the current
 * value, re-reading and re-writing up to @retry extra times (with a 1 ms
 * pause between attempts) for registers that are slow to take effect.
 */
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;		/* out of retries; last write stands */

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}
993
994static void pci_restore_config_space_range(struct pci_dev *pdev,
995 int start, int end, int retry)
996{
997 int index;
998
999 for (index = end; index >= start; index--)
1000 pci_restore_config_dword(pdev, 4 * index,
1001 pdev->saved_config_space[index],
1002 retry);
1003}
1004
static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		/* Upper registers first, then the BARs (dwords 4-9) with
		 * retries, and finally dwords 0-3 (including COMMAND) so
		 * decoding is only re-enabled once the BARs are back. */
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}
1016
1017
1018
1019
1020
/*
 * pci_restore_state - restore the config state saved by pci_save_state()
 * @dev: the device
 *
 * No-op unless a state was previously saved.  Restoring consumes the
 * saved state (state_saved is cleared at the end).
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCIe registers go back first, before the generic config space. */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
1038
/* Flattened copy of a device's saved state: 64 bytes of config space
 * followed by a packed, variable-length run of capability blobs. */
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];	/* flexible trailing array */
};
1043
1044
1045
1046
1047
1048
1049
1050
/*
 * pci_store_saved_state - flatten the device's saved state into one
 * kmalloc'd buffer the caller owns (and must kfree)
 * @dev: the device
 *
 * Returns NULL if no state has been saved or on allocation failure.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	/* One extra zeroed pci_cap_saved_data terminates the list
	 * (pci_load_saved_state() stops at cap->size == 0). */
	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	/* Pack each saved capability back-to-back after the config space. */
	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Trailing terminator entry is already zero from kzalloc. */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
1085
1086
1087
1088
1089
1090
/*
 * pci_load_saved_state - load a flattened state (from
 * pci_store_saved_state()) back into the device's save buffers
 * @dev:   the device
 * @state: flattened state, or NULL (treated as success, nothing loaded)
 *
 * Returns -EINVAL if a capability blob has no matching (or wrongly sized)
 * save buffer on the device.  On success sets dev->state_saved so a later
 * pci_restore_state() will apply it.
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	/* Walk the packed capability blobs; a zero size terminates. */
	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
1120
1121
1122
1123
1124
1125
1126
1127int pci_load_and_free_saved_state(struct pci_dev *dev,
1128 struct pci_saved_state **state)
1129{
1130 int ret = pci_load_saved_state(dev, *state);
1131 kfree(*state);
1132 *state = NULL;
1133 return ret;
1134}
1135EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1136
/*
 * Put the device in D0, let the arch enable the chosen BARs, then run
 * enable-time quirks.  -EIO from pci_set_power_state (no PM capability)
 * is tolerated: such devices are simply assumed to be in D0.
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}
1151
1152
1153
1154
1155
1156
1157
1158
1159int pci_reenable_device(struct pci_dev *dev)
1160{
1161 if (pci_is_enabled(dev))
1162 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1163 return 0;
1164}
1165
/*
 * Reference-counted device enable.  Only the first caller actually
 * enables the hardware; @flags selects which resource types (IO/MEM)
 * are enabled.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * The power state may be unknown here (fresh boot, device
	 * re-add), so refresh current_state from PMCSR before enabling.
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	/* Already enabled by a previous caller: just count the reference. */
	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;

	/* Collect standard BARs + ROM, then bridge windows; the resource
	 * indices between the two loops are deliberately skipped
	 * (presumably the SR-IOV resources — TODO confirm). */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);	/* roll back on failure */
	return err;
}
1200
1201
1202
1203
1204
1205
1206
1207
1208
/* Enable a device, initializing only its I/O-port resources. */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}
1213
1214
1215
1216
1217
1218
1219
1220
1221
/* Enable a device, initializing only its memory resources. */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
/* Enable a device, initializing both its I/O and memory resources. */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
1242
1243
1244
1245
1246
1247
1248
/* Managed-device (devres) bookkeeping released by pcim_release(). */
struct pci_devres {
	unsigned int enabled:1;		/* pcim_enable_device() succeeded */
	unsigned int pinned:1;		/* don't disable on release */
	unsigned int orig_intx:1;	/* INTx state to restore */
	unsigned int restore_intx:1;	/* restore INTx on release */
	u32 region_mask;		/* regions requested via pcim_* */
};
1256
/* devres release callback: undo everything pcim_* helpers acquired. */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	/* Release every region recorded in the mask. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	/* Disable unless the driver pinned the device enabled. */
	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
1278
1279static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1280{
1281 struct pci_devres *dr, *new_dr;
1282
1283 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1284 if (dr)
1285 return dr;
1286
1287 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1288 if (!new_dr)
1289 return NULL;
1290 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1291}
1292
1293static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1294{
1295 if (pci_is_managed(pdev))
1296 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1297 return NULL;
1298}
1299
1300
1301
1302
1303
1304
1305
/*
 * pcim_enable_device - managed pci_enable_device(); the device is
 * automatically disabled (unless pinned) when the driver detaches.
 * @pdev: device to enable
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;	/* already enabled via this path */

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333void pcim_pin_device(struct pci_dev *pdev)
1334{
1335 struct pci_devres *dr;
1336
1337 dr = find_pci_dr(pdev);
1338 WARN_ON(!dr || !dr->enabled);
1339 if (dr)
1340 dr->pinned = 1;
1341}
1342
1343
1344
1345
1346
1347
1348
1349
1350
/* Weak arch hook called when a device is disabled; default is a no-op. */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1352
/* Turn off bus mastering (if on) and let the arch do its disable work.
 * Note: IO/MEM decoding is deliberately left enabled. */
static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}
1365
1366
1367
1368
1369
1370
1371
1372
/*
 * pci_disable_enabled_device - disable the hardware without touching the
 * enable refcount (used on the suspend path).
 * @dev: device to disable
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
/*
 * pci_disable_device - release one enable reference; actually disable the
 * hardware only when the count drops to zero.
 * @dev: device to disable
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	/* Keep the devres bookkeeping in sync for managed devices. */
	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;		/* other users still hold it enabled */

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
/* Weak arch hook for PCIe reset-state changes; default says unsupported. */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}
1420
1421
1422
1423
1424
1425
1426
1427
1428
/*
 * pci_set_pcie_reset_state - set the reset state of a device upstream port
 * @dev:   the device
 * @state: requested reset state
 *
 * Thin wrapper around the (weak) arch implementation.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
1433
1434
1435
1436
1437
1438
1439
1440
1441
/*
 * pci_check_pme_status - check and clear a device's PME status bit
 * @dev: the device
 *
 * If PME_Status is set it is cleared (write-1-to-clear).  Returns true
 * only when PME was both signalled and enabled; in that case PME_En is
 * also cleared so the device stops asserting PME#.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status (W1C bit). */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid an interrupt storm. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}
1468
1469
1470
1471
1472
1473
1474
1475
1476
/*
 * pci_pme_wakeup - resume a device whose PME status is set
 * @dev:            the device
 * @pme_poll_reset: non-NULL to also clear the device's pme_poll flag
 *
 * pci_walk_bus() callback; always returns 0 so the walk continues.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}
1488
1489
1490
1491
1492
1493void pci_pme_wakeup_bus(struct pci_bus *bus)
1494{
1495 if (bus)
1496 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1497}
1498
1499
1500
1501
1502
1503
1504bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1505{
1506 if (!dev->pm_cap)
1507 return false;
1508
1509 return !!(dev->pme_support & (1 << state));
1510}
1511
/*
 * Delayed-work handler: poll every device on pci_pme_list, dropping
 * entries whose pme_poll flag has been cleared, and re-arm itself while
 * the list is non-empty.
 */
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				/* Polling no longer wanted: drop entry. */
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
1532
1533
1534
1535
1536
1537
1538
1539
1540
/*
 * pci_pme_active - enable or disable a device's PME# generation
 * @dev:    the device
 * @enable: true to enable PME#, false to disable
 *
 * Updates PMCSR and, for devices flagged pme_poll (unreliable PME#
 * lines), adds to or removes from the global polling list.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status (W1C) and set/clear PME_En accordingly. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Devices whose PME# signal doesn't reach the interrupt controller
	 * reliably (pme_poll set) are polled from a delayed workqueue
	 * instead; keep the polling list in sync with the enable state.
	 */
	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;	/* best effort: no polling */
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			/* First entry: kick off the polling work. */
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * Enables PME# (if the device supports it from @state) and the platform
 * wakeup mechanism.  Returns 0 on success, 1 if only platform wakeup (not
 * native PME#) could be set up, or a negative error code.
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	if (enable) {
		int error;

		/* ret == 1 records "no native PME#"; it may be replaced by
		 * the platform's result below. */
		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		/* Tear down both mechanisms unconditionally. */
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
1659EXPORT_SYMBOL(__pci_enable_wake);
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1676{
1677 return pci_pme_capable(dev, PCI_D3cold) ?
1678 pci_enable_wake(dev, PCI_D3cold, enable) :
1679 pci_enable_wake(dev, PCI_D3hot, enable);
1680}
1681
1682
1683
1684
1685
1686
1687
1688
1689
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: the PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the
		 * device and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fallthrough: D1/D2 is usable on this device */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738int pci_prepare_to_sleep(struct pci_dev *dev)
1739{
1740 pci_power_t target_state = pci_target_state(dev);
1741 int error;
1742
1743 if (target_state == PCI_POWER_ERROR)
1744 return -EIO;
1745
1746 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1747
1748 error = pci_set_power_state(dev, target_state);
1749
1750 if (error)
1751 pci_enable_wake(dev, target_state, false);
1752
1753 return error;
1754}
1755
1756
1757
1758
1759
1760
1761
1762int pci_back_from_sleep(struct pci_dev *dev)
1763{
1764 pci_enable_wake(dev, PCI_D0, false);
1765 return pci_set_power_state(dev, PCI_D0);
1766}
1767
1768
1769
1770
1771
1772
1773
1774
1775int pci_finish_runtime_suspend(struct pci_dev *dev)
1776{
1777 pci_power_t target_state = pci_target_state(dev);
1778 int error;
1779
1780 if (target_state == PCI_POWER_ERROR)
1781 return -EIO;
1782
1783 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1784
1785 error = pci_set_power_state(dev, target_state);
1786
1787 if (error)
1788 __pci_enable_wake(dev, target_state, true, false);
1789
1790 return error;
1791}
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801bool pci_dev_run_wake(struct pci_dev *dev)
1802{
1803 struct pci_bus *bus = dev->bus;
1804
1805 if (device_run_wake(&dev->dev))
1806 return true;
1807
1808 if (!dev->pme_support)
1809 return false;
1810
1811 while (bus->parent) {
1812 struct pci_dev *bridge = bus->self;
1813
1814 if (device_run_wake(&bridge->dev))
1815 return true;
1816
1817 bus = bus->parent;
1818 }
1819
1820
1821 if (bus->bridge)
1822 return device_run_wake(bus->bridge);
1823
1824 return false;
1825}
1826EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1827
1828
1829
1830
1831
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 *
 * Locates the PCI PM capability, records what D-states and PME# sources
 * the device advertises, and makes sure PME# generation starts disabled.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906void platform_pci_wakeup_init(struct pci_dev *dev)
1907{
1908 if (!platform_pci_can_wakeup(dev))
1909 return;
1910
1911 device_set_wakeup_capable(&dev->dev, true);
1912 platform_pci_sleep_wake(dev, false);
1913}
1914
/* Link a capability save-state buffer onto the device's list. */
static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
1920
1921
1922
1923
1924
1925
1926
1927static int pci_add_cap_save_buffer(
1928 struct pci_dev *dev, char cap, unsigned int size)
1929{
1930 int pos;
1931 struct pci_cap_saved_state *save_state;
1932
1933 pos = pci_find_capability(dev, cap);
1934 if (pos <= 0)
1935 return 0;
1936
1937 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1938 if (!save_state)
1939 return -ENOMEM;
1940
1941 save_state->cap.cap_nr = cap;
1942 save_state->cap.size = size;
1943 pci_add_saved_cap(dev, save_state);
1944
1945 return 0;
1946}
1947
1948
1949
1950
1951
1952void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1953{
1954 int error;
1955
1956 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1957 PCI_EXP_SAVE_REGS * sizeof(u16));
1958 if (error)
1959 dev_err(&dev->dev,
1960 "unable to preallocate PCI Express save buffer\n");
1961
1962 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1963 if (error)
1964 dev_err(&dev->dev,
1965 "unable to preallocate PCI-X save buffer\n");
1966}
1967
/* Release all capability save-state buffers hanging off @dev. */
void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos, *n;

	/* Old-style hlist iterator: needs an explicit cursor node. */
	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
		kfree(tmp);
}
1976
1977
1978
1979
1980
/**
 * pci_enable_ari - enable ARI forwarding if hardware support it
 * @dev: the PCI device
 *
 * ARI forwarding is enabled on the *upstream bridge* of @dev, so callers
 * pass function 0 of the device exposing the ARI extended capability.
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 flags, ctrl;
	struct pci_dev *bridge;

	/* Only function 0 of a PCIe device can carry the ARI capability. */
	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !pci_is_pcie(bridge))
		return;

	pos = pci_pcie_cap(bridge);
	if (!pos)
		return;

	/* ARI is a PCIe v2 feature */
	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2029{
2030 int pos;
2031 u16 ctrl;
2032
2033 pos = pci_pcie_cap(dev);
2034 if (!pos)
2035 return;
2036
2037 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2038 if (type & PCI_EXP_IDO_REQUEST)
2039 ctrl |= PCI_EXP_IDO_REQ_EN;
2040 if (type & PCI_EXP_IDO_COMPLETION)
2041 ctrl |= PCI_EXP_IDO_CMP_EN;
2042 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2043}
2044EXPORT_SYMBOL(pci_enable_ido);
2045
2046
2047
2048
2049
2050
2051void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2052{
2053 int pos;
2054 u16 ctrl;
2055
2056 if (!pci_is_pcie(dev))
2057 return;
2058
2059 pos = pci_pcie_cap(dev);
2060 if (!pos)
2061 return;
2062
2063 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2064 if (type & PCI_EXP_IDO_REQUEST)
2065 ctrl &= ~PCI_EXP_IDO_REQ_EN;
2066 if (type & PCI_EXP_IDO_COMPLETION)
2067 ctrl &= ~PCI_EXP_IDO_CMP_EN;
2068 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2069}
2070EXPORT_SYMBOL(pci_disable_ido);
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2092{
2093 int pos;
2094 u32 cap;
2095 u16 ctrl;
2096 int ret;
2097
2098 if (!pci_is_pcie(dev))
2099 return -ENOTSUPP;
2100
2101 pos = pci_pcie_cap(dev);
2102 if (!pos)
2103 return -ENOTSUPP;
2104
2105 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2106 if (!(cap & PCI_EXP_OBFF_MASK))
2107 return -ENOTSUPP;
2108
2109
2110 if (dev->bus) {
2111 ret = pci_enable_obff(dev->bus->self, type);
2112 if (ret)
2113 return ret;
2114 }
2115
2116 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2117 if (cap & PCI_EXP_OBFF_WAKE)
2118 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2119 else {
2120 switch (type) {
2121 case PCI_EXP_OBFF_SIGNAL_L0:
2122 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2123 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2124 break;
2125 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2126 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2127 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2128 break;
2129 default:
2130 WARN(1, "bad OBFF signal type\n");
2131 return -ENOTSUPP;
2132 }
2133 }
2134 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2135
2136 return 0;
2137}
2138EXPORT_SYMBOL(pci_enable_obff);
2139
2140
2141
2142
2143
2144
2145
2146void pci_disable_obff(struct pci_dev *dev)
2147{
2148 int pos;
2149 u16 ctrl;
2150
2151 if (!pci_is_pcie(dev))
2152 return;
2153
2154 pos = pci_pcie_cap(dev);
2155 if (!pos)
2156 return;
2157
2158 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2159 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2160 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2161}
2162EXPORT_SYMBOL(pci_disable_obff);
2163
2164
2165
2166
2167
2168
2169
2170
2171bool pci_ltr_supported(struct pci_dev *dev)
2172{
2173 int pos;
2174 u32 cap;
2175
2176 if (!pci_is_pcie(dev))
2177 return false;
2178
2179 pos = pci_pcie_cap(dev);
2180 if (!pos)
2181 return false;
2182
2183 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2184
2185 return cap & PCI_EXP_DEVCAP2_LTR;
2186}
2187EXPORT_SYMBOL(pci_ltr_supported);
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199int pci_enable_ltr(struct pci_dev *dev)
2200{
2201 int pos;
2202 u16 ctrl;
2203 int ret;
2204
2205 if (!pci_ltr_supported(dev))
2206 return -ENOTSUPP;
2207
2208 pos = pci_pcie_cap(dev);
2209 if (!pos)
2210 return -ENOTSUPP;
2211
2212
2213 if (PCI_FUNC(dev->devfn) != 0)
2214 return -EINVAL;
2215
2216
2217 if (dev->bus) {
2218 ret = pci_enable_ltr(dev->bus->self);
2219 if (ret)
2220 return ret;
2221 }
2222
2223 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2224 ctrl |= PCI_EXP_LTR_EN;
2225 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2226
2227 return 0;
2228}
2229EXPORT_SYMBOL(pci_enable_ltr);
2230
2231
2232
2233
2234
2235void pci_disable_ltr(struct pci_dev *dev)
2236{
2237 int pos;
2238 u16 ctrl;
2239
2240 if (!pci_ltr_supported(dev))
2241 return;
2242
2243 pos = pci_pcie_cap(dev);
2244 if (!pos)
2245 return;
2246
2247
2248 if (PCI_FUNC(dev->devfn) != 0)
2249 return;
2250
2251 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2252 ctrl &= ~PCI_EXP_LTR_EN;
2253 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2254}
2255EXPORT_SYMBOL(pci_disable_ltr);
2256
/*
 * Reduce *val until it fits in the 10-bit LTR value field, counting how
 * many 32x scale steps were needed (rounding each division up).
 */
static int __pci_ltr_scale(int *val)
{
	int scale;

	for (scale = 0; *val > 1023; scale++)
		*val = (*val + 31) / 32;

	return scale;
}
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2277{
2278 int pos, ret, snoop_scale, nosnoop_scale;
2279 u16 val;
2280
2281 if (!pci_ltr_supported(dev))
2282 return -ENOTSUPP;
2283
2284 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2285 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2286
2287 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2288 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2289 return -EINVAL;
2290
2291 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2292 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2293 return -EINVAL;
2294
2295 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2296 if (!pos)
2297 return -ENOTSUPP;
2298
2299 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2300 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2301 if (ret != 4)
2302 return -EIO;
2303
2304 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2305 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2306 if (ret != 4)
2307 return -EIO;
2308
2309 return 0;
2310}
2311EXPORT_SYMBOL(pci_set_ltr);
2312
/* Whether pci_enable_acs() should program ACS on scanned devices. */
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
2322
2323
2324
2325
2326
2327void pci_enable_acs(struct pci_dev *dev)
2328{
2329 int pos;
2330 u16 cap;
2331 u16 ctrl;
2332
2333 if (!pci_acs_enable)
2334 return;
2335
2336 if (!pci_is_pcie(dev))
2337 return;
2338
2339 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2340 if (!pos)
2341 return;
2342
2343 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2344 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2345
2346
2347 ctrl |= (cap & PCI_ACS_SV);
2348
2349
2350 ctrl |= (cap & PCI_ACS_RR);
2351
2352
2353 ctrl |= (cap & PCI_ACS_CR);
2354
2355
2356 ctrl |= (cap & PCI_ACS_UF);
2357
2358 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2359}
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2373{
2374 int slot;
2375
2376 if (pci_ari_enabled(dev->bus))
2377 slot = 0;
2378 else
2379 slot = PCI_SLOT(dev->devfn);
2380
2381 return (((pin - 1) + slot) % 4) + 1;
2382}
2383
2384int
2385pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2386{
2387 u8 pin;
2388
2389 pin = dev->pin;
2390 if (!pin)
2391 return -1;
2392
2393 while (!pci_is_root_bus(dev->bus)) {
2394 pin = pci_swizzle_interrupt_pin(dev, pin);
2395 dev = dev->bus->self;
2396 }
2397 *bridge = dev;
2398 return pin;
2399}
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2410{
2411 u8 pin = *pinp;
2412
2413 while (!pci_is_root_bus(dev->bus)) {
2414 pin = pci_swizzle_interrupt_pin(dev, pin);
2415 dev = dev->bus->self;
2416 }
2417 *pinp = pin;
2418 return PCI_SLOT(dev->devfn);
2419}
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430void pci_release_region(struct pci_dev *pdev, int bar)
2431{
2432 struct pci_devres *dr;
2433
2434 if (pci_resource_len(pdev, bar) == 0)
2435 return;
2436 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2437 release_region(pci_resource_start(pdev, bar),
2438 pci_resource_len(pdev, bar));
2439 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2440 release_mem_region(pci_resource_start(pdev, bar),
2441 pci_resource_len(pdev, bar));
2442
2443 dr = find_pci_dr(pdev);
2444 if (dr)
2445 dr->region_mask &= ~(1 << bar);
2446}
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
/**
 * __pci_request_region - Reserved PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Returns 0 on success, or %EBUSY on error.  A warning message is also
 * printed on failure.  If @exclusive is set then a userspace mmap of
 * /dev/mem will not be allowed for this region.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
									int exclusive)
{
	struct pci_devres *dr;

	/* BARs of size zero are not implemented; nothing to reserve. */
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	/* Record the claim so pcim_* managed release can undo it. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Returns 0 on success, or %EBUSY on error.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2537{
2538 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2539}
2540
2541
2542
2543
2544
2545
2546
2547
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++) {
		if (bars & (1 << i))
			pci_release_region(pdev, i);
	}
}
2556
2557int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2558 const char *res_name, int excl)
2559{
2560 int i;
2561
2562 for (i = 0; i < 6; i++)
2563 if (bars & (1 << i))
2564 if (__pci_request_region(pdev, i, res_name, excl))
2565 goto err_out;
2566 return 0;
2567
2568err_out:
2569 while(--i >= 0)
2570 if (bars & (1 << i))
2571 pci_release_region(pdev, i);
2572
2573 return -EBUSY;
2574}
2575
2576
2577
2578
2579
2580
2581
2582
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
2588
2589int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2590 int bars, const char *res_name)
2591{
2592 return __pci_request_selected_regions(pdev, bars, res_name,
2593 IORESOURCE_EXCLUSIVE);
2594}
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by
 *	pci_request_regions
 *
 * Releases all six standard BARs.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
/**
 * pci_request_regions - Reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Reserves all six standard BARs.  Returns 0 on success, or %EBUSY.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
/**
 * pci_request_regions_exclusive - Reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Like pci_request_regions(), but also blocks userspace /dev/mem mmap.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
						      ((1 << 6) - 1), res_name);
}
2649
2650static void __pci_set_master(struct pci_dev *dev, bool enable)
2651{
2652 u16 old_cmd, cmd;
2653
2654 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2655 if (enable)
2656 cmd = old_cmd | PCI_COMMAND_MASTER;
2657 else
2658 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2659 if (cmd != old_cmd) {
2660 dev_dbg(&dev->dev, "%s bus mastering\n",
2661 enable ? "enabling" : "disabling");
2662 pci_write_config_word(dev, PCI_COMMAND, cmd);
2663 }
2664 dev->is_busmaster = enable;
2665}
2666
2667
2668
2669
2670
2671
2672
2673
2674
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* The latency timer doesn't apply to PCIe (see PCIe spec) */
	if (pci_is_pcie(dev))
		return;

	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		/* Raise too-small values, but never above the arch cap. */
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;
	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
2693
2694
2695
2696
2697
2698
2699
2700
2701void pci_set_master(struct pci_dev *dev)
2702{
2703 __pci_set_master(dev, true);
2704 pcibios_set_master(dev);
2705}
2706
2707
2708
2709
2710
2711void pci_clear_master(struct pci_dev *dev)
2712{
2713 __pci_set_master(dev, false);
2714}
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back: the register may be hardwired read-only. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
2752EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2753
2754#ifdef PCI_DISABLE_MWI
/* MWI disabled at build time (PCI_DISABLE_MWI): all three entry points
 * become no-ops that report success. */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}
2768
2769#else
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779int
2780pci_set_mwi(struct pci_dev *dev)
2781{
2782 int rc;
2783 u16 cmd;
2784
2785 rc = pci_set_cacheline_size(dev);
2786 if (rc)
2787 return rc;
2788
2789 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2790 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
2791 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2792 cmd |= PCI_COMMAND_INVALIDATE;
2793 pci_write_config_word(dev, PCI_COMMAND, cmd);
2794 }
2795
2796 return 0;
2797}
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return pci_set_mwi(dev);
}
2813
2814
2815
2816
2817
2818
2819
2820void
2821pci_clear_mwi(struct pci_dev *dev)
2822{
2823 u16 cmd;
2824
2825 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2826 if (cmd & PCI_COMMAND_INVALIDATE) {
2827 cmd &= ~PCI_COMMAND_INVALIDATE;
2828 pci_write_config_word(dev, PCI_COMMAND, cmd);
2829 }
2830}
2831#endif
2832
2833
2834
2835
2836
2837
2838
2839
2840void
2841pci_intx(struct pci_dev *pdev, int enable)
2842{
2843 u16 pci_command, new;
2844
2845 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2846
2847 if (enable) {
2848 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2849 } else {
2850 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2851 }
2852
2853 if (new != pci_command) {
2854 struct pci_devres *dr;
2855
2856 pci_write_config_word(pdev, PCI_COMMAND, new);
2857
2858 dr = find_pci_dr(pdev);
2859 if (dr && !dr->restore_intx) {
2860 dr->restore_intx = 1;
2861 dr->orig_intx = !enable;
2862 }
2863 }
2864}
2865
2866
2867
2868
2869
2870
2871
2872
/**
 * pci_intx_mask_supported - probe for INTx masking support
 * @dev: the PCI device to operate on
 *
 * Check if the device dev supports INTx masking via the config space
 * command word.  Probes by toggling the INTX_DISABLE bit and reading it
 * back; the original value is restored before returning.
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
	bool mask_supported = false;
	u16 orig, new;

	pci_cfg_access_lock(dev);

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	pci_write_config_word(dev, PCI_COMMAND,
			      orig ^ PCI_COMMAND_INTX_DISABLE);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	/*
	 * There's no way to protect against hardware bugs or detect them
	 * reliably, but as long as we know what the value should be, let's
	 * go ahead and check it.
	 */
	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
		/* Unrelated bits changed underneath us: something is wrong. */
		dev_err(&dev->dev, "Command register changed from "
			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
		mask_supported = true;
		/* Restore the original command word. */
		pci_write_config_word(dev, PCI_COMMAND, orig);
	}

	pci_cfg_access_unlock(dev);
	return mask_supported;
}
2901EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2902
/*
 * Atomically check whether this device raised the pending INTx and, if so,
 * mask (or unmask) its INTx line.  Uses a single 32-bit read covering both
 * PCI_COMMAND and PCI_STATUS, under pci_lock, so the check-and-update
 * cannot race with other config accesses.  Returns true if the mask state
 * was updated (i.e. the IRQ was ours when masking, or vice versa).
 */
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	raw_spin_lock_irqsave(&pci_lock, flags);

	/* Raw bus op: pci_lock is held, so the wrappers can't be used. */
	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956bool pci_check_and_mask_intx(struct pci_dev *dev)
2957{
2958 return pci_check_and_set_intx_mask(dev, true);
2959}
2960EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970bool pci_check_and_unmask_intx(struct pci_dev *dev)
2971{
2972 return pci_check_and_set_intx_mask(dev, false);
2973}
2974EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984void pci_msi_off(struct pci_dev *dev)
2985{
2986 int pos;
2987 u16 control;
2988
2989 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2990 if (pos) {
2991 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2992 control &= ~PCI_MSI_FLAGS_ENABLE;
2993 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2994 }
2995 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2996 if (pos) {
2997 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2998 control &= ~PCI_MSIX_FLAGS_ENABLE;
2999 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3000 }
3001}
3002EXPORT_SYMBOL_GPL(pci_msi_off);
3003
3004int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3005{
3006 return dma_set_max_seg_size(&dev->dev, size);
3007}
3008EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3009
3010int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3011{
3012 return dma_set_seg_boundary(&dev->dev, mask);
3013}
3014EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3015
/*
 * Perform a PCIe Function Level Reset, if the device advertises FLR in
 * its Device Capabilities.  With @probe set, only reports whether FLR is
 * possible (0) or not (-ENOTTY) without touching the device.
 */
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status, control;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean; exponential backoff up
	 * to ~800 ms total before giving up and resetting anyway. */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
	control |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

	/* Spec-mandated settle time after initiating FLR. */
	msleep(100);

	return 0;
}
3056
/*
 * Perform an Advanced Features (AF) Function Level Reset, the conventional
 * PCI analogue of pcie_flr().  With @probe set, only reports whether the
 * device supports AF FLR.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean; exponential backoff. */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
/**
 * pci_pm_reset - Put device into and out of D3 to cause a PM-level reset.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If the device supports PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in the D0 power state
 * to start with, this function cannot be used.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	/* Cycle D0 -> D3hot -> D0, sleeping after each transition. */
	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}
3139
/*
 * Reset @dev by pulsing the secondary bus reset bit in its parent bridge.
 * Only usable when @dev is the sole device on its bus (the reset affects
 * every device behind the bridge) and is not itself a bridge.
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	/* Refuse if any sibling shares the bus. */
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	/* Assert, hold, then deassert the secondary bus reset. */
	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}
3166
/*
 * Try each available reset mechanism in order of preference until one
 * succeeds or fails with something other than -ENOTTY ("method not
 * applicable").  For a real reset (!probe), config space access and the
 * device lock are held across the whole operation.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	if (!probe) {
		pci_cfg_access_lock(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	if (!probe) {
		device_unlock(&dev->dev);
		pci_cfg_access_unlock(dev);
	}

	return rc;
}
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Performs the reset without saving/restoring device state; the caller
 * is responsible for any state the device loses.
 *
 * Returns 0 if the device function was successfully reset or negative if
 * the device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
3226EXPORT_SYMBOL_GPL(__pci_reset_function);
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Probe-only variant suitable when the caller already holds the device
 * lock (pci_dev_reset() with probe set takes no locks).
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function_locked(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}
3251EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Unlike __pci_reset_function(), saves device state before the reset and
 * restores it afterwards, and disables INTx so the function generates no
 * interrupts while being reset.
 *
 * Returns 0 if the device function was successfully reset or negative if
 * the device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	/* Bail out early if no reset method is applicable. */
	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	pci_restore_state(dev);

	return rc;
}
3307EXPORT_SYMBOL_GPL(pci_reset_function);
3308
3309
3310
3311
3312
3313
3314
3315
3316int pcix_get_max_mmrbc(struct pci_dev *dev)
3317{
3318 int cap;
3319 u32 stat;
3320
3321 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3322 if (!cap)
3323 return -EINVAL;
3324
3325 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3326 return -EINVAL;
3327
3328 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3329}
3330EXPORT_SYMBOL(pcix_get_max_mmrbc);
3331
3332
3333
3334
3335
3336
3337
3338
3339int pcix_get_mmrbc(struct pci_dev *dev)
3340{
3341 int cap;
3342 u16 cmd;
3343
3344 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3345 if (!cap)
3346 return -EINVAL;
3347
3348 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3349 return -EINVAL;
3350
3351 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3352}
3353EXPORT_SYMBOL(pcix_get_mmrbc);
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to configure
 * @mmrbc: maximum memory read count in bytes;
 *    valid values are 512, 1024, 2048, 4096
 *
 * Sets the maximum memory read byte count if possible.  Some buses are
 * flagged (PCI_BUS_FLAGS_NO_MMRBC) as unable to tolerate an increase,
 * in which case -EIO is returned.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	/* Encode byte count: 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3. */
	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Refuse values above the device's designed maximum (bits 22:21). */
	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		/* Increases are disallowed on buses flagged with errata. */
		if (v > o && dev->bus &&
		 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
3401EXPORT_SYMBOL(pcix_set_mmrbc);
3402
3403
3404
3405
3406
3407
3408
3409
3410int pcie_get_readrq(struct pci_dev *dev)
3411{
3412 int ret, cap;
3413 u16 ctl;
3414
3415 cap = pci_pcie_cap(dev);
3416 if (!cap)
3417 return -EINVAL;
3418
3419 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3420 if (!ret)
3421 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3422
3423 return ret;
3424}
3425EXPORT_SYMBOL(pcie_get_readrq);
3426
3427
3428
3429
3430
3431
3432
3433
3434
/**
 * pcie_set_readrq - set PCI Express maximum memory read request size
 * @dev: PCI device to configure
 * @rq: maximum memory read count in bytes;
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible, sets the maximum memory read request size in bytes.
 * Returns 0 on success, -EINVAL on bad input or non-PCIe device, or a
 * config access error code.
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		goto out;

	cap = pci_pcie_cap(dev);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;
	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < 0)
			return mps;
		if (mps < rq)
			rq = mps;
	}

	/* Encode: 128 -> 0, 256 -> 1, ... 4096 -> 5, into bits 14:12. */
	v = (ffs(rq) - 8) << 12;

	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= v;
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}

out:
	return err;
}
3476EXPORT_SYMBOL(pcie_set_readrq);
3477
3478
3479
3480
3481
3482
3483
3484
3485int pcie_get_mps(struct pci_dev *dev)
3486{
3487 int ret, cap;
3488 u16 ctl;
3489
3490 cap = pci_pcie_cap(dev);
3491 if (!cap)
3492 return -EINVAL;
3493
3494 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3495 if (!ret)
3496 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3497
3498 return ret;
3499}
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509int pcie_set_mps(struct pci_dev *dev, int mps)
3510{
3511 int cap, err = -EINVAL;
3512 u16 ctl, v;
3513
3514 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3515 goto out;
3516
3517 v = ffs(mps) - 8;
3518 if (v > dev->pcie_mpss)
3519 goto out;
3520 v <<= 5;
3521
3522 cap = pci_pcie_cap(dev);
3523 if (!cap)
3524 goto out;
3525
3526 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3527 if (err)
3528 goto out;
3529
3530 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3531 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3532 ctl |= v;
3533 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3534 }
3535out:
3536 return err;
3537}
3538
3539
3540
3541
3542
3543
3544
3545
3546int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3547{
3548 int i, bars = 0;
3549 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3550 if (pci_resource_flags(dev, i) & flags)
3551 bars |= (1 << i);
3552 return bars;
3553}
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3564{
3565 int reg;
3566
3567 if (resno < PCI_ROM_RESOURCE) {
3568 *type = pci_bar_unknown;
3569 return PCI_BASE_ADDRESS_0 + 4 * resno;
3570 } else if (resno == PCI_ROM_RESOURCE) {
3571 *type = pci_bar_mem32;
3572 return dev->rom_base_reg;
3573 } else if (resno < PCI_BRIDGE_RESOURCES) {
3574
3575 reg = pci_iov_resource_bar(dev, resno, type);
3576 if (reg)
3577 return reg;
3578 }
3579
3580 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3581 return 0;
3582}
3583
3584
/* Architecture-registered hook used by pci_set_vga_state_arch(), if any. */
static arch_set_vga_state_t arch_set_vga_state;

/*
 * pci_register_set_vga_state - install an arch-specific VGA state handler.
 * __init: intended to be called once during boot, before any VGA state
 * changes are requested.
 */
void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;
}
3591
3592static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3593 unsigned int command_bits, u32 flags)
3594{
3595 if (arch_set_vga_state)
3596 return arch_set_vga_state(dev, decode, command_bits,
3597 flags);
3598 return 0;
3599}
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609int pci_set_vga_state(struct pci_dev *dev, bool decode,
3610 unsigned int command_bits, u32 flags)
3611{
3612 struct pci_bus *bus;
3613 struct pci_dev *bridge;
3614 u16 cmd;
3615 int rc;
3616
3617 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3618
3619
3620 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3621 if (rc)
3622 return rc;
3623
3624 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3625 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3626 if (decode == true)
3627 cmd |= command_bits;
3628 else
3629 cmd &= ~command_bits;
3630 pci_write_config_word(dev, PCI_COMMAND, cmd);
3631 }
3632
3633 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3634 return 0;
3635
3636 bus = dev->bus;
3637 while (bus) {
3638 bridge = bus->self;
3639 if (bridge) {
3640 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3641 &cmd);
3642 if (decode == true)
3643 cmd |= PCI_BRIDGE_CTL_VGA;
3644 else
3645 cmd &= ~PCI_BRIDGE_CTL_VGA;
3646 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3647 cmd);
3648 }
3649 bus = bus->parent;
3650 }
3651 return 0;
3652}
3653
3654#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3655static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3656static DEFINE_SPINLOCK(resource_alignment_lock);
3657
3658
3659
3660
3661
3662
3663
3664
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to check
 *
 * Parses the "pci=resource_alignment=" parameter: a ';'- or ','-separated
 * list of entries of the form [<order>@][<seg>:]<bus>:<slot>.<func>.
 *
 * RETURNS: the requested alignment for @dev if one is specified
 *          (PAGE_SIZE when no order prefix was given); zero otherwise.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		/* Optional "<order>@" prefix giving the alignment order. */
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
			p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		/* Try "seg:bus:slot.func" first, then plain "bus:slot.func". */
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
				&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
3714
3715
3716
3717
3718
3719
3720
3721
/*
 * pci_is_reassigndev - did the user ask for this device's resources to be
 * realigned via the "pci=resource_alignment=" boot parameter?
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return pci_specified_resource_alignment(dev) != 0;
}
3726
3727
3728
3729
3730
3731
3732
3733
/*
 * Disable memory decoding and release the memory resources of the device
 * named by the "pci=resource_alignment=" boot parameter, rounding resource
 * sizes up to the requested alignment, so the resources can be reassigned
 * at suitably aligned addresses later.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	if (!pci_is_reassigndev(dev))
		return;

	/* Host bridge resources cannot be reassigned. */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			"Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		"Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	align = pci_specified_resource_alignment(dev);
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				"Rounding up size of resource #%d to %#llx.\n",
				i, (unsigned long long)size);
		}
		/* Mark the resource unassigned: start 0, length preserved. */
		r->end = size - 1;
		r->start = 0;
	}
	/* Need to disable bridge's resource window,
	 * to enable the kernel to reassign a new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}
3788
3789ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3790{
3791 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3792 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3793 spin_lock(&resource_alignment_lock);
3794 strncpy(resource_alignment_param, buf, count);
3795 resource_alignment_param[count] = '\0';
3796 spin_unlock(&resource_alignment_lock);
3797 return count;
3798}
3799
3800ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3801{
3802 size_t count;
3803 spin_lock(&resource_alignment_lock);
3804 count = snprintf(buf, size, "%s", resource_alignment_param);
3805 spin_unlock(&resource_alignment_lock);
3806 return count;
3807}
3808
/* sysfs "show" hook: report the current resource_alignment parameter. */
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

/* sysfs "store" hook: replace the resource_alignment parameter. */
static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

/* /sys/bus/pci/resource_alignment: world-readable, root-writable. */
BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
	pci_resource_alignment_store);

/* Register the attribute once the PCI bus type exists. */
static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
3830
/* Disable PCI domain support; invoked for the "pci=nodomains" option. */
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3847{
3848 return 1;
3849}
3850
/*
 * Default (empty) CardBus fixup; architectures that need per-bus fixups
 * override this weak stub.
 */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
3855
/*
 * pci_setup - parse the early "pci=" kernel command line options.
 *
 * Options are comma-separated.  Each token is first offered to the
 * architecture via pcibios_setup(); only tokens it leaves untouched are
 * handled here.  Note the prefix matches are order-sensitive: e.g.
 * "realloc=" must be tested before the bare "realloc".
 */
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
3905early_param("pci", pci_setup);
3906
3907EXPORT_SYMBOL(pci_reenable_device);
3908EXPORT_SYMBOL(pci_enable_device_io);
3909EXPORT_SYMBOL(pci_enable_device_mem);
3910EXPORT_SYMBOL(pci_enable_device);
3911EXPORT_SYMBOL(pcim_enable_device);
3912EXPORT_SYMBOL(pcim_pin_device);
3913EXPORT_SYMBOL(pci_disable_device);
3914EXPORT_SYMBOL(pci_find_capability);
3915EXPORT_SYMBOL(pci_bus_find_capability);
3916EXPORT_SYMBOL(pci_release_regions);
3917EXPORT_SYMBOL(pci_request_regions);
3918EXPORT_SYMBOL(pci_request_regions_exclusive);
3919EXPORT_SYMBOL(pci_release_region);
3920EXPORT_SYMBOL(pci_request_region);
3921EXPORT_SYMBOL(pci_request_region_exclusive);
3922EXPORT_SYMBOL(pci_release_selected_regions);
3923EXPORT_SYMBOL(pci_request_selected_regions);
3924EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3925EXPORT_SYMBOL(pci_set_master);
3926EXPORT_SYMBOL(pci_clear_master);
3927EXPORT_SYMBOL(pci_set_mwi);
3928EXPORT_SYMBOL(pci_try_set_mwi);
3929EXPORT_SYMBOL(pci_clear_mwi);
3930EXPORT_SYMBOL_GPL(pci_intx);
3931EXPORT_SYMBOL(pci_assign_resource);
3932EXPORT_SYMBOL(pci_find_parent_resource);
3933EXPORT_SYMBOL(pci_select_bars);
3934
3935EXPORT_SYMBOL(pci_set_power_state);
3936EXPORT_SYMBOL(pci_save_state);
3937EXPORT_SYMBOL(pci_restore_state);
3938EXPORT_SYMBOL(pci_pme_capable);
3939EXPORT_SYMBOL(pci_pme_active);
3940EXPORT_SYMBOL(pci_wake_from_d3);
3941EXPORT_SYMBOL(pci_target_state);
3942EXPORT_SYMBOL(pci_prepare_to_sleep);
3943EXPORT_SYMBOL(pci_back_from_sleep);
3944EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3945