1
2
3
4
5
6
7
8
9#include <linux/list.h>
10#include <linux/pci.h>
11#include <linux/rbtree.h>
12#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/atomic.h>
15#include <linux/debugfs.h>
16#include <asm/pci-bridge.h>
17#include <asm/ppc-pci.h>
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
/*
 * One cached I/O or memory address range belonging to a PCI device.
 * Nodes live in the rb-tree at pci_io_addr_cache_root and let a fault
 * address be mapped back to the owning device.
 */
struct pci_io_addr_range {
	struct rb_node rb_node;		/* linkage into pci_io_addr_cache_root.rb_root */
	resource_size_t addr_lo;	/* range start (inclusive) */
	resource_size_t addr_hi;	/* range end (inclusive) */
	struct eeh_dev *edev;		/* EEH device state for the owner */
	struct pci_dev *pcidev;		/* owning PCI device */
	unsigned long flags;		/* resource flags (IORESOURCE_IO/IORESOURCE_MEM) */
};
48
/*
 * Root of the address-range cache.  piar_lock protects rb_root; all
 * users in this file take it with spin_lock_irqsave().
 */
static struct pci_io_addr_cache {
	struct rb_root rb_root;		/* tree of pci_io_addr_range nodes */
	spinlock_t piar_lock;		/* guards rb_root */
} pci_io_addr_cache_root;
53
54static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
55{
56 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
57
58 while (n) {
59 struct pci_io_addr_range *piar;
60 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
61
62 if (addr < piar->addr_lo)
63 n = n->rb_left;
64 else if (addr > piar->addr_hi)
65 n = n->rb_right;
66 else
67 return piar->edev;
68 }
69
70 return NULL;
71}
72
73
74
75
76
77
78
79
80
81
82struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
83{
84 struct eeh_dev *edev;
85 unsigned long flags;
86
87 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
88 edev = __eeh_addr_cache_get_device(addr);
89 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
90 return edev;
91}
92
93#ifdef DEBUG
94
95
96
97
98static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
99{
100 struct rb_node *n;
101 int cnt = 0;
102
103 n = rb_first(&cache->rb_root);
104 while (n) {
105 struct pci_io_addr_range *piar;
106 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
107 pr_info("PCI: %s addr range %d [%pap-%pap]: %s\n",
108 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
109 &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
110 cnt++;
111 n = rb_next(n);
112 }
113}
114#endif
115
116
/*
 * Insert the range [alo, ahi] for @dev into the cache rb-tree.
 *
 * Called under piar_lock (via eeh_addr_cache_insert_dev()), hence the
 * GFP_ATOMIC allocation.  If an overlapping node already exists it is
 * returned instead of inserting a new one; a warning is logged unless
 * the existing node is an exact duplicate for the same device.
 *
 * Returns the cached (new or pre-existing) range, or NULL when
 * allocation fails.
 */
static struct pci_io_addr_range *
eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
		      resource_size_t ahi, unsigned long flags)
{
	struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct pci_io_addr_range *piar;

	/* Walk down to the leaf slot where [alo, ahi] belongs. */
	while (*p) {
		parent = *p;
		piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
		if (ahi < piar->addr_lo) {
			p = &parent->rb_left;
		} else if (alo > piar->addr_hi) {
			p = &parent->rb_right;
		} else {
			/* Overlap: warn unless it is an exact duplicate. */
			if (dev != piar->pcidev ||
			    alo != piar->addr_lo || ahi != piar->addr_hi) {
				pr_warn("PIAR: overlapping address range\n");
			}
			return piar;
		}
	}
	piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
	if (!piar)
		return NULL;

	piar->addr_lo = alo;
	piar->addr_hi = ahi;
	piar->edev = pci_dev_to_eeh_dev(dev);
	piar->pcidev = dev;
	piar->flags = flags;

	eeh_edev_dbg(piar->edev, "PIAR: insert range=[%pap:%pap]\n",
		     &alo, &ahi);

	/* Link at the slot found above, then rebalance the tree. */
	rb_link_node(&piar->rb_node, parent, p);
	rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);

	return piar;
}
159
160static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
161{
162 struct eeh_dev *edev;
163 int i;
164
165 edev = pci_dev_to_eeh_dev(dev);
166 if (!edev) {
167 pr_warn("PCI: no EEH dev found for %s\n",
168 pci_name(dev));
169 return;
170 }
171
172
173 if (!edev->pe) {
174 dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
175 return;
176 }
177
178
179
180
181
182 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
183 resource_size_t start = pci_resource_start(dev,i);
184 resource_size_t end = pci_resource_end(dev,i);
185 unsigned long flags = pci_resource_flags(dev,i);
186
187
188 if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
189 continue;
190 if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
191 continue;
192 eeh_addr_cache_insert(dev, start, end, flags);
193 }
194}
195
196
197
198
199
200
201
202
203
204void eeh_addr_cache_insert_dev(struct pci_dev *dev)
205{
206 unsigned long flags;
207
208 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
209 __eeh_addr_cache_insert_dev(dev);
210 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
211}
212
213static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
214{
215 struct rb_node *n;
216
217restart:
218 n = rb_first(&pci_io_addr_cache_root.rb_root);
219 while (n) {
220 struct pci_io_addr_range *piar;
221 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
222
223 if (piar->pcidev == dev) {
224 eeh_edev_dbg(piar->edev, "PIAR: remove range=[%pap:%pap]\n",
225 &piar->addr_lo, &piar->addr_hi);
226 rb_erase(n, &pci_io_addr_cache_root.rb_root);
227 kfree(piar);
228 goto restart;
229 }
230 n = rb_next(n);
231 }
232}
233
234
235
236
237
238
239
240
241
242
243void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
244{
245 unsigned long flags;
246
247 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
248 __eeh_addr_cache_rmv_dev(dev);
249 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
250}
251
252
253
254
255
256
257
/**
 * eeh_addr_cache_init - Initialize the address cache
 *
 * Prepares the spinlock guarding the cache; must run before any
 * insert, remove, or lookup on pci_io_addr_cache_root.
 */
void eeh_addr_cache_init(void)
{
	spin_lock_init(&pci_io_addr_cache_root.piar_lock);
}
262
263static int eeh_addr_cache_show(struct seq_file *s, void *v)
264{
265 struct pci_io_addr_range *piar;
266 struct rb_node *n;
267 unsigned long flags;
268
269 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
270 for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
271 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
272
273 seq_printf(s, "%s addr range [%pap-%pap]: %s\n",
274 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
275 &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
276 }
277 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
278
279 return 0;
280}
281DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache);
282
/*
 * Create the read-only "eeh_address_cache" debugfs file (under
 * arch_debugfs_dir) backed by eeh_addr_cache_show() above.
 */
void eeh_cache_debugfs_init(void)
{
	debugfs_create_file_unsafe("eeh_address_cache", 0400,
			arch_debugfs_dir, NULL,
			&eeh_addr_cache_fops);
}
289