/*
 * PCI bus resource and device management helpers.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

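/*
 * Add @res to the @resources list as a host bridge window; @offset is the
 * difference between the CPU address and the corresponding bus address of
 * the window.
 */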
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		pr_err("PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

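/* Add @res to @resources with no CPU/bus address offset. */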
void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

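/* Free every entry on a resource list built with pci_add_resource(). */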
void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);

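/*
 * Attach an additional resource (beyond the fixed bus->resource[] slots)
 * to @bus.  It becomes visible through pci_bus_resource_n() and thus
 * through pci_bus_for_each_resource().
 */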
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
			  unsigned int flags)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	bus_res->flags = flags;
	list_add_tail(&bus_res->list, &bus->resources);
}

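/*
 * Return the @n-th resource of @bus: indices below PCI_BRIDGE_RESOURCE_NUM
 * come from the fixed bus->resource[] array, higher indices from the
 * bus->resources list, or NULL when @n is out of range.
 */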
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);

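/* Drop all resources from @bus: clear the fixed slots and free the list. */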
void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

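/*
 * Request each I/O and memory window on @resources from the corresponding
 * root resource (ioport_resource or iomem_resource).  The requests are
 * device-managed, so they are released automatically when @dev's driver
 * detaches.
 */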
int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);

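/*
 * Bus address ranges used to constrain allocations: the 32-bit range, and
 * (when 64-bit DMA addresses are available) the full 64-bit range and the
 * range above 4GB.
 */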
static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus fall entirely within @region.  If no part of @res lies inside
 * @region, the resource is left empty (end < start).
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;
	else
		pcibios_bus_to_resource(bus, res, &r);
}

static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data,
		struct pci_bus_region *region)
{
	int i, ret;
	struct resource *r, avail;
	resource_size_t max;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r, i) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * A non-prefetchable resource cannot be allocated from a
		 * prefetchable window.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * @min is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM, but
		 * if this window has already been assigned an address, its
		 * start overrides @min.
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Try to allocate from this window */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum address to allocate
 * @type_mask: IORESOURCE_* type flags that must match
 * @alignf: resource alignment function
 * @alignf_data: data argument for the alignment function
 *
 * Allocate a resource of @size and @align from one of @bus's resource
 * windows.  For 64-bit MEM resources, prefer bus addresses above 4GB and
 * fall back to the full 64-bit range; everything else is allocated from
 * the 32-bit range.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);

/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If the
 * resource already fits inside a window of the upstream bridge, do nothing.
 * If it overlaps an upstream window but extends beyond it, clip the
 * resource so it fits completely within that window.  Returns true if the
 * resource was clipped.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;

		if (res->start == start && res->end == end)
			return false;

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);

		return true;
	}

	return false;
}

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * Create sysfs and procfs entries for @dev and attempt to bind a driver
 * to it.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	int retval;

	/*
	 * These can only be done once the device's resources have been
	 * assigned, so they cannot happen in pci_device_add().
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	dev->match_driver = true;
	retval = device_attach(&dev->dev);
	if (retval < 0 && retval != -EPROBE_DEFER) {
		pci_warn(dev, "device attach failed (%d)\n", retval);
		pci_proc_detach_device(dev);
		pci_remove_sysfs_dev_files(dev);
		return;
	}

	pci_dev_assign_added(dev, true);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start drivers for devices on a bus
 * @bus: bus to scan for new devices
 *
 * Add every device on @bus that has not been added yet, then recurse
 * into any subordinate buses.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);

/**
 * pci_walk_bus - walk devices on/under a bus, calling a callback
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to the callback
 *
 * Walk the given bus, including any bridged devices on buses under it,
 * and call @cb on each device found.  If @cb returns anything other
 * than 0, the walk stops.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
		  void *userdata)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	struct list_head *next;
	int retval;

	bus = top;
	down_read(&pci_bus_sem);
	next = top->devices.next;
	for (;;) {
		if (next == &bus->devices) {
			/* end of this bus, go up or finish */
			if (bus == top)
				break;
			next = bus->self->bus_list.next;
			bus = bus->self->bus;
			continue;
		}
		dev = list_entry(next, struct pci_dev, bus_list);
		if (dev->subordinate) {
			/* this is a PCI-PCI bridge, do its devices next */
			next = dev->subordinate->devices.next;
			bus = dev->subordinate;
		} else
			next = dev->bus_list.next;

		retval = cb(dev, userdata);
		if (retval)
			break;
	}
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);

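/* Take a reference on @bus (a no-op for NULL) and return it. */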
struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}

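/* Drop a reference taken by pci_bus_get(); NULL is ignored. */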
void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}