1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/types.h>
28#include <linux/kernel.h>
29#include <linux/export.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/ioport.h>
33#include <linux/errno.h>
34#include <linux/bootmem.h>
35
36#include <asm/pat.h>
37#include <asm/e820.h>
38#include <asm/pci_x86.h>
39#include <asm/io_apic.h>
40
41
42
43
44
45
/*
 * Per-device record of the BAR addresses assigned by the firmware,
 * saved so a firmware address can be retrieved later if the kernel's
 * own resource assignment fails.
 */
struct pcibios_fwaddrmap {
	struct list_head list;
	struct pci_dev *dev;	/* holds a reference taken with pci_dev_get() */
	resource_size_t fw_addr[DEVICE_COUNT_RESOURCE];
};

/* List of saved firmware addresses, protected by pcibios_fwaddrmap_lock. */
static LIST_HEAD(pcibios_fwaddrmappings);
static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);
/* Set once the map is torn down; save/retrieve become no-ops afterwards. */
static bool pcibios_fw_addr_done;
55
56
57static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
58{
59 struct pcibios_fwaddrmap *map;
60
61 WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
62
63 list_for_each_entry(map, &pcibios_fwaddrmappings, list)
64 if (map->dev == dev)
65 return map;
66
67 return NULL;
68}
69
70static void
71pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
72{
73 unsigned long flags;
74 struct pcibios_fwaddrmap *map;
75
76 if (pcibios_fw_addr_done)
77 return;
78
79 spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
80 map = pcibios_fwaddrmap_lookup(dev);
81 if (!map) {
82 spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
83 map = kzalloc(sizeof(*map), GFP_KERNEL);
84 if (!map)
85 return;
86
87 map->dev = pci_dev_get(dev);
88 map->fw_addr[idx] = fw_addr;
89 INIT_LIST_HEAD(&map->list);
90
91 spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
92 list_add_tail(&map->list, &pcibios_fwaddrmappings);
93 } else
94 map->fw_addr[idx] = fw_addr;
95 spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
96}
97
98resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
99{
100 unsigned long flags;
101 struct pcibios_fwaddrmap *map;
102 resource_size_t fw_addr = 0;
103
104 if (pcibios_fw_addr_done)
105 return 0;
106
107 spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
108 map = pcibios_fwaddrmap_lookup(dev);
109 if (map)
110 fw_addr = map->fw_addr[idx];
111 spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
112
113 return fw_addr;
114}
115
116static void __init pcibios_fw_addr_list_del(void)
117{
118 unsigned long flags;
119 struct pcibios_fwaddrmap *entry, *next;
120
121 spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
122 list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
123 list_del(&entry->list);
124 pci_dev_put(entry->dev);
125 kfree(entry);
126 }
127 spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
128 pcibios_fw_addr_done = true;
129}
130
131static int
132skip_isa_ioresource_align(struct pci_dev *dev) {
133
134 if ((pci_probe & PCI_CAN_SKIP_ISA_ALIGN) &&
135 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
136 return 1;
137 return 0;
138}
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153resource_size_t
154pcibios_align_resource(void *data, const struct resource *res,
155 resource_size_t size, resource_size_t align)
156{
157 struct pci_dev *dev = data;
158 resource_size_t start = res->start;
159
160 if (res->flags & IORESOURCE_IO) {
161 if (skip_isa_ioresource_align(dev))
162 return start;
163 if (start & 0x300)
164 start = (start + 0x3ff) & ~0x3ff;
165 } else if (res->flags & IORESOURCE_MEM) {
166
167 if (start < BIOS_END)
168 start = BIOS_END;
169 }
170 return start;
171}
172EXPORT_SYMBOL(pcibios_align_resource);
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
209{
210 int idx;
211 struct resource *r;
212
213 for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
214 r = &dev->resource[idx];
215 if (!r->flags)
216 continue;
217 if (r->parent)
218 continue;
219 if (!r->start || pci_claim_resource(dev, idx) < 0) {
220
221
222
223
224
225
226 r->start = r->end = 0;
227 r->flags = 0;
228 }
229 }
230}
231
232static void pcibios_allocate_bus_resources(struct pci_bus *bus)
233{
234 struct pci_bus *child;
235
236
237 if (bus->self)
238 pcibios_allocate_bridge_resources(bus->self);
239 list_for_each_entry(child, &bus->children, node)
240 pcibios_allocate_bus_resources(child);
241}
242
/*
 * Inclusive range of pci_dev resource indices to scan in
 * pcibios_allocate_dev_resources().
 */
struct pci_check_idx_range {
	int start;
	int end;
};
247
248static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass)
249{
250 int idx, disabled, i;
251 u16 command;
252 struct resource *r;
253
254 struct pci_check_idx_range idx_range[] = {
255 { PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
256#ifdef CONFIG_PCI_IOV
257 { PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
258#endif
259 };
260
261 pci_read_config_word(dev, PCI_COMMAND, &command);
262 for (i = 0; i < ARRAY_SIZE(idx_range); i++)
263 for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
264 r = &dev->resource[idx];
265 if (r->parent)
266 continue;
267 if (!r->start)
268 continue;
269 if (r->flags & IORESOURCE_IO)
270 disabled = !(command & PCI_COMMAND_IO);
271 else
272 disabled = !(command & PCI_COMMAND_MEMORY);
273 if (pass == disabled) {
274 dev_dbg(&dev->dev,
275 "BAR %d: reserving %pr (d=%d, p=%d)\n",
276 idx, r, disabled, pass);
277 if (pci_claim_resource(dev, idx) < 0) {
278 if (r->flags & IORESOURCE_PCI_FIXED) {
279 dev_info(&dev->dev, "BAR %d %pR is immovable\n",
280 idx, r);
281 } else {
282
283 pcibios_save_fw_addr(dev,
284 idx, r->start);
285 r->end -= r->start;
286 r->start = 0;
287 }
288 }
289 }
290 }
291 if (!pass) {
292 r = &dev->resource[PCI_ROM_RESOURCE];
293 if (r->flags & IORESOURCE_ROM_ENABLE) {
294
295
296 u32 reg;
297 dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
298 r->flags &= ~IORESOURCE_ROM_ENABLE;
299 pci_read_config_dword(dev, dev->rom_base_reg, ®);
300 pci_write_config_dword(dev, dev->rom_base_reg,
301 reg & ~PCI_ROM_ADDRESS_ENABLE);
302 }
303 }
304}
305
306static void pcibios_allocate_resources(struct pci_bus *bus, int pass)
307{
308 struct pci_dev *dev;
309 struct pci_bus *child;
310
311 list_for_each_entry(dev, &bus->devices, bus_list) {
312 pcibios_allocate_dev_resources(dev, pass);
313
314 child = dev->subordinate;
315 if (child)
316 pcibios_allocate_resources(child, pass);
317 }
318}
319
320static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
321{
322 struct resource *r;
323
324
325
326
327
328
329 r = &dev->resource[PCI_ROM_RESOURCE];
330 if (!r->flags || !r->start)
331 return;
332 if (r->parent)
333 return;
334
335 if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
336 r->end -= r->start;
337 r->start = 0;
338 }
339}
340static void pcibios_allocate_rom_resources(struct pci_bus *bus)
341{
342 struct pci_dev *dev;
343 struct pci_bus *child;
344
345 list_for_each_entry(dev, &bus->devices, bus_list) {
346 pcibios_allocate_dev_rom_resource(dev);
347
348 child = dev->subordinate;
349 if (child)
350 pcibios_allocate_rom_resources(child);
351 }
352}
353
354static int __init pcibios_assign_resources(void)
355{
356 struct pci_bus *bus;
357
358 if (!(pci_probe & PCI_ASSIGN_ROMS))
359 list_for_each_entry(bus, &pci_root_buses, node)
360 pcibios_allocate_rom_resources(bus);
361
362 pci_assign_unassigned_resources();
363 pcibios_fw_addr_list_del();
364
365 return 0;
366}
367
368
369
370
371
372fs_initcall(pcibios_assign_resources);
373
/*
 * Survey and claim firmware-assigned resources for a single root bus:
 * bridge windows first, then device BARs of enabled devices (pass 0)
 * and disabled devices (pass 1), then ROMs unless they are to be
 * reassigned (PCI_ASSIGN_ROMS).
 */
void pcibios_resource_survey_bus(struct pci_bus *bus)
{
	dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n");

	pcibios_allocate_bus_resources(bus);

	pcibios_allocate_resources(bus, 0);
	pcibios_allocate_resources(bus, 1);

	if (!(pci_probe & PCI_ASSIGN_ROMS))
		pcibios_allocate_rom_resources(bus);
}
386
/*
 * Boot-time resource survey over all root buses: claim bridge windows,
 * then device BARs in two passes (enabled first, then disabled), then
 * reserve late e820 regions and insert IO-APIC resources so future
 * allocations avoid them.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *bus;

	DBG("PCI: Allocating resources\n");

	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_bus_resources(bus);

	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_resources(bus, 0);
	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_resources(bus, 1);

	e820_reserve_resources_late();

	/* Insert IO-APIC address ranges after PCI claims, so PCI
	 * allocations done above take precedence over them. */
	ioapic_insert_resources();
}
409
/* VMA ops for PCI mmaps: allow generic physical-memory access
 * (e.g. via ptrace / access_process_vm). */
static const struct vm_operations_struct pci_mmap_ops = {
	.access = generic_access_phys,
};
413
/*
 * Map a PCI memory resource into user space.
 *
 * Only memory-space mappings are supported (pci_mmap_io is rejected
 * with -EINVAL).  The page-protection cache attribute is chosen from
 * the PAT state: write-combining requires PAT; otherwise the mapping
 * defaults to uncached-minus on PAT systems or CPUs newer than the
 * 386 family check below.  Returns 0 on success, -EINVAL on an
 * unsupported request, -EAGAIN if remapping fails.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
		enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long prot;

	/* I/O port space cannot be mapped with ordinary page tables
	 * on this platform. */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	prot = pgprot_val(vma->vm_page_prot);

	/* Without PAT there is no way to honor a write-combining
	 * request, so refuse it rather than silently map cached. */
	if (!pat_enabled && write_combine)
		return -EINVAL;

	if (pat_enabled && write_combine)
		prot |= _PAGE_CACHE_WC;
	else if (pat_enabled || boot_cpu_data.x86 > 3)
		/* Uncached-minus: uncached unless an MTRR overrides it.
		 * NOTE(review): presumably mirrors the ioremap() default
		 * cache attribute — confirm against asm/pat.h. */
		prot |= _PAGE_CACHE_UC_MINUS;

	vma->vm_page_prot = __pgprot(prot);

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;

	vma->vm_ops = &pci_mmap_ops;

	return 0;
}
456