1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/kernel.h>
23#include <linux/irq.h>
24#include <linux/pci.h>
25#include <linux/spinlock.h>
26
27#include <asm/mach/pci.h>
28#include <asm/mach-types.h>
29
30#include <mach/nanoengine.h>
31#include <mach/hardware.h>
32
33static DEFINE_SPINLOCK(nano_lock);
34
35static int nanoengine_get_pci_address(struct pci_bus *bus,
36 unsigned int devfn, int where, unsigned long *address)
37{
38 int ret = PCIBIOS_DEVICE_NOT_FOUND;
39 unsigned int busnr = bus->number;
40
41 *address = NANO_PCI_CONFIG_SPACE_VIRT +
42 ((bus->number << 16) | (devfn << 8) | (where & ~3));
43
44 ret = (busnr > 255 || devfn > 255 || where > 255) ?
45 PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
46
47 return ret;
48}
49
50static int nanoengine_read_config(struct pci_bus *bus, unsigned int devfn, int where,
51 int size, u32 *val)
52{
53 int ret;
54 unsigned long address;
55 unsigned long flags;
56 u32 v;
57
58
59
60
61 if (bus->number != 0 || (devfn >> 3) != 0) {
62 v = -1;
63 nanoengine_get_pci_address(bus, devfn, where, &address);
64 goto exit_function;
65 }
66
67 spin_lock_irqsave(&nano_lock, flags);
68
69 ret = nanoengine_get_pci_address(bus, devfn, where, &address);
70 if (ret != PCIBIOS_SUCCESSFUL)
71 return ret;
72 v = __raw_readl(address);
73
74 spin_unlock_irqrestore(&nano_lock, flags);
75
76 v >>= ((where & 3) * 8);
77 v &= (unsigned long)(-1) >> ((4 - size) * 8);
78
79exit_function:
80 *val = v;
81 return PCIBIOS_SUCCESSFUL;
82}
83
84static int nanoengine_write_config(struct pci_bus *bus, unsigned int devfn, int where,
85 int size, u32 val)
86{
87 int ret;
88 unsigned long address;
89 unsigned long flags;
90 unsigned shift;
91 u32 v;
92
93 shift = (where & 3) * 8;
94
95 spin_lock_irqsave(&nano_lock, flags);
96
97 ret = nanoengine_get_pci_address(bus, devfn, where, &address);
98 if (ret != PCIBIOS_SUCCESSFUL)
99 return ret;
100 v = __raw_readl(address);
101 switch (size) {
102 case 1:
103 v &= ~(0xFF << shift);
104 v |= val << shift;
105 break;
106 case 2:
107 v &= ~(0xFFFF << shift);
108 v |= val << shift;
109 break;
110 case 4:
111 v = val;
112 break;
113 }
114 __raw_writel(v, address);
115
116 spin_unlock_irqrestore(&nano_lock, flags);
117
118 return PCIBIOS_SUCCESSFUL;
119}
120
/*
 * Configuration-space accessors handed to the ARM PCI core; all config
 * reads and writes go through the nanoEngine bridge helpers above.
 */
static struct pci_ops pci_nano_ops = {
	.read = nanoengine_read_config,
	.write = nanoengine_write_config,
};
125
/*
 * IRQ mapping callback for the PCI core: every slot/pin combination on
 * the nanoEngine maps to the same GPIO-based PCI interrupt.
 */
static int __init pci_nanoengine_map_irq(const struct pci_dev *dev, u8 slot,
	u8 pin)
{
	return NANOENGINE_IRQ_GPIO_PCI;
}
131
/* PCI I/O port window: 0x400 bytes of port space starting at 0x400. */
static struct resource pci_io_ports =
	DEFINE_RES_IO_NAMED(0x400, 0x400, "PCI IO");
134
/*
 * Non-prefetchable PCI memory window; base and size come from the
 * platform definitions in <mach/nanoengine.h>.
 */
static struct resource pci_non_prefetchable_memory = {
	.name = "PCI non-prefetchable",
	.start = NANO_PCI_MEM_RW_PHYS,
	.end = NANO_PCI_MEM_RW_PHYS + NANO_PCI_MEM_RW_SIZE - 1,
	.flags = IORESOURCE_MEM,
};
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
/*
 * Prefetchable PCI memory window.
 * NOTE(review): 0x78000000 is a magic physical base with no platform
 * #define visible in this file — confirm it against the nanoEngine
 * memory map and consider giving it a named constant.
 */
static struct resource pci_prefetchable_memory = {
	.name = "PCI prefetchable",
	.start = 0x78000000,
	.end = 0x78000000 + NANO_PCI_MEM_RW_SIZE - 1,
	.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH,
};
219
220static int __init pci_nanoengine_setup_resources(struct pci_sys_data *sys)
221{
222 if (request_resource(&ioport_resource, &pci_io_ports)) {
223 printk(KERN_ERR "PCI: unable to allocate io port region\n");
224 return -EBUSY;
225 }
226 if (request_resource(&iomem_resource, &pci_non_prefetchable_memory)) {
227 release_resource(&pci_io_ports);
228 printk(KERN_ERR "PCI: unable to allocate non prefetchable\n");
229 return -EBUSY;
230 }
231 if (request_resource(&iomem_resource, &pci_prefetchable_memory)) {
232 release_resource(&pci_io_ports);
233 release_resource(&pci_non_prefetchable_memory);
234 printk(KERN_ERR "PCI: unable to allocate prefetchable\n");
235 return -EBUSY;
236 }
237 pci_add_resource_offset(&sys->resources, &pci_io_ports, sys->io_offset);
238 pci_add_resource_offset(&sys->resources,
239 &pci_non_prefetchable_memory, sys->mem_offset);
240 pci_add_resource_offset(&sys->resources,
241 &pci_prefetchable_memory, sys->mem_offset);
242
243 return 1;
244}
245
246int __init pci_nanoengine_setup(int nr, struct pci_sys_data *sys)
247{
248 int ret = 0;
249
250 pcibios_min_io = 0;
251 pcibios_min_mem = 0;
252
253 if (nr == 0) {
254 sys->mem_offset = NANO_PCI_MEM_RW_PHYS;
255 sys->io_offset = 0x400;
256 ret = pci_nanoengine_setup_resources(sys);
257
258
259
260 GPDR = (GPDR & ~GPIO_MBREQ) | GPIO_MBGNT;
261 GAFR |= GPIO_MBGNT | GPIO_MBREQ;
262 TUCR |= TUCR_MBGPIO;
263 }
264
265 return ret;
266}
267
/* Description of the single nanoEngine PCI controller for pci_common_init(). */
static struct hw_pci nanoengine_pci __initdata = {
	.map_irq = pci_nanoengine_map_irq,
	.nr_controllers = 1,
	.ops = &pci_nano_ops,
	.setup = pci_nanoengine_setup,
};
274
275static int __init nanoengine_pci_init(void)
276{
277 if (machine_is_nanoengine())
278 pci_common_init(&nanoengine_pci);
279 return 0;
280}
281
282subsys_initcall(nanoengine_pci_init);
283