/*
 * HP Quicksilver AGP GART routines
 *
 * Copyright (c) 2006, Kyle McMartin <kyle@parisc-linux.org>
 *
 * Based on drivers/char/agpgart/hp-agp.c which is
 * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/klist.h>
#include <linux/agp_backend.h>
#include <linux/log2.h>
#include <linux/slab.h>

#include <asm/parisc-device.h>
#include <asm/ropes.h>

#include "agp.h"

#define DRVNAME "quicksilver"
#define DRVPFX  DRVNAME ": "

#define AGP8X_MODE_BIT  3
#define AGP8X_MODE      (1 << AGP8X_MODE_BIT)

static unsigned long
parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
                       int type);

static struct _parisc_agp_info {
        void __iomem *ioc_regs;
        void __iomem *lba_regs;

        int lba_cap_offset;

        u64 *gatt;
        u64 gatt_entries;

        u64 gart_base;
        u64 gart_size;

        int io_page_size;
        int io_pages_per_kpage;
} parisc_agp_info;

static struct gatt_mask parisc_agp_masks[] =
{
        {
                .mask = SBA_PDIR_VALID_BIT,
                .type = 0
        }
};

static struct aper_size_info_fixed parisc_agp_sizes[] =
{
        {0, 0, 0},
};

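/*
 * Report the aperture size in megabytes.  The single fixed entry in
 * parisc_agp_sizes[] is filled in here from the GART size probed at
 * IOC init time.
 */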
static int
parisc_agp_fetch_size(void)
{
        int size;

        size = parisc_agp_info.gart_size / MB(1);
        parisc_agp_sizes[0].size = size;
        agp_bridge->current_size = (void *) &parisc_agp_sizes[0];

        return size;
}

static int
parisc_agp_configure(void)
{
        struct _parisc_agp_info *info = &parisc_agp_info;

        agp_bridge->gart_bus_addr = info->gart_base;
        agp_bridge->capndx = info->lba_cap_offset;
        agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS);

        return 0;
}

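/* Flush the IOC's I/O TLB by writing the GART range to the PCOM register. */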
static void
parisc_agp_tlbflush(struct agp_memory *mem)
{
        struct _parisc_agp_info *info = &parisc_agp_info;

        writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
        readq(info->ioc_regs+IOC_PCOM);
}

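/*
 * The GATT is the tail of the IO PDIR that sba_iommu already set up;
 * "creating" it just means pointing every entry at the scratch page.
 */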
static int
parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
        struct _parisc_agp_info *info = &parisc_agp_info;
        int i;

        for (i = 0; i < info->gatt_entries; i++) {
                info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
        }

        return 0;
}

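/*
 * The table belongs to sba_iommu, so there is nothing to free.  Restore
 * the cookie in the first entry so the reserved range can be found again.
 */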
static int
parisc_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
        struct _parisc_agp_info *info = &parisc_agp_info;

        info->gatt[0] = SBA_AGPGART_COOKIE;

        return 0;
}

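/*
 * Map each kernel page onto one or more IO pages (PAGE_SIZE may be a
 * multiple of the IOC page size), refusing if any target GATT slot is
 * already in use.
 */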
static int
parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        struct _parisc_agp_info *info = &parisc_agp_info;
        int i, k;
        off_t j, io_pg_start;
        int io_pg_count;

        if (type != mem->type ||
                agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
                return -EINVAL;
        }

        io_pg_start = info->io_pages_per_kpage * pg_start;
        io_pg_count = info->io_pages_per_kpage * mem->page_count;
        if ((io_pg_start + io_pg_count) > info->gatt_entries) {
                return -EINVAL;
        }

        j = io_pg_start;
        while (j < (io_pg_start + io_pg_count)) {
                if (info->gatt[j])
                        return -EBUSY;
                j++;
        }

        if (!mem->is_flushed) {
                global_cache_flush();
                mem->is_flushed = true;
        }

        for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
                unsigned long paddr;

                paddr = page_to_phys(mem->pages[i]);
                for (k = 0;
                     k < info->io_pages_per_kpage;
                     k++, j++, paddr += info->io_page_size) {
                        info->gatt[j] =
                                parisc_agp_mask_memory(agp_bridge,
                                                       paddr, type);
                }
        }

        agp_bridge->driver->tlb_flush(mem);

        return 0;
}

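/* Undo parisc_agp_insert_memory(): point the GATT slots back at the scratch page. */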
static int
parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        struct _parisc_agp_info *info = &parisc_agp_info;
        int i, io_pg_start, io_pg_count;

        if (type != mem->type ||
                agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
                return -EINVAL;
        }

        io_pg_start = info->io_pages_per_kpage * pg_start;
        io_pg_count = info->io_pages_per_kpage * mem->page_count;
        for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
                info->gatt[i] = agp_bridge->scratch_page;
        }

        agp_bridge->driver->tlb_flush(mem);
        return 0;
}

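/* Turn a physical address into an IO PDIR entry by setting the SBA valid bit. */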
static unsigned long
parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
                       int type)
{
        return SBA_PDIR_VALID_BIT | addr;
}

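/*
 * Merge the requested mode with the bridge and device capabilities, set
 * the AGP enable bit, and write the command to the LBA and the device.
 */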
static void
parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
        struct _parisc_agp_info *info = &parisc_agp_info;
        u32 command;

        command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS);

        command = agp_collect_device_status(bridge, mode, command);
        command |= 0x00000100;

        writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND);

        agp_device_command(command, (mode & AGP8X_MODE) != 0);
}

static const struct agp_bridge_driver parisc_agp_driver = {
        .owner                  = THIS_MODULE,
        .size_type              = FIXED_APER_SIZE,
        .configure              = parisc_agp_configure,
        .fetch_size             = parisc_agp_fetch_size,
        .tlb_flush              = parisc_agp_tlbflush,
        .mask_memory            = parisc_agp_mask_memory,
        .masks                  = parisc_agp_masks,
        .agp_enable             = parisc_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = parisc_agp_create_gatt_table,
        .free_gatt_table        = parisc_agp_free_gatt_table,
        .insert_memory          = parisc_agp_insert_memory,
        .remove_memory          = parisc_agp_remove_memory,
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
        .cant_use_aperture      = true,
};

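/*
 * Locate the GART within the IO PDIR that sba_iommu built: read the IO
 * page size and IOVA base from the IOC, then verify the reserved range
 * by checking for SBA_AGPGART_COOKIE in its first entry.
 */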
static int __init
agp_ioc_init(void __iomem *ioc_regs)
{
        struct _parisc_agp_info *info = &parisc_agp_info;
        u64 iova_base, *io_pdir, io_tlb_ps;
        int io_tlb_shift;

        printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");

        info->ioc_regs = ioc_regs;

        io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG);
        switch (io_tlb_ps) {
        case 0: io_tlb_shift = 12; break;
        case 1: io_tlb_shift = 13; break;
        case 2: io_tlb_shift = 14; break;
        case 3: io_tlb_shift = 16; break;
        default:
                printk(KERN_ERR DRVPFX "Invalid IOTLB page size "
                       "configuration 0x%llx\n", io_tlb_ps);
                info->gatt = NULL;
                info->gatt_entries = 0;
                return -ENODEV;
        }
        info->io_page_size = 1 << io_tlb_shift;
        info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size;

        iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1;
        info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE;

        info->gart_size = PLUTO_GART_SIZE;
        info->gatt_entries = info->gart_size / info->io_page_size;

        io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE));
        info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT];

        if (info->gatt[0] != SBA_AGPGART_COOKIE) {
                info->gatt = NULL;
                info->gatt_entries = 0;
                printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; "
                       "GART disabled\n");
                return -ENODEV;
        }

        return 0;
}

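/*
 * Walk the LBA's PCI capability list by hand, via its memory-mapped
 * status/config registers, to find the offset of the given capability.
 */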
static int
lba_find_capability(int cap)
{
        struct _parisc_agp_info *info = &parisc_agp_info;
        u16 status;
        u8 pos, id;
        int ttl = 48;

        status = readw(info->lba_regs + PCI_STATUS);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;
        pos = readb(info->lba_regs + PCI_CAPABILITY_LIST);
        while (ttl-- && pos >= 0x40) {
                pos &= ~3;
                id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT);
        }
        return 0;
}

static int __init
agp_lba_init(void __iomem *lba_hpa)
{
        struct _parisc_agp_info *info = &parisc_agp_info;
        int cap;

        info->lba_regs = lba_hpa;
        info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP);

        cap = readl(lba_hpa + info->lba_cap_offset) & 0xff;
        if (cap != PCI_CAP_ID_AGP) {
                printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n",
                       cap, info->lba_cap_offset);
                return -ENODEV;
        }

        return 0;
}

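/*
 * Tie it together: probe the IOC and the LBA, then register an AGP
 * bridge backed by a fake PCI device so the agpgart core has a device
 * to attach to.
 */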
static int __init
parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
{
        struct pci_dev *fake_bridge_dev = NULL;
        struct agp_bridge_data *bridge;
        int error = 0;

        fake_bridge_dev = pci_alloc_dev(NULL);
        if (!fake_bridge_dev) {
                error = -ENOMEM;
                goto fail;
        }

        error = agp_ioc_init(ioc_hpa);
        if (error)
                goto fail;

        error = agp_lba_init(lba_hpa);
        if (error)
                goto fail;

        bridge = agp_alloc_bridge();
        if (!bridge) {
                error = -ENOMEM;
                goto fail;
        }
        bridge->driver = &parisc_agp_driver;

        fake_bridge_dev->vendor = PCI_VENDOR_ID_HP;
        fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA;
        bridge->dev = fake_bridge_dev;

        error = agp_add_bridge(bridge);
        if (error)
                goto fail;
        return 0;

fail:
        kfree(fake_bridge_dev);
        return error;
}

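/* device_for_each_child() callback: remember the Quicksilver LBA if present. */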
static int
find_quicksilver(struct device *dev, void *data)
{
        struct parisc_device **lba = data;
        struct parisc_device *padev = to_parisc_device(dev);

        if (IS_QUICKSILVER(padev))
                *lba = padev;

        return 0;
}

static int __init
parisc_agp_init(void)
{
        extern struct sba_device *sba_list;

        int err = -1;
        struct parisc_device *sba = NULL, *lba = NULL;
        struct lba_device *lbadev = NULL;

        if (!sba_list)
                goto out;

        /* Find our parent Pluto */
        sba = sba_list->dev;
        if (!IS_PLUTO(sba)) {
                printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n");
                goto out;
        }

        /* Now search our Pluto for our precious AGP device... */
        device_for_each_child(&sba->dev, &lba, find_quicksilver);

        if (!lba) {
                printk(KERN_INFO DRVPFX "No AGP devices found.\n");
                goto out;
        }

        lbadev = parisc_get_drvdata(lba);

        /* Set up the GART from the Pluto's IOC and the Quicksilver's registers */
        parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);

        return 0;

out:
        return err;
}

module_init(parisc_agp_init);

MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
MODULE_LICENSE("GPL");