1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/pci.h>
26#include <linux/slab.h>
27#include <linux/dma-mapping.h>
28#include <linux/export.h>
29#include <drm/drm_pci.h>
30#include <drm/drmP.h>
31#include "drm_internal.h"
32#include "drm_legacy.h"
33
34
35
36
37
38
39
40
41
42
43
44
45
46drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
47{
48 drm_dma_handle_t *dmah;
49 unsigned long addr;
50 size_t sz;
51
52
53
54
55
56 if (align > size)
57 return NULL;
58
59 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
60 if (!dmah)
61 return NULL;
62
63 dmah->size = size;
64 dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
65
66 if (dmah->vaddr == NULL) {
67 kfree(dmah);
68 return NULL;
69 }
70
71 memset(dmah->vaddr, 0, size);
72
73
74
75 for (addr = (unsigned long)dmah->vaddr, sz = size;
76 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
77 SetPageReserved(virt_to_page((void *)addr));
78 }
79
80 return dmah;
81}
82
83EXPORT_SYMBOL(drm_pci_alloc);
84
85
86
87
88
89
90void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
91{
92 unsigned long addr;
93 size_t sz;
94
95 if (dmah->vaddr) {
96
97
98 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
99 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
100 ClearPageReserved(virt_to_page((void *)addr));
101 }
102 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
103 dmah->busaddr);
104 }
105}
106
107
108
109
110
111
112
113
114
/**
 * drm_pci_free - Free a PCI-consistent memory block
 * @dev: DRM device
 * @dmah: handle returned by drm_pci_alloc()
 *
 * Frees the DMA memory and the handle itself.
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_legacy_pci_free(dev, dmah);
	kfree(dmah);
}

EXPORT_SYMBOL(drm_pci_free);
122
123#ifdef CONFIG_PCI
124
/*
 * Return the PCI domain number to expose in the busid for @dev.
 *
 * On everything except alpha, interface versions older than 1.4 report
 * domain 0 so the busid format stays compatible with old userspace.
 */
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
	/* Old interface versions predate multi-domain busids; pretend
	 * everything lives in domain 0 for them.  Alpha always uses the
	 * real domain number — presumably because of its hose layout;
	 * NOTE(review): confirm against the alpha PCI code. */
	if (dev->if_version < 0x10004)
		return 0;
#endif

	return pci_domain_nr(dev->pdev->bus);
}
138
139int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
140{
141 master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
142 drm_get_pci_domain(dev),
143 dev->pdev->bus->number,
144 PCI_SLOT(dev->pdev->devfn),
145 PCI_FUNC(dev->pdev->devfn));
146 if (!master->unique)
147 return -ENOMEM;
148
149 master->unique_len = strlen(master->unique);
150 return 0;
151}
152
153static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
154{
155 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
156 (p->busnum & 0xff) != dev->pdev->bus->number ||
157 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
158 return -EINVAL;
159
160 p->irq = dev->pdev->irq;
161
162 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
163 p->irq);
164 return 0;
165}
166
167
168
169
170
171
172
173
174
175
176
177
178
/*
 * IRQ_BUSID ioctl: look up the IRQ of the device identified by the
 * bus/slot/function in @data.  Only available to legacy (UMS) drivers
 * that declare DRIVER_HAVE_IRQ; returns -EINVAL otherwise.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *p = data;

	/* UMS-only ioctl; KMS drivers never expose it. */
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EINVAL;

	/* A legacy device without an underlying PCI device is a driver bug. */
	if (WARN_ON(!dev->pdev))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	return drm_pci_irq_by_busid(dev, p);
}
196
197static void drm_pci_agp_init(struct drm_device *dev)
198{
199 if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
200 if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
201 dev->agp = drm_agp_init(dev);
202 if (dev->agp) {
203 dev->agp->agp_mtrr = arch_phys_wc_add(
204 dev->agp->agp_info.aper_base,
205 dev->agp->agp_info.aper_size *
206 1024 * 1024);
207 }
208 }
209}
210
211void drm_pci_agp_destroy(struct drm_device *dev)
212{
213 if (dev->agp) {
214 arch_phys_wc_del(dev->agp->agp_mtrr);
215 drm_legacy_agp_clear(dev);
216 kfree(dev->agp);
217 dev->agp = NULL;
218 }
219}
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
/**
 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
 * @pdev: PCI device
 * @ent: matching entry from the PCI id table
 * @driver: DRM driver handling the device
 *
 * Allocates a drm_device for @pdev, enables the device, sets up AGP if
 * applicable and registers it with the DRM core.  @ent->driver_data is
 * passed through to drm_dev_register() as the driver-private flags.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		    struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = drm_dev_alloc(driver, &pdev->dev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;

	dev->pdev = pdev;
#ifdef __alpha__
	/* Alpha stashes the controller ("hose") in sysdata. */
	dev->hose = pdev->sysdata;
#endif

	/* Only KMS drivers own the drvdata; legacy drivers share the
	 * device with other stealth-attached drivers. */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		pci_set_drvdata(pdev, dev);

	drm_pci_agp_init(dev);

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		goto err_agp;

	/* Legacy (UMS) devices are tracked per driver so that
	 * drm_legacy_pci_exit() can unwind them at module unload. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);

	return 0;

err_agp:
	drm_pci_agp_destroy(dev);
	pci_disable_device(pdev);
err_free:
	drm_dev_put(dev);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
281
282
283
284
285
286
287
288
289
290
/**
 * drm_legacy_pci_init - shadow-attach a legacy DRM driver
 * @driver: DRM driver
 * @pdriver: PCI driver with the id table to match against
 *
 * Legacy (UMS) drivers do not bind through the normal PCI probe path;
 * instead every device matching @pdriver's id table is looked up and
 * manually handed to drm_get_pci_dev() ("stealth mode").
 *
 * Return: 0 on success or -EINVAL if @driver is not a legacy driver.
 */
int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *pid;
	int i;

	DRM_DEBUG("\n");

	if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
		return -EINVAL;

	/* Walk every id-table entry and probe all matching devices. */
	INIT_LIST_HEAD(&driver->legacy_dev_list);
	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
		pid = &pdriver->id_table[i];

		/* pci_get_subsys() drops the previous iteration's
		 * reference and takes one on the device it returns, so
		 * the loop itself is refcount-neutral. */
		pdev = NULL;
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			if ((pdev->class & pid->class_mask) != pid->class)
				continue;

			/* Stealth mode requires a manual probe; take an
			 * extra reference for the attached drm_device. */
			pci_dev_get(pdev);
			drm_get_pci_dev(pdev, pid, driver);
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_legacy_pci_init);
328
329int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
330{
331 struct pci_dev *root;
332 u32 lnkcap, lnkcap2;
333
334 *mask = 0;
335 if (!dev->pdev)
336 return -EINVAL;
337
338 root = dev->pdev->bus->self;
339
340
341 if (root->vendor == PCI_VENDOR_ID_VIA ||
342 root->vendor == PCI_VENDOR_ID_SERVERWORKS)
343 return -EINVAL;
344
345 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
346 pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
347
348 if (lnkcap2) {
349 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
350 *mask |= DRM_PCIE_SPEED_25;
351 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
352 *mask |= DRM_PCIE_SPEED_50;
353 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
354 *mask |= DRM_PCIE_SPEED_80;
355 } else {
356 if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
357 *mask |= DRM_PCIE_SPEED_25;
358 if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
359 *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
360 }
361
362 DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
363 return 0;
364}
365EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
366
367int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
368{
369 struct pci_dev *root;
370 u32 lnkcap;
371
372 *mlw = 0;
373 if (!dev->pdev)
374 return -EINVAL;
375
376 root = dev->pdev->bus->self;
377
378 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
379
380 *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
381
382 DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
383 return 0;
384}
385EXPORT_SYMBOL(drm_pcie_get_max_link_width);
386
387#else
388
389void drm_pci_agp_destroy(struct drm_device *dev) {}
390
/* Without CONFIG_PCI a busid-based IRQ lookup can never succeed. */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	return -EINVAL;
}
396#endif
397
398
399
400
401
402
403
404
405
406void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
407{
408 struct drm_device *dev, *tmp;
409 DRM_DEBUG("\n");
410
411 if (!(driver->driver_features & DRIVER_LEGACY)) {
412 WARN_ON(1);
413 } else {
414 list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
415 legacy_dev_list) {
416 list_del(&dev->legacy_dev_list);
417 drm_put_dev(dev);
418 }
419 }
420 DRM_INFO("Module unloaded\n");
421}
422EXPORT_SYMBOL(drm_legacy_pci_exit);
423