1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include "priv.h"
25#include "agp.h"
26
27#include <core/option.h>
28#include <core/pci.h>
29#include <subdev/mc.h>
30
31u32
32nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
33{
34 return pci->func->rd32(pci, addr);
35}
36
37void
38nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
39{
40 pci->func->wr08(pci, addr, data);
41}
42
43void
44nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
45{
46 pci->func->wr32(pci, addr, data);
47}
48
49u32
50nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
51{
52 u32 data = pci->func->rd32(pci, addr);
53 pci->func->wr32(pci, addr, (data & ~mask) | value);
54 return data;
55}
56
57void
58nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
59{
60 u32 data = nvkm_pci_rd32(pci, 0x0050);
61 if (shadow)
62 data |= 0x00000001;
63 else
64 data &= ~0x00000001;
65 nvkm_pci_wr32(pci, 0x0050, data);
66}
67
/* Top-level interrupt handler, registered with request_irq() in
 * nvkm_pci_init().  The line may be shared, so we report IRQ_HANDLED
 * only if the MC subdev says the interrupt was actually ours.
 *
 * Sequence: disarm master interrupts, re-arm MSI if in use (hardware
 * presumably needs this before further MSIs are delivered -- behavior
 * depends on the chipset's msi_rearm hook), dispatch to MC, re-arm.
 */
static irqreturn_t
nvkm_pci_intr(int irq, void *arg)
{
	struct nvkm_pci *pci = arg;
	struct nvkm_device *device = pci->subdev.device;
	bool handled = false;
	nvkm_mc_intr_unarm(device);
	if (pci->msi)
		pci->func->msi_rearm(pci);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
81
82static int
83nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
84{
85 struct nvkm_pci *pci = nvkm_pci(subdev);
86
87 if (pci->irq >= 0) {
88 free_irq(pci->irq, pci);
89 pci->irq = -1;
90 };
91
92 if (pci->agp.bridge)
93 nvkm_agp_fini(pci);
94
95 return 0;
96}
97
98static int
99nvkm_pci_preinit(struct nvkm_subdev *subdev)
100{
101 struct nvkm_pci *pci = nvkm_pci(subdev);
102 if (pci->agp.bridge)
103 nvkm_agp_preinit(pci);
104 return 0;
105}
106
107static int
108nvkm_pci_oneinit(struct nvkm_subdev *subdev)
109{
110 struct nvkm_pci *pci = nvkm_pci(subdev);
111 if (pci_is_pcie(pci->pdev))
112 return nvkm_pcie_oneinit(pci);
113 return 0;
114}
115
116static int
117nvkm_pci_init(struct nvkm_subdev *subdev)
118{
119 struct nvkm_pci *pci = nvkm_pci(subdev);
120 struct pci_dev *pdev = pci->pdev;
121 int ret;
122
123 if (pci->agp.bridge) {
124 ret = nvkm_agp_init(pci);
125 if (ret)
126 return ret;
127 } else if (pci_is_pcie(pci->pdev)) {
128 nvkm_pcie_init(pci);
129 }
130
131 if (pci->func->init)
132 pci->func->init(pci);
133
134 ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
135 if (ret)
136 return ret;
137
138 pci->irq = pdev->irq;
139 return ret;
140}
141
142static void *
143nvkm_pci_dtor(struct nvkm_subdev *subdev)
144{
145 struct nvkm_pci *pci = nvkm_pci(subdev);
146 nvkm_agp_dtor(pci);
147 if (pci->msi)
148 pci_disable_msi(pci->pdev);
149 return nvkm_pci(subdev);
150}
151
/* Subdev method table: lifecycle hooks the nvkm core dispatches to. */
static const struct nvkm_subdev_func
nvkm_pci_func = {
	.dtor = nvkm_pci_dtor,
	.oneinit = nvkm_pci_oneinit,
	.preinit = nvkm_pci_preinit,
	.init = nvkm_pci_init,
	.fini = nvkm_pci_fini,
};
160
/* Common constructor shared by the chipset-specific PCI subdev
 * implementations.
 *
 * Allocates and initializes the nvkm_pci object, records the Linux
 * pci_dev, and decides whether MSI should be used, in this order:
 * device-ID/chipset quirks, big-endian override, "NvMSI" config option,
 * then actual pci_enable_msi() (only when the chipset provides a
 * msi_rearm hook).  The IRQ itself is requested later, in init.
 *
 * Returns 0, or -ENOMEM if allocation fails.
 */
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
	      int index, struct nvkm_pci **ppci)
{
	struct nvkm_pci *pci;

	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev);
	pci->func = func;
	pci->pdev = device->func->pci(device)->pdev;
	pci->irq = -1;		/* no IRQ requested yet; see nvkm_pci_init() */
	pci->pcie.speed = -1;
	pci->pcie.width = -1;

	if (device->type == NVKM_DEVICE_AGP)
		nvkm_agp_ctor(pci);

	/* MSI quirk table: pci->msi stays false (from kzalloc) unless the
	 * default branch below turns it on.
	 */
	switch (pci->pdev->device & 0x0ff0) {
	case 0x00f0:
	case 0x02e0:
		/* these device-ID ranges never get MSI -- presumably a
		 * known-broken quirk; reason not evident here, confirm
		 * against upstream history
		 */
		break;
	default:
		switch (device->chipset) {
		case 0xaa:
			/* chipset 0xaa also excluded from MSI -- same
			 * caveat as above
			 */
			break;
		default:
			pci->msi = true;
			break;
		}
	}

	/* MSI forced off on big-endian builds, regardless of quirks. */
#ifdef __BIG_ENDIAN
	pci->msi = false;
#endif

	/* "NvMSI" config option overrides the computed default either way. */
	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
	if (pci->msi && func->msi_rearm) {
		/* only claim MSI if the kernel actually enabled it */
		pci->msi = pci_enable_msi(pci->pdev) == 0;
		if (pci->msi)
			nvkm_debug(&pci->subdev, "MSI enabled\n");
	} else {
		/* no rearm hook -> cannot service MSIs; fall back to INTx */
		pci->msi = false;
	}

	return 0;
}
210