1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include "priv.h"
25#include "agp.h"
26
27#include <core/option.h>
28#include <core/pci.h>
29#include <subdev/mc.h>
30
31u32
32nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
33{
34 return pci->func->rd32(pci, addr);
35}
36
37void
38nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
39{
40 pci->func->wr08(pci, addr, data);
41}
42
43void
44nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
45{
46 pci->func->wr32(pci, addr, data);
47}
48
49u32
50nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
51{
52 u32 data = pci->func->rd32(pci, addr);
53 pci->func->wr32(pci, addr, (data & ~mask) | value);
54 return data;
55}
56
57void
58nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
59{
60 u32 data = nvkm_pci_rd32(pci, 0x0050);
61 if (shadow)
62 data |= 0x00000001;
63 else
64 data &= ~0x00000001;
65 nvkm_pci_wr32(pci, 0x0050, data);
66}
67
68static irqreturn_t
69nvkm_pci_intr(int irq, void *arg)
70{
71 struct nvkm_pci *pci = arg;
72 struct nvkm_mc *mc = pci->subdev.device->mc;
73 bool handled = false;
74 if (likely(mc)) {
75 nvkm_mc_intr_unarm(mc);
76 if (pci->msi)
77 pci->func->msi_rearm(pci);
78 nvkm_mc_intr(mc, &handled);
79 nvkm_mc_intr_rearm(mc);
80 }
81 return handled ? IRQ_HANDLED : IRQ_NONE;
82}
83
84static int
85nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
86{
87 struct nvkm_pci *pci = nvkm_pci(subdev);
88
89 if (pci->irq >= 0) {
90 free_irq(pci->irq, pci);
91 pci->irq = -1;
92 };
93
94 if (pci->agp.bridge)
95 nvkm_agp_fini(pci);
96
97 return 0;
98}
99
100static int
101nvkm_pci_preinit(struct nvkm_subdev *subdev)
102{
103 struct nvkm_pci *pci = nvkm_pci(subdev);
104 if (pci->agp.bridge)
105 nvkm_agp_preinit(pci);
106 return 0;
107}
108
109static int
110nvkm_pci_oneinit(struct nvkm_subdev *subdev)
111{
112 struct nvkm_pci *pci = nvkm_pci(subdev);
113 if (pci_is_pcie(pci->pdev))
114 return nvkm_pcie_oneinit(pci);
115 return 0;
116}
117
118static int
119nvkm_pci_init(struct nvkm_subdev *subdev)
120{
121 struct nvkm_pci *pci = nvkm_pci(subdev);
122 struct pci_dev *pdev = pci->pdev;
123 int ret;
124
125 if (pci->agp.bridge) {
126 ret = nvkm_agp_init(pci);
127 if (ret)
128 return ret;
129 } else if (pci_is_pcie(pci->pdev)) {
130 nvkm_pcie_init(pci);
131 }
132
133 if (pci->func->init)
134 pci->func->init(pci);
135
136 ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
137 if (ret)
138 return ret;
139
140 pci->irq = pdev->irq;
141 return ret;
142}
143
144static void *
145nvkm_pci_dtor(struct nvkm_subdev *subdev)
146{
147 struct nvkm_pci *pci = nvkm_pci(subdev);
148 nvkm_agp_dtor(pci);
149 if (pci->msi)
150 pci_disable_msi(pci->pdev);
151 return nvkm_pci(subdev);
152}
153
/* Lifecycle hooks wiring the PCI subdev into nvkm's common subdev core. */
static const struct nvkm_subdev_func
nvkm_pci_func = {
	.dtor = nvkm_pci_dtor,
	.oneinit = nvkm_pci_oneinit,
	.preinit = nvkm_pci_preinit,
	.init = nvkm_pci_init,
	.fini = nvkm_pci_fini,
};
162
/* Allocate and construct the PCI subdev: wires in the chip-specific
 * @func table, sets up AGP on AGP boards, applies per-device MSI quirks,
 * honours the "NvMSI" config option, and enables MSI when possible.
 * Returns 0 on success or -ENOMEM; *ppci receives the new subdev.
 */
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
	      int index, struct nvkm_pci **ppci)
{
	struct nvkm_pci *pci;

	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
	pci->func = func;
	pci->pdev = device->func->pci(device)->pdev;
	pci->irq = -1;		/* no IRQ requested yet; see nvkm_pci_init() */
	pci->pcie.speed = -1;	/* unknown until PCIe probing */
	pci->pcie.width = -1;

	if (device->type == NVKM_DEVICE_AGP)
		nvkm_agp_ctor(pci);

	/* Default MSI policy: on, except for the devices/chipsets below.
	 * NOTE(review): the masked device IDs (0x00f0/0x02e0) and chipset
	 * 0xaa presumably have broken MSI — confirm against hardware errata.
	 */
	switch (pci->pdev->device & 0x0ff0) {
	case 0x00f0:
	case 0x02e0:
		/* MSI left disabled for these device IDs. */
		break;
	default:
		switch (device->chipset) {
		case 0xaa:
			/* MSI left disabled for this chipset. */
			break;
		default:
			pci->msi = true;
			break;
		}
	}

	/* User override via the "NvMSI" config option. */
	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
	if (pci->msi && func->msi_rearm) {
		/* MSI is only usable when the backend can re-arm it. */
		pci->msi = pci_enable_msi(pci->pdev) == 0;
		if (pci->msi)
			nvkm_debug(&pci->subdev, "MSI enabled\n");
	} else {
		pci->msi = false;
	}

	return 0;
}
208