/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/timer.h>

void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
	if (pmu && pmu->func->pgob)
		pmu->func->pgob(pmu, enable);
}

int
nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
	      u32 process, u32 message, u32 data0, u32 data1)
{
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr;

	mutex_lock(&subdev->mutex);

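	/* wait for a free slot in the fifo */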
	addr = nvkm_rd32(device, 0x10a4a0);
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x10a4b0);
		if (tmp != (addr ^ 8))
			break;
	) < 0) {
		mutex_unlock(&subdev->mutex);
		return -EBUSY;
	}

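	/* we currently only support a single process at a time waiting
	 * on a synchronous reply: tell the receive handler what we're
	 * waiting for, protected by the subdev mutex held above
	 */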
	if (reply) {
		pmu->recv.message = message;
		pmu->recv.process = process;
	}

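	/* acquire data segment access */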
	do {
		nvkm_wr32(device, 0x10a580, 0x00000001);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);

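	/* write the packet */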
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
				pmu->send.base));
	nvkm_wr32(device, 0x10a1c4, process);
	nvkm_wr32(device, 0x10a1c4, message);
	nvkm_wr32(device, 0x10a1c4, data0);
	nvkm_wr32(device, 0x10a1c4, data1);
	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);

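	/* release data segment access */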
	nvkm_wr32(device, 0x10a580, 0x00000000);

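	/* wait (if necessary) for the synchronous reply */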
	if (reply) {
		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
		reply[0] = pmu->recv.data[0];
		reply[1] = pmu->recv.data[1];
	}

	mutex_unlock(&subdev->mutex);
	return 0;
}

static void
nvkm_pmu_recv(struct work_struct *work)
{
	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 process, message, data0, data1;

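	/* nothing to do if GET == PUT */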
	u32 addr = nvkm_rd32(device, 0x10a4cc);
	if (addr == nvkm_rd32(device, 0x10a4c8))
		return;

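	/* acquire data segment access */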
	do {
		nvkm_wr32(device, 0x10a580, 0x00000002);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);

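	/* read the packet */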
	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
				pmu->recv.base));
	process = nvkm_rd32(device, 0x10a1c4);
	message = nvkm_rd32(device, 0x10a1c4);
	data0 = nvkm_rd32(device, 0x10a1c4);
	data1 = nvkm_rd32(device, 0x10a1c4);
	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);

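	/* release data segment access */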
	nvkm_wr32(device, 0x10a580, 0x00000000);

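	/* wake process if it's waiting on a synchronous reply */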
	if (pmu->recv.process) {
		if (process == pmu->recv.process &&
		    message == pmu->recv.message) {
			pmu->recv.data[0] = data0;
			pmu->recv.data[1] = data1;
			pmu->recv.process = 0;
			wake_up(&pmu->recv.wait);
			return;
		}
	}

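	/* right now there are no other expected responses from the engine,
	 * so assume that any unexpected message is an error
	 */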
	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
		  (char)((process & 0x000000ff) >>  0),
		  (char)((process & 0x0000ff00) >>  8),
		  (char)((process & 0x00ff0000) >> 16),
		  (char)((process & 0xff000000) >> 24),
		  process, message, data0, data1);
}

static void
nvkm_pmu_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
	u32 disp = nvkm_rd32(device, 0x10a01c);
	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);

	if (intr & 0x00000020) {
		u32 stat = nvkm_rd32(device, 0x10a16c);
		if (stat & 0x80000000) {
			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
				   stat & 0x00ffffff,
				   nvkm_rd32(device, 0x10a168));
			nvkm_wr32(device, 0x10a16c, 0x00000000);
			intr &= ~0x00000020;
		}
	}

	if (intr & 0x00000040) {
		schedule_work(&pmu->recv.work);
		nvkm_wr32(device, 0x10a004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr & 0x00000080) {
		nvkm_info(subdev, "wr32 %06x %08x\n",
			  nvkm_rd32(device, 0x10a7a0),
			  nvkm_rd32(device, 0x10a7a4));
		nvkm_wr32(device, 0x10a004, 0x00000080);
		intr &= ~0x00000080;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x10a004, intr);
	}
}

static int
nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;

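	/* disable message interrupts, then flush any queued receive work */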
	nvkm_wr32(device, 0x10a014, 0x00000060);
	flush_work(&pmu->recv.work);
	return 0;
}

static int
nvkm_pmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
	struct nvkm_device *device = pmu->subdev.device;
	int i;

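	/* prevent previous ucode from running, wait for idle, reset */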
	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
	nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x10a04c))
			break;
	);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
	nvkm_rd32(device, 0x000200);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			break;
	);

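	/* upload data segment */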
	nvkm_wr32(device, 0x10a1c0, 0x01000000);
	for (i = 0; i < pmu->func->data.size / 4; i++)
		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);

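	/* upload code segment */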
	nvkm_wr32(device, 0x10a180, 0x01000000);
	for (i = 0; i < pmu->func->code.size / 4; i++) {
		if ((i & 0x3f) == 0)
			nvkm_wr32(device, 0x10a188, i >> 6);
		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
	}

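	/* start it running */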
	nvkm_wr32(device, 0x10a10c, 0x00000000);
	nvkm_wr32(device, 0x10a104, 0x00000000);
	nvkm_wr32(device, 0x10a100, 0x00000002);

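	/* wait for valid host->pmu ring configuration */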
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4d0))
			break;
	) < 0)
		return -EBUSY;
	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;

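	/* wait for valid pmu->host ring configuration */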
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x10a4dc))
			break;
	) < 0)
		return -EBUSY;
	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;

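	/* enable interrupts; 0xe0 appears to select the message, UAS-fault
	 * and debug-wr32 bits handled by nvkm_pmu_intr() above
	 */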
	nvkm_wr32(device, 0x10a010, 0x000000e0);
	return 0;
}

static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
	return nvkm_pmu(subdev);
}

static const struct nvkm_subdev_func
nvkm_pmu = {
	.dtor = nvkm_pmu_dtor,
	.init = nvkm_pmu_init,
	.fini = nvkm_pmu_fini,
	.intr = nvkm_pmu_intr,
};

int
nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_pmu **ppmu)
{
	struct nvkm_pmu *pmu;
	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
	pmu->func = func;
	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
	init_waitqueue_head(&pmu->recv.wait);
	return 0;
}