#include <engine/falcon.h>

#include <core/gpuobj.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

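/* Enumerate the object classes supported by this falcon, taken from the
 * chipset-specific sclass table in nvkm_falcon_func.
 */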
static int
nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
{
	struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
	int c = 0;

	while (falcon->func->sclass[c].oclass) {
		if (c++ == index) {
			oclass->base = falcon->func->sclass[index];
			return index;
		}
	}

	return c;
}

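/* Channel context binding: allocate a 256-byte, zeroed GPU object inside
 * the channel's instance memory for the falcon's context state.
 */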
static int
nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
			int align, struct nvkm_gpuobj **pgpuobj)
{
	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
			       align, true, parent, pgpuobj);
}

static const struct nvkm_object_func
nvkm_falcon_cclass = {
	.bind = nvkm_falcon_cclass_bind,
};

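/* Top-level interrupt handler: look up the channel that owns the current
 * context, let the chipset-specific handler deal with its interrupt bit,
 * acknowledge "ucode halted", and log anything left over as an error.
 */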
static void
nvkm_falcon_intr(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 dest = nvkm_rd32(device, base + 0x01c);
	u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
	u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;

	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);

	if (intr & 0x00000040) {
		if (falcon->func->intr) {
			falcon->func->intr(falcon, chan);
			nvkm_wr32(device, base + 0x004, 0x00000040);
			intr &= ~0x00000040;
		}
	}

	if (intr & 0x00000010) {
		nvkm_debug(subdev, "ucode halted\n");
		nvkm_wr32(device, base + 0x004, 0x00000010);
		intr &= ~0x00000010;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, base + 0x004, intr);
	}

	nvkm_fifo_chan_put(device->fifo, flags, &chan);
}

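/* Engine fini: on final teardown, drop the bootstrap image and any
 * firmware loaded from disk; if the unit is still powered, clear the
 * enable bits set at init time and mask its interrupts.
 */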
static int
nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_device *device = falcon->engine.subdev.device;
	const u32 base = falcon->addr;

	if (!suspend) {
		nvkm_memory_unref(&falcon->core);
		if (falcon->external) {
			vfree(falcon->data.data);
			vfree(falcon->code.data);
			falcon->code.data = NULL;
		}
	}

	if (nvkm_mc_enabled(device, engine->subdev.index)) {
		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
		nvkm_wr32(device, base + 0x014, 0xffffffff);
	}
	return 0;
}

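/* Copy a firmware image into vmalloc()'d memory so it can outlive the
 * struct firmware it was loaded from.
 */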
static void *
vmemdup(const void *src, size_t len)
{
	void *p = vmalloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

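/* One-time setup: determine the falcon version, its "secret" level, and
 * the size limits of its code/data stores from the capability registers
 * (hardcoded on chipsets without the capability registers).
 */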
static int
nvkm_falcon_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 caps;

	/* determine falcon capabilities */
	if (device->chipset < 0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		falcon->version = 0;
		falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
	} else {
		caps = nvkm_rd32(device, base + 0x12c);
		falcon->version = (caps & 0x0000000f);
		falcon->secret = (caps & 0x00000030) >> 4;
	}

	caps = nvkm_rd32(device, base + 0x108);
	falcon->code.limit = (caps & 0x000001ff) << 8;
	falcon->data.limit = (caps & 0x0003fe00) >> 1;

	nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
	nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
	nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
	nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
	return 0;
}

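/* Engine init: wait for secure falcons to halt their bootstrap ucode,
 * load firmware if the implementation didn't supply any (either a single
 * "self-bootstrapping" image, or separate code/data segments), upload it
 * to the falcon, and start execution.
 */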
static int
nvkm_falcon_init(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct firmware *fw;
	char name[32] = "internal";
	const u32 base = falcon->addr;
	int ret, i;

	/* on secure falcons, wait for the ucode to signal that it has
	 * halted, then acknowledge the halt interrupt
	 */
	if (falcon->secret && falcon->version < 4) {
		if (!falcon->version) {
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, base + 0x008) & 0x00000010)
					break;
			);
		} else {
			nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
					break;
			);
		}
		nvkm_wr32(device, base + 0x004, 0x00000010);
	}

	/* disable all interrupts */
	nvkm_wr32(device, base + 0x014, 0xffffffff);

	/* no ucode provided by the engine implementation: first try to
	 * locate a "self-bootstrapping" firmware image on disk
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret == 0) {
			falcon->code.data = vmemdup(fw->data, fw->size);
			falcon->code.size = fw->size;
			falcon->data.data = NULL;
			falcon->data.size = 0;
			release_firmware(fw);
		}

		falcon->external = true;
	}

	/* failing that, try to load separate "static code/data segment"
	 * firmware images
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware data\n");
			return -ENODEV;
		}

		falcon->data.data = vmemdup(fw->data, fw->size);
		falcon->data.size = fw->size;
		release_firmware(fw);
		if (!falcon->data.data)
			return -ENOMEM;

		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware code\n");
			return -ENODEV;
		}

		falcon->code.data = vmemdup(fw->data, fw->size);
		falcon->code.size = fw->size;
		release_firmware(fw);
		if (!falcon->code.data)
			return -ENOMEM;
	}

	nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
		   "static code/data segments" : "self-bootstrapping");

	/* make sure any "self-bootstrapping" image is resident in memory
	 * the falcon can fetch from
	 */
	if (!falcon->data.data && !falcon->core) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      falcon->code.size, 256, false,
				      &falcon->core);
		if (ret) {
			nvkm_error(subdev, "core allocation failed, %d\n", ret);
			return ret;
		}

		nvkm_kmap(falcon->core);
		for (i = 0; i < falcon->code.size; i += 4)
			nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
		nvkm_done(falcon->core);
	}

	/* either point the falcon at the in-memory image, or upload the
	 * code segment through the code port
	 */
	if (falcon->core) {
		u64 addr = nvkm_memory_addr(falcon->core);
		if (device->card_type < NV_C0)
			nvkm_wr32(device, base + 0x618, 0x04000000);
		else
			nvkm_wr32(device, base + 0x618, 0x00000114);
		nvkm_wr32(device, base + 0x11c, 0);
		nvkm_wr32(device, base + 0x110, addr >> 8);
		nvkm_wr32(device, base + 0x114, 0);
		nvkm_wr32(device, base + 0x118, 0x00006610);
	} else {
		if (falcon->code.size > falcon->code.limit ||
		    falcon->data.size > falcon->data.limit) {
			nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
			return -EINVAL;
		}

		if (falcon->version < 3) {
			nvkm_wr32(device, base + 0xff8, 0x00100000);
			for (i = 0; i < falcon->code.size / 4; i++)
				nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
		} else {
			nvkm_wr32(device, base + 0x180, 0x01000000);
			for (i = 0; i < falcon->code.size / 4; i++) {
				if ((i & 0x3f) == 0)
					nvkm_wr32(device, base + 0x188, i >> 6);
				nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
			}
		}
	}

	/* upload the data segment (unless the falcon bootstraps itself),
	 * and zero the remainder of the data store
	 */
	if (falcon->version < 3) {
		nvkm_wr32(device, base + 0xff8, 0x00000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
		for (; i < falcon->data.limit; i += 4)
			nvkm_wr32(device, base + 0xff4, 0x00000000);
	} else {
		nvkm_wr32(device, base + 0x1c0, 0x01000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
		for (; i < falcon->data.limit / 4; i++)
			nvkm_wr32(device, base + 0x1c4, 0x00000000);
	}

	/* start it running */
	nvkm_wr32(device, base + 0x10c, 0x00000001);
	nvkm_wr32(device, base + 0x104, 0x00000000);
	nvkm_wr32(device, base + 0x100, 0x00000002);
	nvkm_wr32(device, base + 0x048, 0x00000003);

	if (falcon->func->init)
		falcon->func->init(falcon);
	return 0;
}

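/* Hand the containing nvkm_falcon back to the core for it to free. */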
static void *
nvkm_falcon_dtor(struct nvkm_engine *engine)
{
	return nvkm_falcon(engine);
}

static const struct nvkm_engine_func
nvkm_falcon = {
	.dtor = nvkm_falcon_dtor,
	.oneinit = nvkm_falcon_oneinit,
	.init = nvkm_falcon_init,
	.fini = nvkm_falcon_fini,
	.intr = nvkm_falcon_intr,
	.fifo.sclass = nvkm_falcon_oclass_get,
	.cclass = &nvkm_falcon_cclass,
};

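/* Common constructor for falcon-based engines: allocate the nvkm_falcon,
 * record the unit's MMIO base address and any built-in ucode supplied by
 * the implementation, then construct the base nvkm_engine.
 */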
int
nvkm_falcon_new_(const struct nvkm_falcon_func *func,
		 struct nvkm_device *device, int index, bool enable,
		 u32 addr, struct nvkm_engine **pengine)
{
	struct nvkm_falcon *falcon;

	if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	falcon->func = func;
	falcon->addr = addr;
	falcon->code.data = func->code.data;
	falcon->code.size = func->code.size;
	falcon->data.data = func->data.data;
	falcon->data.size = func->data.size;
	*pengine = &falcon->engine;

	return nvkm_engine_ctor(&nvkm_falcon, device, index,
				enable, &falcon->engine);
}