#include "changf100.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/cl906f.h>
#include <nvif/unpack.h>

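/*
 * Offset of each engine's context pointer within a channel's instance
 * block; a return of zero means the engine has no context slot to bind.
 */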
static u32
gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.index) {
	case NVKM_ENGINE_SW    : return 0;
	case NVKM_ENGINE_GR    : return 0x0210;
	case NVKM_ENGINE_CE0   : return 0x0230;
	case NVKM_ENGINE_CE1   : return 0x0240;
	case NVKM_ENGINE_MSPDEC: return 0x0250;
	case NVKM_ENGINE_MSPPP : return 0x0260;
	case NVKM_ENGINE_MSVLD : return 0x0270;
	default:
		WARN_ON(1);
		return 0;
	}
}

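/*
 * Detach an engine's context from the channel: kick the channel off the
 * hardware, then clear the engine's context pointer in the instance block.
 */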
static int
gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_gpuobj *inst = chan->base.inst;
	int ret = 0;

	mutex_lock(&subdev->mutex);
	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		ret = -ETIMEDOUT;
	}
	mutex_unlock(&subdev->mutex);

	if (ret && suspend)
		return ret;

	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, 0x00000000);
		nvkm_wo32(inst, offset + 0x04, 0x00000000);
		nvkm_done(inst);
	}

	return ret;
}

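/*
 * Write the engine context address (mapped into the channel's VM during
 * engine_ctor) into the channel's instance block.
 */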
static int
gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;

	if (offset) {
		u64 addr = chan->engn[engine->subdev.index].vma.offset;
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
		nvkm_done(inst);
	}

	return 0;
}

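/* Unmap and destroy the per-channel engine context object. */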
static void
gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}

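/*
 * Bind the engine-specific context object and map it into the channel's
 * address space, ready for engine_init to point the instance block at it.
 */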
static int
gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	int engn = engine->subdev.index;
	int ret;

	if (!gf100_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
	if (ret)
		return ret;

	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
}

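/*
 * Take the channel offline: drop it from the runlist and clear its
 * channel registers in PFIFO.
 */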
static void
gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 coff = chan->base.chid * 8;

	if (!list_empty(&chan->head) && !chan->killed) {
		gf100_fifo_runlist_remove(fifo, chan);
		nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
		gf100_fifo_runlist_commit(fifo);
	}

	gf100_fifo_intr_engine(fifo);

	nvkm_wr32(device, 0x003000 + coff, 0x00000000);
}

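/*
 * Bring the channel online: program its instance pointer and, if it is
 * not already present, add it to the runlist.
 */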
static void
gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	struct gf100_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 addr = chan->base.inst->addr >> 12;
	u32 coff = chan->base.chid * 8;

	nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);

	if (list_empty(&chan->head) && !chan->killed) {
		gf100_fifo_runlist_insert(fifo, chan);
		nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
		gf100_fifo_runlist_commit(fifo);
	}
}

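/* Release the channel's VM reference and page directory. */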
static void *
gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
	nvkm_gpuobj_del(&chan->pgd);
	return chan;
}

static const struct nvkm_fifo_chan_func
gf100_fifo_gpfifo_func = {
	.dtor = gf100_fifo_gpfifo_dtor,
	.init = gf100_fifo_gpfifo_init,
	.fini = gf100_fifo_gpfifo_fini,
	.ntfy = g84_fifo_chan_ntfy,
	.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
	.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
	.engine_init = gf100_fifo_gpfifo_engine_init,
	.engine_fini = gf100_fifo_gpfifo_engine_fini,
};

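/*
 * FERMI_CHANNEL_GPFIFO constructor: validate the user's arguments, then
 * allocate the channel and fill in its page directory and FIFO context.
 */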
static int
gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		      void *data, u32 size, struct nvkm_object **pobject)
{
	union {
		struct fermi_channel_gpfifo_v0 v0;
	} *args = data;
	struct gf100_fifo *fifo = gf100_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *parent = oclass->parent;
	struct gf100_fifo_chan *chan;
	u64 usermem, ioffset, ilength;
	int ret = -ENOSYS, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.vm, args->v0.ioffset,
			   args->v0.ilength);
	} else
		return ret;

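	/* allocate the channel object */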
	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->fifo = fifo;
	INIT_LIST_HEAD(&chan->head);

	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
				  0x1000, 0x1000, true, args->v0.vm, 0,
				  (1ULL << NVKM_ENGINE_CE0) |
				  (1ULL << NVKM_ENGINE_CE1) |
				  (1ULL << NVKM_ENGINE_GR) |
				  (1ULL << NVKM_ENGINE_MSPDEC) |
				  (1ULL << NVKM_ENGINE_MSPPP) |
				  (1ULL << NVKM_ENGINE_MSVLD) |
				  (1ULL << NVKM_ENGINE_SW),
				  1, fifo->user.bar.offset, 0x1000,
				  oclass, &chan->base);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

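	/* allocate the page directory and point the instance block at it */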
	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
	if (ret)
		return ret;

	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
	nvkm_done(chan->base.inst);

	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
	if (ret)
		return ret;

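	/* clear the channel's control registers in the user area */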
	usermem = chan->base.chid * 0x1000;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

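	/* RAMFC: initial FIFO context for the channel */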
	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
					 (ilength << 16));
	nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
	nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
	nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080);
	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010);
	nvkm_done(chan->base.inst);
	return 0;
}

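/* Channel class advertised to userspace as FERMI_CHANNEL_GPFIFO. */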
const struct nvkm_fifo_chan_oclass
gf100_fifo_gpfifo_oclass = {
	.base.oclass = FERMI_CHANNEL_GPFIFO,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = gf100_fifo_gpfifo_new,
};