1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <core/client.h>
26#include <core/enum.h>
27#include <core/engctx.h>
28#include <core/object.h>
29
30#include <subdev/fb.h>
31#include <subdev/bios.h>
32
/* Private instance data for the NV50 PFB (framebuffer) subdev. */
struct nv50_fb_priv {
	struct nouveau_fb base;		/* must be first: code casts nouveau_fb* to nv50_fb_priv* */
	struct page *r100c08_page;	/* scratch page whose bus address is written to 0x100c08 */
	dma_addr_t r100c08;		/* DMA (bus) address of r100c08_page */
};
38
/* Per-memtype capability table, indexed by the 7-bit type code.
 * 0 = invalid/unsupported; non-zero values (1 or 2) are passed as the
 * "type" argument to nouveau_mm_head()/nouveau_mm_tail() in
 * nv50_fb_vram_new() — presumably selecting the allocator node type;
 * confirm against the nouveau_mm implementation.  Also consulted by
 * nv50_fb_memtype_valid() via bits 15:8 of a client memtype.
 */
static int types[0x80] = {
	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
49
50static bool
51nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
52{
53 return types[(memtype & 0xff00) >> 8] != 0;
54}
55
56static u32
57nv50_fb_vram_rblock(struct nouveau_fb *pfb)
58{
59 int i, parts, colbits, rowbitsa, rowbitsb, banks;
60 u64 rowsize, predicted;
61 u32 r0, r4, rt, ru, rblock_size;
62
63 r0 = nv_rd32(pfb, 0x100200);
64 r4 = nv_rd32(pfb, 0x100204);
65 rt = nv_rd32(pfb, 0x100250);
66 ru = nv_rd32(pfb, 0x001540);
67 nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
68
69 for (i = 0, parts = 0; i < 8; i++) {
70 if (ru & (0x00010000 << i))
71 parts++;
72 }
73
74 colbits = (r4 & 0x0000f000) >> 12;
75 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
76 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
77 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
78
79 rowsize = parts * banks * (1 << colbits) * 8;
80 predicted = rowsize << rowbitsa;
81 if (r0 & 0x00000004)
82 predicted += rowsize << rowbitsb;
83
84 if (predicted != pfb->ram.size) {
85 nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
86 (u32)(pfb->ram.size >> 20));
87 }
88
89 rblock_size = rowsize;
90 if (rt & 1)
91 rblock_size *= 3;
92
93 nv_debug(pfb, "rblock %d bytes\n", rblock_size);
94 return rblock_size;
95}
96
97static int
98nv50_fb_vram_init(struct nouveau_fb *pfb)
99{
100 struct nouveau_device *device = nv_device(pfb);
101 struct nouveau_bios *bios = nouveau_bios(device);
102 const u32 rsvd_head = ( 256 * 1024) >> 12;
103 const u32 rsvd_tail = (1024 * 1024) >> 12;
104 u32 size, tags = 0;
105 int ret;
106
107 pfb->ram.size = nv_rd32(pfb, 0x10020c);
108 pfb->ram.size = (pfb->ram.size & 0xffffff00) |
109 ((pfb->ram.size & 0x000000ff) << 32);
110
111 size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
112 switch (device->chipset) {
113 case 0xaa:
114 case 0xac:
115 case 0xaf:
116 ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
117 if (ret)
118 return ret;
119
120 pfb->ram.type = NV_MEM_TYPE_STOLEN;
121 pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
122 break;
123 default:
124 switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
125 case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
126 case 1:
127 if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
128 pfb->ram.type = NV_MEM_TYPE_DDR3;
129 else
130 pfb->ram.type = NV_MEM_TYPE_DDR2;
131 break;
132 case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
133 case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
134 case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
135 default:
136 break;
137 }
138
139 ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
140 nv50_fb_vram_rblock(pfb) >> 12);
141 if (ret)
142 return ret;
143
144 pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
145 tags = nv_rd32(pfb, 0x100320);
146 break;
147 }
148
149 return tags;
150}
151
152static int
153nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
154 u32 memtype, struct nouveau_mem **pmem)
155{
156 struct nv50_fb_priv *priv = (void *)pfb;
157 struct nouveau_mm *heap = &priv->base.vram;
158 struct nouveau_mm *tags = &priv->base.tags;
159 struct nouveau_mm_node *r;
160 struct nouveau_mem *mem;
161 int comp = (memtype & 0x300) >> 8;
162 int type = (memtype & 0x07f);
163 int back = (memtype & 0x800);
164 int min, max, ret;
165
166 max = (size >> 12);
167 min = ncmin ? (ncmin >> 12) : max;
168 align >>= 12;
169
170 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
171 if (!mem)
172 return -ENOMEM;
173
174 mutex_lock(&pfb->base.mutex);
175 if (comp) {
176 if (align == 16) {
177 int n = (max >> 4) * comp;
178
179 ret = nouveau_mm_head(tags, 1, n, n, 1, &mem->tag);
180 if (ret)
181 mem->tag = NULL;
182 }
183
184 if (unlikely(!mem->tag))
185 comp = 0;
186 }
187
188 INIT_LIST_HEAD(&mem->regions);
189 mem->memtype = (comp << 7) | type;
190 mem->size = max;
191
192 type = types[type];
193 do {
194 if (back)
195 ret = nouveau_mm_tail(heap, type, max, min, align, &r);
196 else
197 ret = nouveau_mm_head(heap, type, max, min, align, &r);
198 if (ret) {
199 mutex_unlock(&pfb->base.mutex);
200 pfb->ram.put(pfb, &mem);
201 return ret;
202 }
203
204 list_add_tail(&r->rl_entry, &mem->regions);
205 max -= r->length;
206 } while (max);
207 mutex_unlock(&pfb->base.mutex);
208
209 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
210 mem->offset = (u64)r->offset << 12;
211 *pmem = mem;
212 return 0;
213}
214
215void
216nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
217{
218 struct nv50_fb_priv *priv = (void *)pfb;
219 struct nouveau_mm_node *this;
220 struct nouveau_mem *mem;
221
222 mem = *pmem;
223 *pmem = NULL;
224 if (unlikely(mem == NULL))
225 return;
226
227 mutex_lock(&pfb->base.mutex);
228 while (!list_empty(&mem->regions)) {
229 this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
230
231 list_del(&this->rl_entry);
232 nouveau_mm_free(&priv->base.vram, &this);
233 }
234
235 nouveau_mm_free(&priv->base.tags, &mem->tag);
236 mutex_unlock(&pfb->base.mutex);
237
238 kfree(mem);
239}
240
/* Fault-decode names for subclients of the DISPATCH client (vm_client 0x03). */
static const struct nouveau_enum vm_dispatch_subclients[] = {
	{ 0x00000000, "GRCTX", NULL },
	{ 0x00000001, "NOTIFY", NULL },
	{ 0x00000002, "QUERY", NULL },
	{ 0x00000003, "COND", NULL },
	{ 0x00000004, "M2M_IN", NULL },
	{ 0x00000005, "M2M_OUT", NULL },
	{ 0x00000006, "M2M_NOTIFY", NULL },
	{}
};
251
/* Fault-decode names for subclients of the CCACHE client (vm_client 0x05). */
static const struct nouveau_enum vm_ccache_subclients[] = {
	{ 0x00000000, "CB", NULL },
	{ 0x00000001, "TIC", NULL },
	{ 0x00000002, "TSC", NULL },
	{}
};
258
/* Fault-decode names for subclients of the PROP client (vm_client 0x0b). */
static const struct nouveau_enum vm_prop_subclients[] = {
	{ 0x00000000, "RT0", NULL },
	{ 0x00000001, "RT1", NULL },
	{ 0x00000002, "RT2", NULL },
	{ 0x00000003, "RT3", NULL },
	{ 0x00000004, "RT4", NULL },
	{ 0x00000005, "RT5", NULL },
	{ 0x00000006, "RT6", NULL },
	{ 0x00000007, "RT7", NULL },
	{ 0x00000008, "ZETA", NULL },
	{ 0x00000009, "LOCAL", NULL },
	{ 0x0000000a, "GLOBAL", NULL },
	{ 0x0000000b, "STACK", NULL },
	{ 0x0000000c, "DST2D", NULL },
	{}
};
275
/* Fault-decode names for subclients of the PFIFO engine (vm_engine 0x05). */
static const struct nouveau_enum vm_pfifo_subclients[] = {
	{ 0x00000000, "PUSHBUF", NULL },
	{ 0x00000001, "SEMAPHORE", NULL },
	{}
};
281
/* Fault-decode names for subclients of the BAR engine (vm_engine 0x06). */
static const struct nouveau_enum vm_bar_subclients[] = {
	{ 0x00000000, "FB", NULL },
	{ 0x00000001, "IN", NULL },
	{}
};
287
/* VM fault client names (status field st2); .data points at a subclient
 * table used to further decode st3 where one exists. */
static const struct nouveau_enum vm_client[] = {
	{ 0x00000000, "STRMOUT", NULL },
	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
	{ 0x00000004, "PFIFO_WRITE", NULL },
	{ 0x00000005, "CCACHE", vm_ccache_subclients },
	{ 0x00000006, "PPPP", NULL },
	{ 0x00000007, "CLIPID", NULL },
	{ 0x00000008, "PFIFO_READ", NULL },
	{ 0x00000009, "VFETCH", NULL },
	{ 0x0000000a, "TEXTURE", NULL },
	{ 0x0000000b, "PROP", vm_prop_subclients },
	{ 0x0000000c, "PVP", NULL },
	{ 0x0000000d, "PBSP", NULL },
	{ 0x0000000e, "PCRYPT", NULL },
	{ 0x0000000f, "PCOUNTER", NULL },
	{ 0x00000011, "PDAEMON", NULL },
	{}
};
306
/* VM fault engine names (status field st0); .data2 carries the NVDEV
 * engine index used to look up the faulting channel's context.
 * Note 0x08 appears twice (PPPP vs PMPEG): nv50_fb_intr() walks
 * duplicate entries and picks whichever engine owns the channel. */
static const struct nouveau_enum vm_engine[] = {
	{ 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
	{ 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
	{ 0x00000004, "PEEPHOLE", NULL },
	{ 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
	{ 0x00000006, "BAR", vm_bar_subclients },
	{ 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP },
	{ 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
	{ 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
	{ 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT },
	{ 0x0000000b, "PCOUNTER", NULL },
	{ 0x0000000c, "SEMAPHORE_BG", NULL },
	{ 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 },
	{ 0x0000000e, "PDAEMON", NULL },
	{}
};
323
/* VM fault reason names (status field st1). */
static const struct nouveau_enum vm_fault[] = {
	{ 0x00000000, "PT_NOT_PRESENT", NULL },
	{ 0x00000001, "PT_TOO_SHORT", NULL },
	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
	{ 0x00000004, "PAGE_READ_ONLY", NULL },
	{ 0x00000006, "NULL_DMAOBJ", NULL },
	{ 0x00000007, "WRONG_MEMTYPE", NULL },
	{ 0x0000000b, "VRAM_LIMIT", NULL },
	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
	{}
};
336
/* PFB interrupt handler: decode a pending VM fault from the indirect
 * trap-info registers (0x100c90/0x100c94) and log a human-readable
 * description (engine/client/subclient, address, channel, reason).
 */
static void
nv50_fb_intr(struct nouveau_subdev *subdev)
{
	struct nouveau_device *device = nv_device(subdev);
	struct nouveau_engine *engine;
	struct nv50_fb_priv *priv = (void *)subdev;
	const struct nouveau_enum *en, *cl;
	struct nouveau_object *engctx = NULL;
	u32 trap[6], idx, chan;
	u8 st0, st1, st2, st3;
	int i;

	/* bit 31 of 0x100c90 signals a valid pending trap */
	idx = nv_rd32(priv, 0x100c90);
	if (!(idx & 0x80000000))
		return;
	idx &= 0x00ffffff;

	/* read the six trap-info words (selected via bits 26:24), then
	 * re-write bit 31 to acknowledge the trap */
	for (i = 0; i < 6; i++) {
		nv_wr32(priv, 0x100c90, idx | i << 24);
		trap[i] = nv_rd32(priv, 0x100c94);
	}
	nv_wr32(priv, 0x100c90, idx | 0x80000000);


	/* the status fields in trap[0] are 4 bits wide on older chipsets
	 * (and the 0xaa/0xac IGPs), 8 bits wide from nva3 onwards */
	if (device->chipset < 0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		st0 = (trap[0] & 0x0000000f) >> 0;
		st1 = (trap[0] & 0x000000f0) >> 4;
		st2 = (trap[0] & 0x00000f00) >> 8;
		st3 = (trap[0] & 0x0000f000) >> 12;
	} else {
		st0 = (trap[0] & 0x000000ff) >> 0;
		st1 = (trap[0] & 0x0000ff00) >> 8;
		st2 = (trap[0] & 0x00ff0000) >> 16;
		st3 = (trap[0] & 0xff000000) >> 24;
	}
	chan = (trap[2] << 16) | trap[1];

	en = nouveau_enum_find(vm_engine, st0);

	/* vm_engine may hold several entries with the same value (e.g.
	 * 0x08 = PPPP/PMPEG); walk them and pick the engine that actually
	 * owns the faulting channel.  Fall back to the first match if no
	 * engine claims it. */
	if (en && en->data2) {
		const struct nouveau_enum *orig_en = en;
		while (en->name && en->value == st0 && en->data2) {
			engine = nouveau_engine(subdev, en->data2);
			if (engine) {
				engctx = nouveau_engctx_get(engine, chan);
				if (engctx)
					break;
			}
			en++;
		}
		if (!engctx)
			en = orig_en;
	}

	/* first part of the message; the pr_cont() calls below continue
	 * this same log line */
	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
		 (trap[5] & 0x00000100) ? "read" : "write",
		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
		 nouveau_client_name(engctx));

	/* drop the context reference taken by nouveau_engctx_get() */
	nouveau_engctx_put(engctx);

	if (en)
		pr_cont("%s/", en->name);
	else
		pr_cont("%02x/", st0);

	cl = nouveau_enum_find(vm_client, st2);
	if (cl)
		pr_cont("%s/", cl->name);
	else
		pr_cont("%02x/", st2);

	/* st3 is decoded via the client's subclient table if it has one,
	 * else the engine's */
	if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
	else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
	else cl = NULL;
	if (cl)
		pr_cont("%s", cl->name);
	else
		pr_cont("%02x", st3);

	pr_cont(" reason: ");
	en = nouveau_enum_find(vm_fault, st1);
	if (en)
		pr_cont("%s\n", en->name);
	else
		pr_cont("0x%08x\n", st1);
}
425
426static int
427nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
428 struct nouveau_oclass *oclass, void *data, u32 size,
429 struct nouveau_object **pobject)
430{
431 struct nouveau_device *device = nv_device(parent);
432 struct nv50_fb_priv *priv;
433 int ret;
434
435 ret = nouveau_fb_create(parent, engine, oclass, &priv);
436 *pobject = nv_object(priv);
437 if (ret)
438 return ret;
439
440 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
441 if (priv->r100c08_page) {
442 priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
443 0, PAGE_SIZE,
444 PCI_DMA_BIDIRECTIONAL);
445 if (pci_dma_mapping_error(device->pdev, priv->r100c08))
446 nv_warn(priv, "failed 0x100c08 page map\n");
447 } else {
448 nv_warn(priv, "failed 0x100c08 page alloc\n");
449 }
450
451 priv->base.memtype_valid = nv50_fb_memtype_valid;
452 priv->base.ram.init = nv50_fb_vram_init;
453 priv->base.ram.get = nv50_fb_vram_new;
454 priv->base.ram.put = nv50_fb_vram_del;
455 nv_subdev(priv)->intr = nv50_fb_intr;
456 return nouveau_fb_preinit(&priv->base);
457}
458
459static void
460nv50_fb_dtor(struct nouveau_object *object)
461{
462 struct nouveau_device *device = nv_device(object);
463 struct nv50_fb_priv *priv = (void *)object;
464
465 if (priv->r100c08_page) {
466 pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
467 PCI_DMA_BIDIRECTIONAL);
468 __free_page(priv->r100c08_page);
469 }
470
471 nouveau_fb_destroy(&priv->base);
472}
473
474static int
475nv50_fb_init(struct nouveau_object *object)
476{
477 struct nouveau_device *device = nv_device(object);
478 struct nv50_fb_priv *priv = (void *)object;
479 int ret;
480
481 ret = nouveau_fb_init(&priv->base);
482 if (ret)
483 return ret;
484
485
486
487
488
489 nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
490
491
492
493 switch (device->chipset) {
494 case 0x50:
495 nv_wr32(priv, 0x100c90, 0x000707ff);
496 break;
497 case 0xa3:
498 case 0xa5:
499 case 0xa8:
500 nv_wr32(priv, 0x100c90, 0x000d0fff);
501 break;
502 case 0xaf:
503 nv_wr32(priv, 0x100c90, 0x089d1fff);
504 break;
505 default:
506 nv_wr32(priv, 0x100c90, 0x001d07ff);
507 break;
508 }
509
510 return 0;
511}
512
/* Object class definition for the NV50 PFB subdev. */
struct nouveau_oclass
nv50_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_fb_ctor,
		.dtor = nv50_fb_dtor,
		.init = nv50_fb_init,
		.fini = _nouveau_fb_fini,
	},
};
523