#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
#include "user.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu/vmm.h>

#include <nvif/class.h>

struct nv04_dmaobj {
	struct nvkm_dmaobj base;
	bool clone;
	u32 flags0;
	u32 flags2;
};

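/* Build the 16-byte NV04-style DMA (context) object.  For clones, the
 * physical page address is looked up in the NV04 MMU page table rather
 * than taken from the object's start address.
 */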
static int
nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
		 int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
	u64 offset = dmaobj->base.start & 0xfffff000;
	u64 adjust = dmaobj->base.start & 0x00000fff;
	u32 length = dmaobj->base.limit - dmaobj->base.start;
	int ret;

	if (dmaobj->clone) {
		struct nvkm_memory *pgt =
			device->mmu->vmm->pd->pt[0]->memory;
		if (!dmaobj->base.start)
			return nvkm_gpuobj_wrap(pgt, pgpuobj);
		nvkm_kmap(pgt);
		offset  = nvkm_ro32(pgt, 8 + (offset >> 10));
		offset &= 0xfffff000;
		nvkm_done(pgt);
	}

	ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
		nvkm_wo32(*pgpuobj, 0x04, length);
		nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
		nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
		nvkm_done(*pgpuobj);
	}

	return ret;
}

static const struct nvkm_dmaobj_func
nv04_dmaobj_func = {
	.bind = nv04_dmaobj_bind,
};

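/* Constructor: translate the requested target/access into the flags0/flags2
 * words consumed by nv04_dmaobj_bind().
 */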
int
nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
		void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
	struct nvkm_device *device = dma->engine.subdev.device;
	struct nv04_dmaobj *dmaobj;
	int ret;

	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
		return -ENOMEM;
	*pdmaobj = &dmaobj->base;

	ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
			       &data, &size, &dmaobj->base);
	if (ret)
		return ret;

	/* VM-targeted objects are handled as RW PCI objects; on NV04-class
	 * MMUs the address is additionally translated through the page
	 * table at bind time (the "clone" path above).
	 */
	if (dmaobj->base.target == NV_MEM_TARGET_VM) {
		if (device->mmu->func == &nv04_mmu)
			dmaobj->clone = true;
		dmaobj->base.target = NV_MEM_TARGET_PCI;
		dmaobj->base.access = NV_MEM_ACCESS_RW;
	}

	dmaobj->flags0 = oclass->base.oclass;
	switch (dmaobj->base.target) {
	case NV_MEM_TARGET_VRAM:
		dmaobj->flags0 |= 0x00003000;
		break;
	case NV_MEM_TARGET_PCI:
		dmaobj->flags0 |= 0x00023000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		dmaobj->flags0 |= 0x00033000;
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->base.access) {
	case NV_MEM_ACCESS_RO:
		dmaobj->flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		dmaobj->flags0 |= 0x00008000;
		fallthrough;	/* write-only also takes the RW path below */
	case NV_MEM_ACCESS_RW:
		dmaobj->flags2 |= 0x00000002;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}