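/*
 * NV50 BAR subdev: sets up the GPU virtual address spaces sitting behind
 * the PCI apertures.  BAR1 backs userspace-visible mappings of video
 * memory; BAR3 is a separate window reserved for kernel-internal
 * mappings.  (Summary comment added for orientation; roles inferred
 * from the kmap/umap paths below.)
 */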
#include "priv.h"

#include <core/device.h>
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

struct nv50_bar_priv {
	struct nvkm_bar base;
	spinlock_t lock;		/* serialises BAR flush requests */
	struct nvkm_gpuobj *mem;	/* instance-memory heap holding the objects below */
	struct nvkm_gpuobj *pad;	/* padding, larger on the original NV50 */
	struct nvkm_gpuobj *pgd;	/* page directory shared by both BAR VMs */
	struct nvkm_vm *bar1_vm;	/* BAR1 address space (userspace mappings) */
	struct nvkm_gpuobj *bar1;	/* DMA object describing the BAR1 window */
	struct nvkm_vm *bar3_vm;	/* BAR3 address space (kernel mappings) */
	struct nvkm_gpuobj *bar3;	/* DMA object describing the BAR3 window */
};

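/*
 * Map @mem into the kernel's BAR3 address space.  mem->size is in 4KiB
 * pages, hence the << 12 to convert to bytes; the literal 12 selects a
 * 4KiB page shift for the mapping.
 */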
static int
nv50_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	ret = nvkm_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}

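/*
 * Identical to nv50_bar_kmap(), but allocates from the BAR1 address
 * space so the resulting mapping is reachable through the user-visible
 * aperture.
 */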
static int
nv50_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
	      struct nvkm_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int ret;

	ret = nvkm_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nvkm_vm_map(vma, mem);
	return 0;
}

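/*
 * Common unmap path for both apertures: tear down the page-table
 * entries, then release the virtual address range.
 */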
static void
nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
{
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
}

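/*
 * Kick a BAR flush by writing bit 0 of 0x00330c, then poll for bit 1 to
 * clear, which appears to signal completion.  The spinlock keeps
 * concurrent flush requests from interleaving.
 */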
static void
nv50_bar_flush(struct nvkm_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(priv, 0x00330c, 0x00000001);
	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n");
	spin_unlock_irqrestore(&priv->lock, flags);
}

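/*
 * G84 and later moved the flush trigger/status register to 0x070000;
 * the sequence is otherwise the same as on the original NV50.
 */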
void
g84_bar_flush(struct nvkm_bar *bar)
{
	struct nv50_bar_priv *priv = (void *)bar;
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	nv_wr32(priv, 0x070000, 0x00000001);
	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
		nv_warn(priv, "flush timeout\n");
	spin_unlock_irqrestore(&priv->lock, flags);
}

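/*
 * Construction order matters here: a small instance-memory heap is
 * carved out first, a page directory is allocated from it, and then the
 * BAR3 and BAR1 address spaces are created sharing that directory
 * (BAR3 at virtual 0x0100000000, BAR1 at virtual 0).
 */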
static int
nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nvkm_object *heap;
	struct nvkm_vm *vm;
	struct nv50_bar_priv *priv;
	u64 start, limit;
	int ret;

	ret = nvkm_bar_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
			      NVOBJ_FLAG_HEAP, &priv->mem);
	heap = nv_object(priv->mem);
	if (ret)
		return ret;

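	/*
	 * Page directory, preceded by some padding.  The original NV50
	 * needs a larger pad (0x1400 vs 0x0200 bytes), presumably to put
	 * the directory at an offset the chipset expects.
	 */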
	ret = nvkm_gpuobj_new(nv_object(priv), heap,
			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
			      0, 0, &priv->pad);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
	if (ret)
		return ret;

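	/*
	 * BAR3 comes first, and its page table is preallocated to span
	 * the whole aperture (one 8-byte PTE per 4KiB page), presumably
	 * because BAR3 itself must work before page tables can be
	 * allocated on demand.  The "limit--" turns the exclusive end
	 * address into the inclusive limit the DMA object below wants.
	 */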
	/* BAR3 */
	start = 0x0100000000ULL;
	limit = start + nv_device_resource_len(device, 3);

	ret = nvkm_vm_new(device, start, limit, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nvkm_gpuobj_new(nv_object(priv), heap,
			      ((limit-- - start) >> 12) * 8, 0x1000,
			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
	vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
	if (ret)
		return ret;

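	/*
	 * Fill in the DMA object describing the BAR3 aperture: word 0x00
	 * is the class/flags word, 0x04/0x08 hold the low 32 bits of the
	 * inclusive limit and base, and 0x0c packs their high bits
	 * together.
	 */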
	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar3, 0x10, 0x00000000);
	nv_wo32(priv->bar3, 0x14, 0x00000000);

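	/*
	 * BAR1 reuses the shared page directory; unlike BAR3, its page
	 * tables can be allocated on demand as mappings are made through
	 * nv50_bar_umap().
	 */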
	/* BAR1 */
	start = 0x0000000000ULL;
	limit = start + nv_device_resource_len(device, 1);

	ret = nvkm_vm_new(device, start, limit--, start, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
	if (ret)
		return ret;

	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
				  upper_32_bits(start));
	nv_wo32(priv->bar1, 0x10, 0x00000000);
	nv_wo32(priv->bar1, 0x14, 0x00000000);

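	/*
	 * Wire up the subdev methods; the original NV50 and G84+ differ
	 * only in where the flush register lives.
	 */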
	priv->base.alloc = nvkm_bar_alloc;
	priv->base.kmap = nv50_bar_kmap;
	priv->base.umap = nv50_bar_umap;
	priv->base.unmap = nv50_bar_unmap;
	if (device->chipset == 0x50)
		priv->base.flush = nv50_bar_flush;
	else
		priv->base.flush = g84_bar_flush;
	spin_lock_init(&priv->lock);
	return 0;
}

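/*
 * Teardown mirrors construction in reverse.  The BAR3 page table was
 * allocated by hand in the constructor, so it also has to be released
 * by hand before the VM reference is dropped.
 */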
static void
nv50_bar_dtor(struct nvkm_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	nvkm_gpuobj_ref(NULL, &priv->bar1);
	nvkm_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
	nvkm_gpuobj_ref(NULL, &priv->bar3);
	if (priv->bar3_vm) {
		nvkm_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
		nvkm_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
	}
	nvkm_gpuobj_ref(NULL, &priv->pgd);
	nvkm_gpuobj_ref(NULL, &priv->pad);
	nvkm_gpuobj_ref(NULL, &priv->mem);
	nvkm_bar_destroy(&priv->base);
}

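/*
 * Bring the hardware side up: what looks like a reset toggle in PMC
 * (bit 8 of 0x000200), a VM flush via 0x100c80, then pointing the BAR
 * logic at the instance-memory object (0x001704) and binding the BAR1
 * and BAR3 DMA objects (0x001708/0x00170c, instance offsets in 16-byte
 * units).
 */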
static int
nv50_bar_init(struct nvkm_object *object)
{
	struct nv50_bar_priv *priv = (void *)object;
	int ret, i;

	ret = nvkm_bar_init(&priv->base);
	if (ret)
		return ret;

	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(priv, 0x100c80, 0x00060001);
	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
		nv_error(priv, "vm flush timeout\n");
		return -EBUSY;
	}

	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
	for (i = 0; i < 8; i++)
		nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
	return 0;
}

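/*
 * Nothing hardware-specific to tear down at fini time; defer to the
 * common BAR code.
 */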
static int
nv50_bar_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_bar_priv *priv = (void *)object;
	return nvkm_bar_fini(&priv->base, suspend);
}

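/*
 * Class description through which the nvkm core instantiates this
 * subdev on NV50-family devices.
 */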
struct nvkm_oclass
nv50_bar_oclass = {
	.handle = NV_SUBDEV(BAR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_bar_ctor,
		.dtor = nv50_bar_dtor,
		.init = nv50_bar_init,
		.fini = nv50_bar_fini,
	},
};