1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_ramht.h"
5
6
7static int
8nouveau_fifo_ctx_size(struct drm_device *dev)
9{
10 struct drm_nouveau_private *dev_priv = dev->dev_private;
11
12 if (dev_priv->chipset >= 0x40)
13 return 128;
14 else
15 if (dev_priv->chipset >= 0x17)
16 return 64;
17
18 return 32;
19}
20
21int nv04_instmem_init(struct drm_device *dev)
22{
23 struct drm_nouveau_private *dev_priv = dev->dev_private;
24 struct nouveau_gpuobj *ramht = NULL;
25 u32 offset, length;
26 int ret;
27
28
29 dev_priv->ramin_available = true;
30
31
32 if (dev_priv->card_type >= NV_40) {
33 u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
34 u32 rsvd;
35
36
37 if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
38 else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
39 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
40 else rsvd = 0x4a40 * vs;
41 rsvd += 16 * 1024;
42 rsvd *= dev_priv->engine.fifo.channels;
43
44
45 if (pci_is_pcie(dev->pdev))
46 rsvd += 512 * 1024;
47
48
49 rsvd += 512 * 1024;
50
51 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
52 } else {
53 dev_priv->ramin_rsvd_vram = 512 * 1024;
54 }
55
56
57 ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
58 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
59 if (ret)
60 return ret;
61
62 ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
63 nouveau_gpuobj_ref(NULL, &ramht);
64 if (ret)
65 return ret;
66
67
68 ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
69 NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
70 if (ret)
71 return ret;
72
73
74 length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
75 switch (dev_priv->card_type) {
76 case NV_40:
77 offset = 0x20000;
78 break;
79 default:
80 offset = 0x11400;
81 break;
82 }
83
84 ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
85 NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
86 if (ret)
87 return ret;
88
89
90 offset += length;
91
92
93
94
95
96
97
98
99
100 if (dev_priv->card_type >= NV_40) {
101 if (offset < 0x40000)
102 offset = 0x40000;
103 }
104
105 ret = drm_mm_init(&dev_priv->ramin_heap, offset,
106 dev_priv->ramin_rsvd_vram - offset);
107 if (ret) {
108 NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
109 return ret;
110 }
111
112 return 0;
113}
114
115void
116nv04_instmem_takedown(struct drm_device *dev)
117{
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119
120 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
121 nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
122 nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
123
124 if (drm_mm_initialized(&dev_priv->ramin_heap))
125 drm_mm_takedown(&dev_priv->ramin_heap);
126}
127
/* No instmem state needs saving on NV04-class hardware. */
int
nv04_instmem_suspend(struct drm_device *dev)
{
	return 0;
}
133
/* Counterpart of nv04_instmem_suspend: nothing to restore. */
void
nv04_instmem_resume(struct drm_device *dev)
{
}
138
139int
140nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
141 u32 size, u32 align)
142{
143 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
144 struct drm_mm_node *ramin = NULL;
145
146 do {
147 if (drm_mm_pre_get(&dev_priv->ramin_heap))
148 return -ENOMEM;
149
150 spin_lock(&dev_priv->ramin_lock);
151 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
152 if (ramin == NULL) {
153 spin_unlock(&dev_priv->ramin_lock);
154 return -ENOMEM;
155 }
156
157 ramin = drm_mm_get_block_atomic(ramin, size, align);
158 spin_unlock(&dev_priv->ramin_lock);
159 } while (ramin == NULL);
160
161 gpuobj->node = ramin;
162 gpuobj->vinst = ramin->start;
163 return 0;
164}
165
166void
167nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
168{
169 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
170
171 spin_lock(&dev_priv->ramin_lock);
172 drm_mm_put_block(gpuobj->node);
173 gpuobj->node = NULL;
174 spin_unlock(&dev_priv->ramin_lock);
175}
176
177int
178nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
179{
180 gpuobj->pinst = gpuobj->vinst;
181 return 0;
182}
183
/* No-op: nv04_instmem_map establishes no state that needs undoing. */
void
nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}
188
/* No-op: instance-memory writes need no explicit flush on these chips. */
void
nv04_instmem_flush(struct drm_device *dev)
{
}