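/*
 * dmm.c
 *
 * Dynamic Memory Manager (DMM) for the TI DSP Bridge driver: manages the
 * DSP virtual address space that can be reserved and mapped to MPU buffers.
 *
 * A "region" is a run of 4 KB pages of DSP virtual space; a "chunk" is a
 * reserved region.
 */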
#include <linux/types.h>

#include <dspbridge/host_os.h>

#include <dspbridge/dbdefs.h>

#include <dspbridge/sync.h>

#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

#include <dspbridge/dmm.h>

/* Convert a virtual_mapping_table entry to its DSP virtual address ... */
#define DMM_ADDR_VIRTUAL(a) \
	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
	dyn_mem_map_beg)
/* ... and a DSP virtual address to its table index. */
#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
/* DMM manager object */
struct dmm_object {
	/* Serializes access to the virtual mapping table */
	spinlock_t dmm_lock;
};

/* One entry per 4 KB page of DSP virtual space; the first page of a region
 * carries the region bookkeeping. */
struct map_page {
	u32 region_size:15;	/* size of the region, in 4 KB pages */
	u32 mapped_size:15;	/* size of the mapped chunk, in 4 KB pages */
	u32 reserved:1;		/* region is reserved */
	u32 mapped:1;		/* region is mapped */
};

static struct map_page *virtual_mapping_table;
static u32 free_region;		/* index of the cached free region */
static u32 free_size;		/* size of that free region, in bytes */
static u32 dyn_mem_map_beg;	/* base DSP virtual address of the map */
static u32 table_size;		/* number of entries in the mapping table */

static struct map_page *get_region(u32 addr);
static struct map_page *get_free_region(u32 len);
static struct map_page *get_mapped_region(u32 addrs);
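
/*
 * dmm_create_tables() - allocate and zero the virtual mapping table that
 * tracks the DSP virtual address range of 'size' bytes starting at 'addr'.
 * Any previously created table is deleted first.
 */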
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	status = dmm_delete_tables(dmm_obj);
	if (!status) {
		dyn_mem_map_beg = addr;
		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
		/* Allocate the zero-initialized mapping table */
		virtual_mapping_table = __vmalloc(table_size *
				sizeof(struct map_page), GFP_KERNEL |
				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
		if (virtual_mapping_table == NULL)
			status = -ENOMEM;
		else {
			/* All entries start out free; the whole range is
			 * one free region. */
			free_region = 0;
			free_size = table_size * PG_SIZE4K;
			virtual_mapping_table[0].region_size = table_size;
		}
	}

	if (status)
		pr_err("%s: failure, status 0x%x\n", __func__, status);

	return status;
}
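
/*
 * dmm_create() - allocate a DMM manager object and return it through
 * dmm_manager. hdev_obj and mgr_attrts are currently unused.
 */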
int dmm_create(struct dmm_object **dmm_manager,
	       struct dev_object *hdev_obj,
	       const struct dmm_mgrattrs *mgr_attrts)
{
	struct dmm_object *dmm_obj = NULL;
	int status = 0;

	*dmm_manager = NULL;

	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
	if (dmm_obj != NULL) {
		spin_lock_init(&dmm_obj->dmm_lock);
		*dmm_manager = dmm_obj;
	} else {
		status = -ENOMEM;
	}

	return status;
}
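
/*
 * dmm_destroy() - delete the mapping table and free the DMM manager object.
 */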
int dmm_destroy(struct dmm_object *dmm_mgr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	if (dmm_mgr) {
		status = dmm_delete_tables(dmm_obj);
		if (!status)
			kfree(dmm_obj);
	} else
		status = -EFAULT;

	return status;
}
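
/*
 * dmm_delete_tables() - release the virtual mapping table allocated by
 * dmm_create_tables().
 */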
int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
	int status = 0;

	/* Delete the virtual mapping table, if one exists */
	if (dmm_mgr) {
		vfree(virtual_mapping_table);
		/* Clear the stale pointer so a later delete is a no-op */
		virtual_mapping_table = NULL;
	} else {
		status = -EFAULT;
	}
	return status;
}
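
/*
 * dmm_get_handle() - return the DMM manager of the device that owns
 * hprocessor, or of the first device object if hprocessor is NULL.
 */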
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
	int status = 0;
	struct dev_object *hdev_obj;

	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();

	if (!status)
		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);

	return status;
}
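
/*
 * dmm_map_memory() - mark 'size' bytes at DSP virtual address 'addr' as
 * mapped in the virtual mapping table.
 */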
int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	/* Find the mapping table entry for the DSP address to be mapped */
	chunk = (struct map_page *)get_region(addr);
	if (chunk != NULL) {
		/* Mark the region 'mapped'; the 'reserved' flag is
		 * left as-is */
		chunk->mapped = true;
		chunk->mapped_size = (size / PG_SIZE4K);
	} else
		status = -ENOENT;
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
		"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);

	return status;
}
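
/*
 * dmm_reserve_memory() - reserve 'size' bytes of DSP virtual address space
 * and return the start address through prsv_addr.
 */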
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
		       u32 *prsv_addr)
{
	int status = 0;
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *node;
	u32 rsv_addr = 0;
	u32 rsv_size = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Try to get a free DSP virtual address chunk big enough */
	node = get_free_region(size);
	if (node != NULL) {
		/* DSP virtual address of the region */
		rsv_addr = DMM_ADDR_VIRTUAL(node);
		/* Requested size in 4 KB pages */
		rsv_size = size / PG_SIZE4K;
		if (rsv_size < node->region_size) {
			/* Mark the remainder of the region as free */
			node[rsv_size].mapped = false;
			node[rsv_size].reserved = false;
			node[rsv_size].region_size =
			    node->region_size - rsv_size;
			node[rsv_size].mapped_size = 0;
		}
		/* Mark the region 'reserved' */
		node->mapped = false;
		node->reserved = true;
		node->region_size = rsv_size;
		node->mapped_size = 0;
		/* Return the DSP virtual address of the reserved region */
		*prsv_addr = rsv_addr;
	} else
		status = -ENOMEM;

	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
		prsv_addr, status, rsv_addr, rsv_size);

	return status;
}
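
/*
 * dmm_un_map_memory() - mark the chunk at DSP virtual address 'addr' as
 * unmapped and return its former size through psize.
 */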
int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	chunk = get_mapped_region(addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Unmap the chunk and return its size to the caller */
		*psize = chunk->mapped_size * PG_SIZE4K;
		chunk->mapped = false;
		chunk->mapped_size = 0;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);

	return status;
}
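
/*
 * dmm_un_reserve_memory() - unmap any mapped sub-chunks of the region at
 * rsv_addr and mark the region as no longer reserved.
 */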
int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	u32 i;
	int status = 0;
	u32 chunk_size;

	spin_lock(&dmm_obj->dmm_lock);

	/* Find the chunk containing the reserved address */
	chunk = get_mapped_region(rsv_addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Unmap any mapped sub-chunks before unreserving */
		i = 0;
		while (i < chunk->region_size) {
			if (chunk[i].mapped) {
				/* Clear the flags and skip over the whole
				 * mapped sub-chunk */
				chunk_size = chunk[i].mapped_size;
				chunk[i].mapped = false;
				chunk[i].mapped_size = 0;
				i += chunk_size;
			} else
				i++;
		}
		/* Clear the 'reserved' flag; free regions are coalesced
		 * lazily in get_free_region() rather than here. */
		chunk->reserved = false;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
		__func__, dmm_mgr, rsv_addr, status, chunk);

	return status;
}
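
/*
 * get_region() - return the mapping table entry covering DSP virtual
 * address 'addr', or NULL if the address is outside the table.
 */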
static struct map_page *get_region(u32 addr)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;

	if (virtual_mapping_table != NULL) {
		/* Find the mapping table entry for this address */
		i = DMM_ADDR_TO_INDEX(addr);
		if (i < table_size)
			curr_region = virtual_mapping_table + i;
	}

	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
		__func__, curr_region, free_region, free_size);
	return curr_region;
}
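
/*
 * get_free_region() - return a free region of at least 'len' bytes,
 * coalescing adjacent free regions if the cached one is too small.
 */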
static struct map_page *get_free_region(u32 len)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;
	u32 region_size = 0;
	u32 next_i = 0;

	if (virtual_mapping_table == NULL)
		return curr_region;
	if (len > free_size) {
		/* The cached free region is too small: scan the whole
		 * table, coalescing adjacent free regions and remembering
		 * the largest one found. */
		while (i < table_size) {
			region_size = virtual_mapping_table[i].region_size;
			next_i = i + region_size;
			if (virtual_mapping_table[i].reserved == false) {
				/* Coalesce with the next region if it is
				 * free as well */
				if (next_i < table_size &&
				    virtual_mapping_table[next_i].reserved
				    == false) {
					virtual_mapping_table[i].region_size +=
					    virtual_mapping_table
					    [next_i].region_size;
					continue;
				}
				region_size *= PG_SIZE4K;
				if (region_size > free_size) {
					free_region = i;
					free_size = region_size;
				}
			}
			i = next_i;
		}
	}
	if (len <= free_size) {
		/* Hand out the start of the free region and shrink it */
		curr_region = virtual_mapping_table + free_region;
		free_region += (len / PG_SIZE4K);
		free_size -= len;
	}
	return curr_region;
}
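
/*
 * get_mapped_region() - return the mapping table entry for 'addrs' if that
 * address is currently mapped or reserved, NULL otherwise.
 */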
static struct map_page *get_mapped_region(u32 addrs)
{
	u32 i = 0;
	struct map_page *curr_region = NULL;

	if (virtual_mapping_table == NULL)
		return curr_region;

	i = DMM_ADDR_TO_INDEX(addrs);
	if (i < table_size && (virtual_mapping_table[i].mapped ||
			       virtual_mapping_table[i].reserved))
		curr_region = virtual_mapping_table + i;
	return curr_region;
}
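
/*
 * dmm_mem_map_dump() - debug helper that walks the mapping table and prints
 * the free, used, and largest-free-block sizes of the DSP virtual space.
 */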
#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
{
	struct map_page *curr_node = NULL;
	u32 i;
	u32 freemem = 0;
	u32 bigsize = 0;

	spin_lock(&dmm_mgr->dmm_lock);

	if (virtual_mapping_table != NULL) {
		for (i = 0; i < table_size; i +=
		     virtual_mapping_table[i].region_size) {
			curr_node = virtual_mapping_table + i;
			if (!curr_node->reserved) {
				/* Free region: add it to the total and
				 * track the largest free block */
				freemem += (curr_node->region_size *
					    PG_SIZE4K);
				if (curr_node->region_size > bigsize)
					bigsize = curr_node->region_size;
			}
		}
	}
	spin_unlock(&dmm_mgr->dmm_lock);
	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
	       freemem / (1024 * 1024));
	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
	       ((table_size * PG_SIZE4K) - freemem) / (1024 * 1024));
	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
	       bigsize * PG_SIZE4K / (1024 * 1024));

	return 0;
}
#endif