1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#include <linux/types.h>
41
42
43#include <dspbridge/dbdefs.h>
44
45
46#include <dspbridge/dbc.h>
47
48
49#include <dspbridge/list.h>
50
51
52#include <dspbridge/rmm.h>
53
54
55
56
57
/*
 *  ======== rmm_header ========
 *  Node of a segment's singly linked free list; describes one
 *  contiguous run of free memory within the segment.
 */
struct rmm_header {
	struct rmm_header *next;	/* next free block (ascending addr) */
	u32 size;			/* size of this free block */
	u32 addr;			/* DSP start address of this block */
};
63
64
65
66
67
/*
 *  ======== rmm_ovly_sect ========
 *  One reserved overlay region; kept on target->ovly_list sorted by
 *  ascending addr (see rmm_alloc with reserve == true).
 */
struct rmm_ovly_sect {
	struct list_head list_elem;	/* links section into ovly_list */
	u32 addr;			/* start address of the section */
	u32 size;			/* length of the section */
	s32 page;			/* segment id passed at reserve time */
};
74
75
76
77
/*
 *  ======== rmm_target_obj ========
 *  Per-target state: segment descriptors, one free list per segment,
 *  and the list of reserved overlay sections.
 */
struct rmm_target_obj {
	struct rmm_segment *seg_tab;	/* array of num_segs descriptors */
	struct rmm_header **free_list;	/* one free-list head per segment */
	u32 num_segs;			/* entries in seg_tab / free_list */
	struct lst_list *ovly_list;	/* reserved overlay sections */
};
84
/* Module reference count, maintained by rmm_init()/rmm_exit(). */
static u32 refs;

/* Carve a size/align allocation out of segment segid's free list;
 * returns true and writes the address to *dsp_address on success. */
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
			u32 align, u32 *dsp_address);
/* Return [addr, addr+size) to segment segid's free list, coalescing
 * with adjacent free blocks; false only on header allocation failure. */
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
		       u32 size);
91
92
93
94
95int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
96 u32 align, u32 *dsp_address, bool reserve)
97{
98 struct rmm_ovly_sect *sect;
99 struct rmm_ovly_sect *prev_sect = NULL;
100 struct rmm_ovly_sect *new_sect;
101 u32 addr;
102 int status = 0;
103
104 DBC_REQUIRE(target);
105 DBC_REQUIRE(dsp_address != NULL);
106 DBC_REQUIRE(size > 0);
107 DBC_REQUIRE(reserve || (target->num_segs > 0));
108 DBC_REQUIRE(refs > 0);
109
110 if (!reserve) {
111 if (!alloc_block(target, segid, size, align, dsp_address)) {
112 status = -ENOMEM;
113 } else {
114
115
116 target->seg_tab[segid].number++;
117 }
118 goto func_end;
119 }
120
121
122 addr = *dsp_address;
123 sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
124
125
126 while (sect != NULL) {
127 if (addr <= sect->addr) {
128
129 if ((addr + size > sect->addr) || (prev_sect &&
130 (prev_sect->addr +
131 prev_sect->size >
132 addr))) {
133 status = -ENXIO;
134 }
135 break;
136 }
137 prev_sect = sect;
138 sect = (struct rmm_ovly_sect *)lst_next(target->ovly_list,
139 (struct list_head *)
140 sect);
141 }
142 if (!status) {
143
144 new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
145 if (new_sect == NULL) {
146 status = -ENOMEM;
147 } else {
148 lst_init_elem((struct list_head *)new_sect);
149 new_sect->addr = addr;
150 new_sect->size = size;
151 new_sect->page = segid;
152 if (sect == NULL) {
153
154 lst_put_tail(target->ovly_list,
155 (struct list_head *)new_sect);
156 } else {
157
158 lst_insert_before(target->ovly_list,
159 (struct list_head *)new_sect,
160 (struct list_head *)sect);
161 }
162 }
163 }
164func_end:
165 return status;
166}
167
168
169
170
171int rmm_create(struct rmm_target_obj **target_obj,
172 struct rmm_segment seg_tab[], u32 num_segs)
173{
174 struct rmm_header *hptr;
175 struct rmm_segment *sptr, *tmp;
176 struct rmm_target_obj *target;
177 s32 i;
178 int status = 0;
179
180 DBC_REQUIRE(target_obj != NULL);
181 DBC_REQUIRE(num_segs == 0 || seg_tab != NULL);
182
183
184 target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
185
186 if (target == NULL)
187 status = -ENOMEM;
188
189 if (status)
190 goto func_cont;
191
192 target->num_segs = num_segs;
193 if (!(num_segs > 0))
194 goto func_cont;
195
196
197 target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
198 GFP_KERNEL);
199 if (target->free_list == NULL) {
200 status = -ENOMEM;
201 } else {
202
203 for (i = 0; i < (s32) num_segs; i++) {
204 target->free_list[i] =
205 kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
206 if (target->free_list[i] == NULL) {
207 status = -ENOMEM;
208 break;
209 }
210 }
211
212 target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
213 GFP_KERNEL);
214 if (target->seg_tab == NULL) {
215 status = -ENOMEM;
216 } else {
217
218 sptr = target->seg_tab;
219 for (i = 0, tmp = seg_tab; num_segs > 0;
220 num_segs--, i++) {
221 *sptr = *tmp;
222 hptr = target->free_list[i];
223 hptr->addr = tmp->base;
224 hptr->size = tmp->length;
225 hptr->next = NULL;
226 tmp++;
227 sptr++;
228 }
229 }
230 }
231func_cont:
232
233 if (!status) {
234 target->ovly_list = kzalloc(sizeof(struct lst_list),
235 GFP_KERNEL);
236 if (target->ovly_list == NULL)
237 status = -ENOMEM;
238 else
239 INIT_LIST_HEAD(&target->ovly_list->head);
240 }
241
242 if (!status) {
243 *target_obj = target;
244 } else {
245 *target_obj = NULL;
246 if (target)
247 rmm_delete(target);
248
249 }
250
251 DBC_ENSURE((!status && *target_obj)
252 || (status && *target_obj == NULL));
253
254 return status;
255}
256
257
258
259
260void rmm_delete(struct rmm_target_obj *target)
261{
262 struct rmm_ovly_sect *ovly_section;
263 struct rmm_header *hptr;
264 struct rmm_header *next;
265 u32 i;
266
267 DBC_REQUIRE(target);
268
269 kfree(target->seg_tab);
270
271 if (target->ovly_list) {
272 while ((ovly_section = (struct rmm_ovly_sect *)lst_get_head
273 (target->ovly_list))) {
274 kfree(ovly_section);
275 }
276 DBC_ASSERT(LST_IS_EMPTY(target->ovly_list));
277 kfree(target->ovly_list);
278 }
279
280 if (target->free_list != NULL) {
281
282 for (i = 0; i < target->num_segs; i++) {
283 hptr = next = target->free_list[i];
284 while (next) {
285 hptr = next;
286 next = hptr->next;
287 kfree(hptr);
288 }
289 }
290 kfree(target->free_list);
291 }
292
293 kfree(target);
294}
295
296
297
298
299void rmm_exit(void)
300{
301 DBC_REQUIRE(refs > 0);
302
303 refs--;
304
305 DBC_ENSURE(refs >= 0);
306}
307
308
309
310
311bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
312 bool reserved)
313{
314 struct rmm_ovly_sect *sect;
315 bool ret = true;
316
317 DBC_REQUIRE(target);
318
319 DBC_REQUIRE(reserved || segid < target->num_segs);
320 DBC_REQUIRE(reserved || (dsp_addr >= target->seg_tab[segid].base &&
321 (dsp_addr + size) <= (target->seg_tab[segid].
322 base +
323 target->seg_tab[segid].
324 length)));
325
326
327
328
329 if (!reserved) {
330 ret = free_block(target, segid, dsp_addr, size);
331 if (ret)
332 target->seg_tab[segid].number--;
333
334 } else {
335
336 sect = (struct rmm_ovly_sect *)lst_first(target->ovly_list);
337 while (sect != NULL) {
338 if (dsp_addr == sect->addr) {
339 DBC_ASSERT(size == sect->size);
340
341 lst_remove_elem(target->ovly_list,
342 (struct list_head *)sect);
343 kfree(sect);
344 break;
345 }
346 sect =
347 (struct rmm_ovly_sect *)lst_next(target->ovly_list,
348 (struct list_head
349 *)sect);
350 }
351 if (sect == NULL)
352 ret = false;
353
354 }
355 return ret;
356}
357
358
359
360
361bool rmm_init(void)
362{
363 DBC_REQUIRE(refs >= 0);
364
365 refs++;
366
367 return true;
368}
369
370
371
372
373bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
374 struct dsp_memstat *mem_stat_buf)
375{
376 struct rmm_header *head;
377 bool ret = false;
378 u32 max_free_size = 0;
379 u32 total_free_size = 0;
380 u32 free_blocks = 0;
381
382 DBC_REQUIRE(mem_stat_buf != NULL);
383 DBC_ASSERT(target != NULL);
384
385 if ((u32) segid < target->num_segs) {
386 head = target->free_list[segid];
387
388
389 while (head != NULL) {
390 max_free_size = max(max_free_size, head->size);
391 total_free_size += head->size;
392 free_blocks++;
393 head = head->next;
394 }
395
396
397 mem_stat_buf->ul_size = target->seg_tab[segid].length;
398
399
400 mem_stat_buf->ul_num_free_blocks = free_blocks;
401
402
403 mem_stat_buf->ul_total_free_size = total_free_size;
404
405
406 mem_stat_buf->ul_len_max_free_block = max_free_size;
407
408
409 mem_stat_buf->ul_num_alloc_blocks =
410 target->seg_tab[segid].number;
411
412 ret = true;
413 }
414
415 return ret;
416}
417
418
419
420
421
422
423static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
424 u32 align, u32 *dsp_address)
425{
426 struct rmm_header *head;
427 struct rmm_header *prevhead = NULL;
428 struct rmm_header *next;
429 u32 tmpalign;
430 u32 alignbytes;
431 u32 hsize;
432 u32 allocsize;
433 u32 addr;
434
435 alignbytes = (align == 0) ? 1 : align;
436 prevhead = NULL;
437 head = target->free_list[segid];
438
439 do {
440 hsize = head->size;
441 next = head->next;
442
443 addr = head->addr;
444
445
446 (tmpalign = (u32) addr % alignbytes);
447 if (tmpalign != 0)
448 tmpalign = alignbytes - tmpalign;
449
450 allocsize = size + tmpalign;
451
452 if (hsize >= allocsize) {
453 if (hsize == allocsize && prevhead != NULL) {
454 prevhead->next = next;
455 kfree(head);
456 } else {
457 head->size = hsize - allocsize;
458 head->addr += allocsize;
459 }
460
461
462 if (tmpalign)
463 free_block(target, segid, addr, tmpalign);
464
465 *dsp_address = addr + tmpalign;
466 return true;
467 }
468
469 prevhead = head;
470 head = next;
471
472 } while (head != NULL);
473
474 return false;
475}
476
477
478
479
480
481
482
483
/*
 *  ======== free_block ========
 *  Insert the block [addr, addr+size) into segment segid's free list,
 *  which is kept sorted by ascending address, and coalesce it with
 *  adjacent free blocks.  Returns false only if the new list header
 *  cannot be allocated.
 *
 *  Invariant relied upon below: the free list is already maximally
 *  coalesced, so two pre-existing nodes are never contiguous.
 *  NOTE(review): a double free (addr equal to an existing node's addr)
 *  is not detected here — presumed to be a caller contract; confirm.
 */
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
		       u32 size)
{
	struct rmm_header *head;
	struct rmm_header *thead;
	struct rmm_header *rhead;
	bool ret = true;

	/* Create a memory header to hold the newly freed block */
	rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
	if (rhead == NULL) {
		ret = false;
	} else {
		/* search down the free list to find the right place for addr */
		head = target->free_list[segid];

		if (addr >= head->addr) {
			/* advance to the last node whose addr precedes the
			 * new block; rhead is linked in after it */
			while (head->next != NULL && addr > head->next->addr)
				head = head->next;

			thead = head->next;

			head->next = rhead;
			rhead->next = thead;
			rhead->addr = addr;
			rhead->size = size;
		} else {
			/* new block precedes the first node: keep the first
			 * node's identity (free_list[segid] must stay valid)
			 * by copying its contents into rhead and writing the
			 * new block into the existing head node */
			*rhead = *head;
			head->next = rhead;
			head->addr = addr;
			head->size = size;
			thead = rhead->next;
		}

		/* coalesce with the following block when contiguous; only
		 * reachable when rhead holds the new block (see invariant) */
		if (thead != NULL && (rhead->addr + rhead->size) ==
		    thead->addr) {
			head->next = rhead->next;
			thead->size = size + thead->size;
			thead->addr = addr;
			kfree(rhead);
			rhead = thead;
		}

		/* coalesce with the preceding block when contiguous */
		if ((head->addr + head->size) == rhead->addr) {
			head->next = rhead->next;
			head->size = head->size + rhead->size;
			kfree(rhead);
		}
	}

	return ret;
}
538