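/*
 * cmm.c
 *
 * Communication (shared) memory management (CMM) for DSP/BIOS Bridge.
 * Carves registered GPP shared-memory segments into blocks, hands them
 * out to clients, and translates between GPP virtual, GPP physical and
 * DSP physical addresses.
 */
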
#include <linux/types.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/list.h>
#include <dspbridge/sync.h>
#include <dspbridge/utildefs.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/cmm.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
/* Physical address directly following a memory node */
#define NEXT_PA(pnode)	((pnode)->dw_pa + (pnode)->ul_size)

/*
 * DSP PA <-> GPP PA translation. 'base' is unused: the conversion is a
 * plain signed offset, y = dw_dsp_phys_addr_offset * c_factor, applied
 * to the address x.
 */
#define DSPPA2GPPPA(base, x, y) ((x)+(y))
#define GPPPA2DSPPA(base, x, y) ((x)-(y))

/* A shared-memory (SM) allocator manages one registered GPP SM segment */
struct cmm_allocator {
	unsigned int shm_base;	/* start of physical SM block */
	u32 ul_sm_size;		/* size of SM block in bytes */
	unsigned int dw_vm_base;	/* start of VM block */
	/* DSP PA to GPP PA offset for this SM space */
	u32 dw_dsp_phys_addr_offset;
	/* offset conversion factor: CMM_ADDTODSPPA or CMM_SUBFROMDSPPA */
	s8 c_factor;
	unsigned int dw_dsp_base;	/* DSP virtual base address */
	u32 ul_dsp_size;	/* DSP segment size in bytes */
	struct cmm_object *hcmm_mgr;	/* back-reference to parent manager */
	/* node list of available memory */
	struct lst_list *free_list_head;
	/* node list of memory in use */
	struct lst_list *in_use_list_head;
};

/* A translator maps buffer addresses between client VM and SM PA spaces */
struct cmm_xlator {
	/* CMM manager this translator is associated with */
	struct cmm_object *hcmm_mgr;
	/*
	 * Client process virtual base address corresponding to the physical
	 * SM base address of segment ul_seg_id. Only one segment ID is
	 * currently supported.
	 */
	unsigned int dw_virt_base;	/* virtual base address */
	u32 ul_virt_size;	/* size of virtual space in bytes */
	u32 ul_seg_id;		/* SM segment Id */
};

/* CMM manager object */
struct cmm_object {
	/* serializes access to the manager for multiple threads */
	struct mutex cmm_lock;
	struct lst_list *node_free_list_head;	/* pool of recycled mnodes */
	u32 ul_min_block_size;	/* min SM block size; default 16 bytes */
	u32 dw_page_size;	/* memory page size */
	/* GPP SM segment pointers, indexed by (seg_id - 1) */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM manager attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* ul_min_block_size: minimum block size allocated by the manager */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* ul_seg_id: default segment Id */
};

/* Default translator attributes: segment 1, no VM space reserved */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	1,		/* ul_seg_id */
	0,
	0,
	NULL,
	0,
};

/* SM node: describes a block of shared memory */
struct cmm_mnode {
	struct list_head link;	/* must be first element */
	u32 dw_pa;		/* physical address of the block */
	u32 dw_va;		/* virtual address in device process context */
	u32 ul_size;		/* SM block size in bytes */
	u32 client_proc;	/* TGID of the allocating process */
};

/* module reference count */
static u32 refs;

/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);
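
/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate an SM buffer, zero its contents, and return the buffer's
 *      physical address. Optionally returns the driver-context virtual
 *      address through pp_buf_va.
 *
 *      The free list is kept in increasing-size order: take the first
 *      block large enough, split any remainder back onto the free list,
 *      and move the kept block to the in-use list.
 */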
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	/* Only SM segments (ul_seg_id > 0) are supported; folding the checks
	 * together also keeps mutex_lock/mutex_unlock balanced, which the
	 * previous structure did not when ul_seg_id was 0. */
	if (cmm_mgr_obj && (usize != 0) && (pattrs->ul_seg_id > 0)) {
		/* get the allocator object for this segment id */
		allocator = get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
		/* round size up to a multiple of ul_min_block_size */
		usize = ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size - 1))
		    + cmm_mgr_obj->ul_min_block_size;
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		pnode = get_free_block(allocator, usize);
		if (pnode) {
			delta_size = (pnode->ul_size - usize);
			if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
				/* split the block: the remainder goes back
				 * on the free list */
				new_node =
				    get_node(cmm_mgr_obj, pnode->dw_pa + usize,
					     pnode->dw_va + usize,
					     (u32) delta_size);
				if (new_node) {
					add_to_free_list(allocator, new_node);
					/* adjust our node's size */
					pnode->ul_size = usize;
				}
			}
			/* Tag the node with the client process (TGID) so its
			 * SM can be reclaimed if the process goes away. */
			pnode->client_proc = current->tgid;

			/* put our node on the in-use list */
			lst_put_tail(allocator->in_use_list_head,
				     (struct list_head *)pnode);
			buf_pa = (void *)pnode->dw_pa;
			/* clear the memory */
			pbyte = (u8 *) pnode->dw_va;
			for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
				*pbyte = 0;

			if (pp_buf_va != NULL) {
				/* virtual address */
				*pp_buf_va = (void *)pnode->dw_va;
			}
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return buf_pa;
}
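
/*
 *  ======== cmm_create ========
 *  Purpose:
 *      Create a new instance of the CMM manager.
 */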
int cmm_create(struct cmm_object **ph_cmm_mgr,
	       struct dev_object *hdev_obj,
	       const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;
	struct util_sysinfo sys_info;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (cmm_obj != NULL) {
		if (mgr_attrts == NULL)
			mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

		/* 4 bytes minimum */
		DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
		/* save away the smallest block allocation for this manager */
		cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
		/* save away the system's memory page size */
		sys_info.dw_page_size = PAGE_SIZE;
		sys_info.dw_allocation_granularity = PAGE_SIZE;
		sys_info.dw_number_of_processors = 1;

		cmm_obj->dw_page_size = sys_info.dw_page_size;

		/* create node free list */
		cmm_obj->node_free_list_head =
		    kzalloc(sizeof(struct lst_list), GFP_KERNEL);
		if (cmm_obj->node_free_list_head == NULL) {
			/* Nothing else has been allocated yet and cmm_lock
			 * has not been initialized, so don't go through
			 * cmm_destroy() (which would lock it); just free
			 * the object. */
			kfree(cmm_obj);
			status = -ENOMEM;
		} else {
			INIT_LIST_HEAD(&cmm_obj->node_free_list_head->head);
			mutex_init(&cmm_obj->cmm_lock);
			*ph_cmm_mgr = cmm_obj;
		}
	} else {
		status = -ENOMEM;
	}
	return status;
}
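
/*
 *  ======== cmm_destroy ========
 *  Purpose:
 *      Release the communication memory manager resources.
 */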
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *pnode;

	DBC_REQUIRE(refs > 0);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/* If not force, fail if outstanding allocations exist. Query before
	 * taking cmm_lock: cmm_get_info() acquires that lock itself, so
	 * calling it with the lock held would self-deadlock. */
	if (!force) {
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status && temp_info.ul_total_in_use_cnt > 0)
			status = -EPERM;	/* outstanding allocations */
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	/* Only tear down state if destruction is actually going ahead;
	 * otherwise the still-live manager would be left with freed lists. */
	if (!status) {
		/* unregister all SM allocators */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* Set slot to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
		if (cmm_mgr_obj->node_free_list_head != NULL) {
			/* Free the memory nodes pooled on the free list */
			while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
				pnode = (struct cmm_mnode *)
				    lst_get_head(cmm_mgr_obj->
						 node_free_list_head);
				kfree(pnode);
			}
			/* delete the node free list itself */
			kfree(cmm_mgr_obj->node_free_list_head);
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* delete the lock and the cmm mgr object */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}
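
/*
 *  ======== cmm_exit ========
 *  Purpose:
 *      Discontinue usage of the module; decrement the reference count.
 */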
void cmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}
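
/*
 *  ======== cmm_free_buf ========
 *  Purpose:
 *      Free the given buffer: move its node from the in-use list back to
 *      the free list, coalescing with neighbours where possible.
 */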
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
		 u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *mnode_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	struct cmm_attrs *pattrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_pa != NULL);

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->ul_seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0)) {
		status = -EFAULT;
		return status;
	}
	/* find the block in the allocator's in-use list */
	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (allocator != NULL) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		mnode_obj =
		    (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
		while (mnode_obj) {
			if ((u32) buf_pa == mnode_obj->dw_pa) {
				/* found it */
				lst_remove_elem(allocator->in_use_list_head,
						(struct list_head *)mnode_obj);
				/* back onto the free list */
				add_to_free_list(allocator, mnode_obj);
				status = 0;
				break;
			}
			/* next node */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->in_use_list_head,
				     (struct list_head *)mnode_obj);
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return status;
}
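
/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device,
 *      or for the default device if hprocessor is NULL.
 */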
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();

	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}
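
/*
 *  ======== cmm_get_info ========
 *  Purpose:
 *      Return the current SM segment and utilization information.
 */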
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *mnode_obj = NULL;

	DBC_REQUIRE(cmm_info_obj != NULL);

	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->ul_num_gppsm_segs = 0;	/* # of SM segments */
	/* total # of outstanding allocations */
	cmm_info_obj->ul_total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (altr != NULL) {
			cmm_info_obj->ul_num_gppsm_segs++;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
			    altr->shm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
			    altr->ul_dsp_size + altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
			    altr->shm_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
			    altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
			    altr->dw_dsp_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
			    altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
			    altr->dw_vm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
			mnode_obj = (struct cmm_mnode *)
			    lst_first(altr->in_use_list_head);
			/* count in-use blocks */
			while (mnode_obj) {
				cmm_info_obj->ul_total_in_use_cnt++;
				cmm_info_obj->seg_info[ul_seg -
						       1].ul_in_use_cnt++;
				/* next node */
				mnode_obj = (struct cmm_mnode *)
				    lst_next(altr->in_use_list_head,
					     (struct list_head *)mnode_obj);
			}
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
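
/*
 *  ======== cmm_init ========
 *  Purpose:
 *      Initialize the module; increment the reference count.
 */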
bool cmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);
	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}
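
/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */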
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dsp_addr_offset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(sgmt_id != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
		    (c_factor >= CMM_SUBFROMDSPPA));
	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
		"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__,
		dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
		ul_dsp_size, gpp_base_va);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/* make sure there is room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}
	/* ul_size must be big enough for at least one minimum block */
	if (ul_size < cmm_mgr_obj->ul_min_block_size) {
		status = -EINVAL;
		goto func_end;
	}

	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (psma != NULL) {
		psma->hcmm_mgr = hcmm_mgr;	/* ref back to parent */
		psma->shm_base = dw_gpp_base_pa;	/* SM base phys */
		psma->ul_sm_size = ul_size;	/* SM segment size in bytes */
		psma->dw_vm_base = gpp_base_va;
		psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
		psma->c_factor = c_factor;
		psma->dw_dsp_base = dw_dsp_base;
		psma->ul_dsp_size = ul_dsp_size;
		if (psma->dw_vm_base == 0) {
			status = -EPERM;
			goto func_end;
		}
		/* return the actual segment identifier */
		*sgmt_id = (u32) slot_seg + 1;
		/* create memory free list */
		psma->free_list_head = kzalloc(sizeof(struct lst_list),
					       GFP_KERNEL);
		if (psma->free_list_head == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		INIT_LIST_HEAD(&psma->free_list_head->head);

		/* create memory in-use list */
		psma->in_use_list_head = kzalloc(sizeof(struct
							lst_list), GFP_KERNEL);
		if (psma->in_use_list_head == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		INIT_LIST_HEAD(&psma->in_use_list_head->head);

		/* get a mem node to cover the whole segment */
		new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
				    psma->dw_vm_base, ul_size);
		/* place the node on the allocator's free list */
		if (new_node) {
			lst_put_tail(psma->free_list_head,
				     (struct list_head *)new_node);
		} else {
			status = -ENOMEM;
			goto func_end;
		}
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make the entry */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	/* clean up the allocator on any failure */
	if (status && psma)
		un_register_gppsm_seg(psma);

	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
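
/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  Purpose:
 *      Unregister the given SM segment, or all segments when ul_seg_id
 *      is CMM_ALLSEGMENTS.
 */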
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	DBC_REQUIRE(ul_seg_id > 0);
	if (hcmm_mgr) {
		if (ul_seg_id == CMM_ALLSEGMENTS)
			ul_id = 1;

		if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
			while (ul_id <= CMM_MAXGPPSEGS) {
				mutex_lock(&cmm_mgr_obj->cmm_lock);
				/* slot = seg_id - 1 */
				psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
				if (psma != NULL) {
					un_register_gppsm_seg(psma);
					/* Set alctr ptr to NULL for future
					 * reuse */
					cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
								      1] = NULL;
				} else if (ul_seg_id != CMM_ALLSEGMENTS) {
					status = -EPERM;
				}
				mutex_unlock(&cmm_mgr_obj->cmm_lock);
				if (ul_seg_id != CMM_ALLSEGMENTS)
					break;

				ul_id++;
			}	/* end while */
		} else {
			status = -EINVAL;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}
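
/*
 *  ======== un_register_gppsm_seg ========
 *  Purpose:
 *      Free the allocator's node lists and the allocator itself.
 *      Caller holds cmm_lock (or the manager is being torn down).
 */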
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *mnode_obj = NULL;
	struct cmm_mnode *next_node = NULL;

	DBC_REQUIRE(psma != NULL);
	if (psma->free_list_head != NULL) {
		/* free nodes on free list */
		mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
		while (mnode_obj) {
			next_node =
			    (struct cmm_mnode *)lst_next(psma->free_list_head,
							 (struct list_head *)
							 mnode_obj);
			lst_remove_elem(psma->free_list_head,
					(struct list_head *)mnode_obj);
			kfree((void *)mnode_obj);
			/* next node */
			mnode_obj = next_node;
		}
		kfree(psma->free_list_head);	/* delete free list */
	}
	/* Check the in-use list separately: it may not have been allocated
	 * yet if registration failed part-way through. */
	if (psma->in_use_list_head != NULL) {
		/* free nodes on in-use list */
		mnode_obj =
		    (struct cmm_mnode *)lst_first(psma->in_use_list_head);
		while (mnode_obj) {
			next_node =
			    (struct cmm_mnode *)lst_next(psma->in_use_list_head,
							 (struct list_head *)
							 mnode_obj);
			lst_remove_elem(psma->in_use_list_head,
					(struct list_head *)mnode_obj);
			kfree((void *)mnode_obj);
			/* next node */
			mnode_obj = next_node;
		}
		kfree(psma->in_use_list_head);	/* delete in-use list */
	}
	if (psma->dw_vm_base != 0)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);

	/* free the allocator itself */
	kfree(psma);
}
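
/*
 *  ======== get_slot ========
 *  Purpose:
 *      Return the index of the first free slot in the segment table,
 *      or -1 if all CMM_MAXGPPSEGS slots are taken.
 */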
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg = -1;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	/* find the first NULL slot in the segment table */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* no free slots */

	return slot_seg;
}
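
/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from the manager's free pool, or allocate a new
 *      one, and initialize it for the given block.
 */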
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode = NULL;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE(dw_pa != 0);
	DBC_REQUIRE(dw_va != 0);
	DBC_REQUIRE(ul_size != 0);
	/* check the manager's node free pool first */
	if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
	} else {
		/* recycle a pooled node */
		pnode = (struct cmm_mnode *)
		    lst_get_head(cmm_mgr_obj->node_free_list_head);
	}
	if (pnode) {
		lst_init_elem((struct list_head *)pnode);
		pnode->dw_pa = dw_pa;	/* physical addr of block start */
		pnode->dw_va = dw_va;	/* virtual addr of block start */
		pnode->ul_size = ul_size;	/* size of block */
	}
	return pnode;
}
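
/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node back on the manager's node free pool for later
 *      reuse. Doesn't actually free the node.
 */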
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	lst_init_elem((struct list_head *)pnode);
	lst_put_tail(cmm_mgr_obj->node_free_list_head,
		     (struct list_head *)pnode);
}
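
/*
 *  ======== get_free_block ========
 *  Purpose:
 *      Scan the size-ordered free list and pull off the first block big
 *      enough to satisfy the request. Returns NULL if none fits.
 */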
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	if (allocator) {
		struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
		    lst_first(allocator->free_list_head);
		while (mnode_obj) {
			if (usize <= (u32) mnode_obj->ul_size) {
				lst_remove_elem(allocator->free_list_head,
						(struct list_head *)mnode_obj);
				return mnode_obj;
			}
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		}
	}
	return NULL;
}
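
/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce the block with physically adjacent free blocks, then
 *      insert it into the free list in increasing-size order.
 */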
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode)
{
	struct cmm_mnode *node_prev = NULL;
	struct cmm_mnode *node_next = NULL;
	struct cmm_mnode *mnode_obj;
	u32 dw_this_pa;
	u32 dw_next_pa;

	DBC_REQUIRE(pnode != NULL);
	DBC_REQUIRE(allocator != NULL);
	dw_this_pa = pnode->dw_pa;
	dw_next_pa = NEXT_PA(pnode);
	/* look for the free blocks physically adjacent to ours */
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (dw_this_pa == NEXT_PA(mnode_obj)) {
			/* found the block immediately before ours */
			node_prev = mnode_obj;
		} else if (dw_next_pa == mnode_obj->dw_pa) {
			/* found the block immediately after ours */
			node_next = mnode_obj;
		}
		if ((node_prev == NULL) || (node_next == NULL)) {
			/* next node */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		} else {
			/* found both neighbours; quit the loop */
			break;
		}
	}
	if (node_prev != NULL) {
		/* combine with the previous block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_prev);
		/* grow our node and recycle the previous one */
		pnode->ul_size += node_prev->ul_size;
		pnode->dw_pa = node_prev->dw_pa;
		pnode->dw_va = node_prev->dw_va;
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_prev);
	}
	if (node_next != NULL) {
		/* combine with the next block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_next);
		/* grow our node and recycle the next one */
		pnode->ul_size += node_next->ul_size;
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_next);
	}
	/* now add to the free list in increasing-size order */
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (pnode->ul_size <= mnode_obj->ul_size)
			break;
		/* next node */
		mnode_obj =
		    (struct cmm_mnode *)lst_next(allocator->free_list_head,
						 (struct list_head *)mnode_obj);
	}
	/* if mnode_obj is NULL, our node belongs at the end of the list */
	if (mnode_obj == NULL) {
		lst_put_tail(allocator->free_list_head,
			     (struct list_head *)pnode);
	} else {
		/* insert our node before the currently traversed node */
		lst_insert_before(allocator->free_list_head,
				  (struct list_head *)pnode,
				  (struct list_head *)mnode_obj);
	}
}
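
/*
 *  ======== get_allocator ========
 *  Purpose:
 *      Return the allocator for the given SM segment Id, or NULL if the
 *      segment is not registered.
 */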
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));

	/* seg_id is 1-based; the table is 0-based; NULL means unregistered */
	return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}
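
/*
 * The CMM translator (xlator) services below let a client translate SM
 * buffer addresses between the GPP virtual, GPP physical and DSP
 * physical address spaces.
 */

/*
 *  ======== cmm_xlator_create ========
 *  Purpose:
 *      Create a translator object used for process-specific Va <-> Pa
 *      address translation.
 */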
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
		      struct cmm_object *hcmm_mgr,
		      struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(hcmm_mgr != NULL);

	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->hcmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM segment Id */
		xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}
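
/*
 *  ======== cmm_xlator_alloc_buf ========
 *  Purpose:
 *      Allocate an SM buffer and write its translated virtual address
 *      back through va_buf.
 */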
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
	DBC_REQUIRE(va_buf != NULL);
	DBC_REQUIRE(pa_size > 0);
	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

	if (xlator_obj) {
		attrs.ul_seg_id = xlator_obj->ul_seg_id;
		__raw_writel(0, va_buf);
		/* SM allocation (with coalescing on the free list) */
		pbuf =
		    cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to the translator's virtual address and
			 * hand it back through va_buf */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}
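
/*
 *  ======== cmm_xlator_free_buf ========
 *  Purpose:
 *      Free the SM buffer identified by its translator virtual address.
 */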
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_va != NULL);
	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

	if (xlator_obj) {
		/* convert Va to Pa so we can free it */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
					      xlator_obj->ul_seg_id);
			if (status) {
				/* Uh oh, this shouldn't happen: the
				 * descriptor is gone and CMM is leaking. */
				DBC_ASSERT(false);
			}
		}
	}
	return status;
}
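
/*
 *  ======== cmm_xlator_info ========
 *  Purpose:
 *      Set or get the translator's virtual address range.
 */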
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
		    u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));

	if (xlator_obj) {
		if (set_info) {
			/* set the translator's virtual address range */
			xlator_obj->dw_virt_base = (u32) *paddr;
			xlator_obj->ul_virt_size = ul_size;
		} else {	/* return the virtual base address */
			*paddr = (u8 *) xlator_obj->dw_virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}
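
/*
 *  ======== cmm_xlator_translate ========
 *  Purpose:
 *      Translate paddr between the GPP virtual, GPP physical and DSP
 *      physical address spaces, according to xtype.
 */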
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));

	if (!xlator_obj)
		goto func_end;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
	/* get this translator's default SM allocator */
	DBC_ASSERT(xlator_obj->ul_seg_id > 0);
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
	if (!allocator)
		goto func_end;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va base + offset */
			dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
							   allocator->
							   ul_dsp_size);
			dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
			/* check that the translated Va is in range */
			if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->dw_virt_base +
			      xlator_obj->ul_virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp Pa = Gpp base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->ul_dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* now convert to the target physical address space if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* have a Gpp Pa; convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
				dw_addr_xlate,
				allocator->dw_dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* have a DSP Pa; convert to Gpp Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
				dw_addr_xlate,
				allocator->dw_dsp_phys_addr_offset *
				allocator->c_factor);
	}
func_end:
	return (void *)dw_addr_xlate;
}