/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared-memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared-memory segments can be registered with CMM. Each
 * registered segment is represented by an SM "allocator" that carves
 * allocations out of a block of physically contiguous shared memory.
 * Freed blocks are coalesced back onto the allocator's free list.
 *
 * Notes:
 *   va: GPP virtual address.
 *   pa: physical (or kernel system) address.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/cmm.h>

/* Physical address directly past the end of a memory node */
#define NEXT_PA(pnode)	((pnode)->pa + (pnode)->size)

/*
 * GPP <-> DSP physical address translation. The two spaces differ by a
 * constant, signed offset (dsp_phys_addr_offset * c_factor), always
 * passed in as 'y'; 'base' is unused on this platform.
 */
#define DSPPA2GPPPA(base, x, y)	((x) + (y))
#define GPPPA2DSPPA(base, x, y)	((x) - (y))
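
/*
 * Round-trip sketch of the translation macros with hypothetical values
 * (the addresses and offset below are made up, not from any real board
 * configuration):
 *
 *	u32 gpp_pa = 0x87001000;
 *	s32 conv   = 0x06000000;  (i.e. dsp_phys_addr_offset * c_factor)
 *	u32 dsp_pa = GPPPA2DSPPA(0, gpp_pa, conv);  yields 0x81001000
 *	u32 back   = DSPPA2GPPPA(0, dsp_pa, conv);  yields 0x87001000
 */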

/*
 * Single shared-memory (SM) segment allocator. One of these is created
 * per registered GPP SM segment; it owns the free and in-use node lists
 * for that segment.
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* start of physical SM block */
	u32 sm_size;		/* size of SM block in bytes */
	unsigned int vm_base;	/* start of VM block (driver context) */
	/* DSP PA to GPP PA offset for this SM space */
	u32 dsp_phys_addr_offset;
	s8 c_factor;		/* DSP PA to GPP PA conversion factor */
	unsigned int dsp_base;	/* DSP virtual base byte address */
	u32 dsp_size;		/* DSP segment size in bytes */
	struct cmm_object *cmm_mgr;	/* back-reference to parent manager */
	/* node list of available memory */
	struct list_head free_list;
	/* node list of memory in use */
	struct list_head in_use_list;
};
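
/*
 * Hypothetical layout of one registered segment. The split between the
 * DSP-only part and the GPP SM heap is an inference drawn from how
 * cmm_get_info() reports seg_base_pa as shm_base - dsp_size:
 *
 *	|<------ dsp_size ------>|<---------- sm_size ---------->|
 *	+-------------------------+-------------------------------+
 *	| DSP-side portion        | GPP SM heap (the free/in-use  |
 *	|                         | lists carve up this range)    |
 *	+-------------------------+-------------------------------+
 *	^                         ^
 *	seg_base_pa               shm_base
 */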

/* Pa <-> Va address translator object */
struct cmm_xlator {
	/* CMM object this translator is associated with */
	struct cmm_object *cmm_mgr;
	/*
	 * Client-process virtual base address corresponding to the
	 * physical SM base address of this translator's seg_id.
	 * Only one segment id is currently supported.
	 */
	unsigned int virt_base;	/* virtual base address */
	u32 virt_size;		/* size of virtual space in bytes */
	u32 seg_id;		/* segment id */
};

/* CMM manager object */
struct cmm_object {
	/*
	 * cmm_lock serializes access to the memory manager across
	 * multiple threads.
	 */
	struct mutex cmm_lock;
	struct list_head node_free_list;	/* recycled cmm_mnode pool */
	u32 min_block_size;	/* min SM block size; default 16 bytes */
	u32 page_size;		/* memory page size */
	/* GPP SM segment pointers, one slot per registered segment */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM manager attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* min_block_size: minimum SM block size, in bytes */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* seg_id: default segment id */
};

/* Default address-translator attributes */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* seg_id (need not match cmm_dfltalctattrs.seg_id) */
	1,
	0,			/* dsp_bufs */
	0,			/* dsp_buf_size */
	NULL,			/* vm_base */
	0,			/* vm_size */
};

/* SM node: describes a single block of shared memory */
struct cmm_mnode {
	struct list_head link;	/* must be first element */
	u32 pa;			/* physical address */
	u32 va;			/* virtual address in device process context */
	u32 size;		/* SM block size in bytes */
	u32 client_proc;	/* TGID of the process that allocated it */
};

/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get an available slot for a new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate a SM buffer, zero its contents, and return the buffer's
 *      physical address; optionally return the driver-context virtual
 *      address in pp_buf_va.
 *
 *      The free list is kept in increasing-size order. Take the first
 *      block that satisfies the request, split off any remainder back
 *      onto the free list, and move the kept block to the in-use list.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	/*
	 * seg_id > 0 is SM. Taking the lock only on this path also
	 * avoids unlocking a mutex that was never locked, which the
	 * previous structure allowed when seg_id was 0.
	 */
	if (cmm_mgr_obj && (usize != 0) && (pattrs->seg_id > 0)) {
		/* get the allocator object for this segment id */
		allocator = get_allocator(cmm_mgr_obj, pattrs->seg_id);
		/* round usize up to the next multiple of min_block_size */
		usize = ((usize - 1) & ~(cmm_mgr_obj->min_block_size - 1))
		    + cmm_mgr_obj->min_block_size;
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		pnode = get_free_block(allocator, usize);
		if (pnode) {
			delta_size = (pnode->size - usize);
			if (delta_size >= cmm_mgr_obj->min_block_size) {
				/* create a new block from the leftovers
				 * and put it back on the free list */
				new_node =
				    get_node(cmm_mgr_obj, pnode->pa + usize,
					     pnode->va + usize,
					     (u32) delta_size);
				add_to_free_list(allocator, new_node);
				/* adjust our node's size */
				pnode->size = usize;
			}
			/*
			 * Tag the node with the client process that
			 * requested the allocation, so its SM can be
			 * reclaimed if the process goes away. Store the
			 * TGID rather than a process handle.
			 */
			pnode->client_proc = current->tgid;

			/* put our node on the in-use list */
			list_add_tail(&pnode->link, &allocator->in_use_list);
			buf_pa = (void *)pnode->pa;	/* physical address */
			/* zero out the block */
			memset((void *)pnode->va, 0, usize);

			/* optionally return the virtual address too */
			if (pp_buf_va != NULL)
				*pp_buf_va = (void *)pnode->va;
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return buf_pa;
}
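
/*
 * Usage sketch (hypothetical caller; 'mgr' is assumed to be a manager
 * obtained from cmm_create() with a segment already registered):
 *
 *	struct cmm_attrs attrs = { .seg_id = 1 };
 *	void *va;
 *	void *pa = cmm_calloc_buf(mgr, 256, &attrs, &va);
 *
 *	if (pa) {
 *		... use the zeroed buffer through 'va' ...
 *		cmm_free_buf(mgr, pa, attrs.seg_id);
 *	}
 */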

/*
 *  ======== cmm_create ========
 *  Purpose:
 *      Create a communication shared memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
	       struct dev_object *hdev_obj,
	       const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (!cmm_obj)
		return -ENOMEM;

	if (mgr_attrts == NULL)
		mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

	/* save mgr attrs; only the SM allocator's min block size is used */
	cmm_obj->min_block_size = mgr_attrts->min_block_size;
	cmm_obj->page_size = PAGE_SIZE;

	/* create the recycled-node pool and the manager lock */
	INIT_LIST_HEAD(&cmm_obj->node_free_list);
	mutex_init(&cmm_obj->cmm_lock);
	*ph_cmm_mgr = cmm_obj;

	return status;
}
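
/*
 * Lifecycle sketch (illustrative only; 'hdev_obj' and the segment
 * parameters below are placeholders, not real platform values):
 *
 *	struct cmm_object *mgr;
 *	u32 seg_id;
 *
 *	if (!cmm_create(&mgr, hdev_obj, NULL)) {
 *		cmm_register_gppsm_seg(mgr, gpp_pa, size, 0, 1,
 *				       dsp_base, dsp_size, &seg_id,
 *				       gpp_va);
 *		...
 *		cmm_un_register_gppsm_seg(mgr, CMM_ALLSEGMENTS);
 *		cmm_destroy(mgr, false);
 *	}
 */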

/*
 *  ======== cmm_destroy ========
 *  Purpose:
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *node, *tmp;

	if (!hcmm_mgr)
		return -EFAULT;

	/*
	 * If not forced, fail when outstanding allocations exist. This
	 * check must run before taking cmm_lock: cmm_get_info()
	 * acquires the lock itself, so calling it under the lock would
	 * deadlock.
	 */
	if (!force) {
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status && temp_info.total_in_use_cnt > 0)
			status = -EPERM;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* unregister all SM allocators */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* clear the slot for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
	}
	/* drain the recycled-node pool */
	list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
				 link) {
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* delete the lock and the manager object itself */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}

/*
 *  ======== cmm_free_buf ========
 *  Purpose:
 *      Free the given buffer back to its segment's free list.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;	/* fails unless the block is found */
	struct cmm_mnode *curr, *tmp;
	struct cmm_allocator *allocator;
	struct cmm_attrs *pattrs;

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0))
		return -EFAULT;

	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (!allocator)
		return status;

	/* find the block on the in-use list and move it back to free */
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
		if (curr->pa == (u32) buf_pa) {
			list_del(&curr->link);
			add_to_free_list(allocator, curr);
			status = 0;
			break;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}

/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device,
 *      or for the first device if no processor handle is given.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}
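
/*
 * Minimal lookup sketch. 'hproc' is a placeholder for a processor
 * handle obtained elsewhere in the proc API (an assumption, not shown
 * in this file):
 *
 *	struct cmm_object *mgr;
 *
 *	if (!cmm_get_handle(hproc, &mgr))
 *		... 'mgr' can now be passed to cmm_calloc_buf() etc. ...
 */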

/*
 *  ======== cmm_get_info ========
 *  Purpose:
 *      Return per-segment statistics and the totals for this manager.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *curr;

	if (!hcmm_mgr)
		return -EFAULT;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->num_gppsm_segs = 0;	/* # of SM segments */
	/* total # of outstanding allocations */
	cmm_info_obj->total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (!altr)
			continue;
		cmm_info_obj->num_gppsm_segs++;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
		    altr->shm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
		    altr->dsp_size + altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
		    altr->shm_base;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
		    altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
		    altr->dsp_base;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
		    altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
		    altr->vm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;
		/* count the in-use blocks on this segment */
		list_for_each_entry(curr, &altr->in_use_list, link) {
			cmm_info_obj->total_in_use_cnt++;
			cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
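
/*
 * Query sketch (assumes 'mgr' came from cmm_get_handle(); the
 * pr_debug line is this sketch's own, not something the driver
 * prints):
 *
 *	struct cmm_info info;
 *
 *	if (!cmm_get_info(mgr, &info))
 *		pr_debug("cmm: %u segs, %u blocks in use\n",
 *			 info.num_gppsm_segs, info.total_in_use_cnt);
 */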

/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dsp_addr_offset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
		__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
		dw_dsp_base, ul_dsp_size, gpp_base_va);

	if (!hcmm_mgr)
		return -EFAULT;

	mutex_lock(&cmm_mgr_obj->cmm_lock);

	/* make sure there is room for another allocator */
	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}

	/* ul_size must be big enough for at least one block */
	if (ul_size < cmm_mgr_obj->min_block_size) {
		status = -EINVAL;
		goto func_end;
	}

	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (!psma) {
		status = -ENOMEM;
		goto func_end;
	}

	/*
	 * Initialize the lists up front: the error path below hands
	 * psma to un_register_gppsm_seg(), which walks both lists, so
	 * they must never be left as zeroed memory.
	 */
	INIT_LIST_HEAD(&psma->free_list);
	INIT_LIST_HEAD(&psma->in_use_list);

	psma->cmm_mgr = hcmm_mgr;	/* ref to parent */
	psma->shm_base = dw_gpp_base_pa;	/* SM base phys */
	psma->sm_size = ul_size;	/* SM segment size in bytes */
	psma->vm_base = gpp_base_va;
	psma->dsp_phys_addr_offset = dsp_addr_offset;
	psma->c_factor = c_factor;
	psma->dsp_base = dw_dsp_base;
	psma->dsp_size = ul_dsp_size;
	if (psma->vm_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* return the actual segment identifier */
	*sgmt_id = (u32) slot_seg + 1;

	/* one free-list node initially spans the whole segment */
	new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
			    psma->vm_base, ul_size);
	if (new_node) {
		list_add_tail(&new_node->link, &psma->free_list);
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make the table entry */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	/* clean up the allocator on failure */
	if (status && psma)
		un_register_gppsm_seg(psma);
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}

/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  Purpose:
 *      Unregister one previously registered GPP SM segment, or all of
 *      them if ul_seg_id is CMM_ALLSEGMENTS.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	if (!hcmm_mgr)
		return -EFAULT;

	if (ul_seg_id == CMM_ALLSEGMENTS)
		ul_id = 1;	/* start from the first slot */

	if ((ul_id < 1) || (ul_id > CMM_MAXGPPSEGS))
		return -EINVAL;

	/*
	 * Note: the lock is dropped and re-taken on each pass, so in
	 * the CMM_ALLSEGMENTS case another thread could register a new
	 * segment while this loop is still unregistering.
	 */
	while (ul_id <= CMM_MAXGPPSEGS) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		/* slot = seg_id - 1 */
		psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
		if (psma != NULL) {
			un_register_gppsm_seg(psma);
			/* clear the slot for future reuse */
			cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
		} else if (ul_seg_id != CMM_ALLSEGMENTS) {
			status = -EPERM;	/* no such segment */
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
		if (ul_seg_id != CMM_ALLSEGMENTS)
			break;

		ul_id++;
	}
	return status;
}

/*
 *  ======== un_register_gppsm_seg ========
 *  Purpose:
 *      Tear down one SM allocator: free every node on both lists,
 *      unmap its VM block, and free the allocator itself. The caller
 *      holds cmm_lock or is the sole owner of psma.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *curr, *tmp;

	/* free nodes on the free list */
	list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	/* free nodes on the in-use list */
	list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	if (psma->vm_base)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);

	/* free the allocator itself */
	kfree(psma);
}

/*
 *  ======== get_slot ========
 *  Purpose:
 *      Return the first available slot in the segment table, or -1 if
 *      all CMM_MAXGPPSEGS slots are taken.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg;

	/* find the first empty slot */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* table is full */

	return slot_seg;
}

/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from the manager's recycled-node pool, or
 *      allocate a fresh one if the pool is empty.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode;

	/* check the recycled-node pool first */
	if (list_empty(&cmm_mgr_obj->node_free_list)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
		if (!pnode)
			return NULL;
	} else {
		/* the list is non-empty, so this entry is valid */
		pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
					 struct cmm_mnode, link);
		list_del_init(&pnode->link);
	}

	pnode->pa = dw_pa;
	pnode->va = dw_va;
	pnode->size = ul_size;

	return pnode;
}

/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node back on the manager's recycled-node pool for
 *      future reuse; its fields stay stale until get_node() resets
 *      them.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}

/*
 *  ======== get_free_block ========
 *  Purpose:
 *      Scan the allocator's free list for the first block that can hold
 *      usize bytes. Because add_to_free_list() inserts blocks in
 *      increasing-size order, the first fit is normally the best fit.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	struct cmm_mnode *node, *tmp;

	if (!allocator)
		return NULL;

	list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
		if (usize <= node->size) {
			list_del(&node->link);
			return node;
		}
	}

	return NULL;
}

/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce the node with a physically adjacent free block if one
 *      exists; otherwise insert it so the free list stays in
 *      increasing-size order. Only one merge is attempted per call, so
 *      a block sandwiched between two free neighbors is not fully
 *      coalesced until a later free.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *node)
{
	struct cmm_mnode *curr;

	if (!node) {
		pr_err("%s: failed - node is NULL\n", __func__);
		return;
	}

	list_for_each_entry(curr, &allocator->free_list, link) {
		if (NEXT_PA(curr) == node->pa) {
			/* node directly follows curr: grow curr over it */
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
		if (curr->pa == NEXT_PA(node)) {
			/* node directly precedes curr: grow curr backwards */
			curr->pa = node->pa;
			curr->va = node->va;
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
	}
	/* no neighbor: insert before the first block at least as large */
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (curr->size >= node->size) {
			list_add_tail(&node->link, &curr->link);
			return;
		}
	}
	list_add_tail(&node->link, &allocator->free_list);
}
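
/*
 * Worked example of the coalescing above (all addresses made up):
 * suppose the free list holds a block {pa = 0x1000, size = 0x100}.
 * Freeing {pa = 0x1100, size = 0x80} satisfies the first test,
 * NEXT_PA(curr) == node->pa (0x1000 + 0x100 == 0x1100), so the blocks
 * merge into {pa = 0x1000, size = 0x180} and the freed node is
 * recycled through delete_node(). Freeing {pa = 0xF00, size = 0x100}
 * instead matches the second test (0xF00 + 0x100 == 0x1000), and the
 * existing block becomes {pa = 0xF00, size = 0x200}.
 */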

/*
 *  ======== get_allocator ========
 *  Purpose:
 *      Return the allocator for the given SM segment id, or NULL if the
 *      slot is empty. Callers must pass 1 <= ul_seg_id <=
 *      CMM_MAXGPPSEGS; no bounds checking is done here.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}

/*
 *  The cmm_xlator_xxx routines below are used by Node and Stream to
 *  translate SM addresses into a client process's address space.
 *  A translator object is created per client process.
 */

/*
 *  ======== cmm_xlator_create ========
 *  Purpose:
 *      Create a translator (xlator) object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
		      struct cmm_object *hcmm_mgr,
		      struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->cmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM segment id */
		xlator_object->seg_id = xlator_attrs->seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}

/*
 *  ======== cmm_xlator_alloc_buf ========
 *  Purpose:
 *      Allocate an SM buffer and write the translated (client virtual)
 *      address into *va_buf. Returns the buffer's physical address.
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;

	if (xlator_obj) {
		attrs.seg_id = xlator_obj->seg_id;
		__raw_writel(0, va_buf);
		/* allocate SM */
		pbuf =
		    cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to the translator (node/stream)
			 * process virtual address */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}

/*
 *  ======== cmm_xlator_free_buf ========
 *  Purpose:
 *      Free a buffer identified by its translator virtual address. The
 *      address is first translated back to the physical address that
 *      cmm_free_buf() expects.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;

	if (xlator_obj) {
		/* convert Va to Pa so we can free it */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
					      xlator_obj->seg_id);
			if (status) {
				/* this should not happen: the block's
				 * descriptor is gone */
				pr_err("%s, line %d: Assertion failed\n",
				       __FILE__, __LINE__);
			}
		}
	}
	return status;
}
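
/*
 * Translator usage sketch (hypothetical client; 'mgr', 'seg_va', and
 * 'seg_size' are placeholders for the manager handle and the client's
 * own mapping of the SM segment, which this file does not create):
 *
 *	struct cmm_xlatorobject *xl;
 *	u8 *seg_va = ...;	mapped client VA of the SM segment
 *	u32 buf_va;
 *	void *pa;
 *
 *	cmm_xlator_create(&xl, mgr, NULL);
 *	cmm_xlator_info(xl, &seg_va, seg_size, 1, true);
 *	pa = cmm_xlator_alloc_buf(xl, &buf_va, 64);
 *	...
 *	cmm_xlator_free_buf(xl, (void *)buf_va);
 */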

/*
 *  ======== cmm_xlator_info ========
 *  Purpose:
 *      Set or get translator info: the client-process virtual base (and
 *      size) corresponding to the segment's physical SM base.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
		    u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	if (xlator_obj) {
		if (set_info) {
			/* set the translator's virtual address range */
			xlator_obj->virt_base = (u32) *paddr;
			xlator_obj->virt_size = ul_size;
		} else {
			/* return the virtual base address */
			*paddr = (u8 *) xlator_obj->virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 *  ======== cmm_xlator_translate ========
 *  Purpose:
 *      Translate an address between the client virtual, GPP physical,
 *      and DSP physical address spaces, as selected by xtype. Returns
 *      0 (NULL) on failure.
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	if (!xlator_obj)
		goto loop_cont;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
	/* get this translator's SM allocator */
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
	if (!allocator)
		goto loop_cont;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* GPP Va = Va base + offset */
			dw_offset = (u8 *) paddr -
			    (u8 *) (allocator->shm_base -
				    allocator->dsp_size);
			dw_addr_xlate = xlator_obj->virt_base + dw_offset;
			/* check that the result is within range */
			if ((dw_addr_xlate < xlator_obj->virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->virt_base +
			      xlator_obj->virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* GPP Pa = GPP base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* now convert to the target physical space if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* have a GPP Pa; convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* have a DSP Pa; convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	}
loop_cont:
	return (void *)dw_addr_xlate;
}
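
/*
 * Translation walk-through with made-up numbers (none of these values
 * come from a real configuration): take a segment with shm_base =
 * 0x87000000 and dsp_size = 0x1000, and a translator with virt_base =
 * 0x40000000, virt_size = 0x10000. For a client virtual address
 * 0x40000200, CMM_VA2PA computes
 *
 *	dw_offset     = 0x40000200 - 0x40000000        = 0x200
 *	dw_addr_xlate = (0x87000000 - 0x1000) + 0x200  = 0x86FFF200
 *
 * CMM_PA2VA inverts this, and CMM_VA2DSPPA additionally applies
 * GPPPA2DSPPA() with dsp_phys_addr_offset * c_factor.
 */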