1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/types.h>
20
21#include <linux/dma-mapping.h>
22#include <linux/scatterlist.h>
23#include <dspbridge/host_os.h>
24
25
26#include <dspbridge/dbdefs.h>
27
28
29#include <dspbridge/ntfy.h>
30#include <dspbridge/sync.h>
31
32#include <dspbridge/dspdefs.h>
33#include <dspbridge/dspdeh.h>
34
35#include <dspbridge/cod.h>
36#include <dspbridge/dev.h>
37#include <dspbridge/procpriv.h>
38#include <dspbridge/dmm.h>
39
40
41#include <dspbridge/mgr.h>
42#include <dspbridge/node.h>
43#include <dspbridge/nldr.h>
44#include <dspbridge/rmm.h>
45
46
47#include <dspbridge/dbdcd.h>
48#include <dspbridge/msg.h>
49#include <dspbridge/dspioctl.h>
50#include <dspbridge/drv.h>
51
52
53#include <dspbridge/proc.h>
54#include <dspbridge/pwr.h>
55
56#include <dspbridge/resourcecleanup.h>
57
58#define MAXCMDLINELEN 255
59#define PROC_ENVPROCID "PROC_ID=%d"
60#define MAXPROCIDLEN (8 + 5)
61#define PROC_DFLT_TIMEOUT 10000
62#define PWR_TIMEOUT 500
63#define EXTEND "_EXT_END"
64
65#define DSP_CACHE_LINE 128
66
67#define BUFMODE_MASK (3 << 14)
68
69
70#define RBUF 0x4000
71#define WBUF 0x8000
72
73extern struct device *bridge;
74
75
76
77
/*
 * The proc_object: one instance per processor attachment.
 * Allocated in proc_attach() (and as a short-lived dummy in
 * proc_auto_start()), freed in proc_detach().
 */
struct proc_object {
	struct list_head link;		/* Link to next proc_object */
	struct dev_object *dev_obj;	/* Device this proc represents */
	u32 process;			/* TGID of the attaching process */
	struct mgr_object *mgr_obj;	/* Manager object handle */
	u32 attach_count;		/* Processor attach count */
	u32 processor_id;		/* Processor number (device type) */
	u32 timeout;			/* Timeout count (ms) */
	enum dsp_procstate proc_state;	/* Processor state */
	u32 unit;			/* DDSP unit number */
	bool is_already_attached;	/* True when the underlying device
					 * already had a proc attached */

	/* Notification object for processor state-change events;
	 * may be NULL if allocation failed during attach */
	struct ntfy_object *ntfy_obj;
	/* Bridge-side device context, obtained from the dev object */
	struct bridge_dev_context *bridge_context;
	/* Function interface to the bridge driver */
	struct bridge_drv_interface *intf_fxns;
	char *last_coff;		/* Pathname of last loaded COFF file */
	struct list_head proc_list;
};
100
101DEFINE_MUTEX(proc_lock);
102
103
104static int proc_monitor(struct proc_object *proc_obj);
105static s32 get_envp_count(char **envp);
106static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
107 s32 cnew_envp, char *sz_var);
108
109
110static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
111 u32 mpu_addr, u32 dsp_addr, u32 size)
112{
113 struct dmm_map_object *map_obj;
114
115 u32 num_usr_pgs = size / PG_SIZE4K;
116
117 pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
118 __func__, mpu_addr,
119 dsp_addr, size);
120
121 map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
122 if (!map_obj)
123 return NULL;
124
125 INIT_LIST_HEAD(&map_obj->link);
126
127 map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
128 GFP_KERNEL);
129 if (!map_obj->pages) {
130 kfree(map_obj);
131 return NULL;
132 }
133
134 map_obj->mpu_addr = mpu_addr;
135 map_obj->dsp_addr = dsp_addr;
136 map_obj->size = size;
137 map_obj->num_usr_pgs = num_usr_pgs;
138
139 spin_lock(&pr_ctxt->dmm_map_lock);
140 list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
141 spin_unlock(&pr_ctxt->dmm_map_lock);
142
143 return map_obj;
144}
145
146static int match_exact_map_obj(struct dmm_map_object *map_obj,
147 u32 dsp_addr, u32 size)
148{
149 if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
150 pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
151 __func__, dsp_addr, map_obj->size, size);
152
153 return map_obj->dsp_addr == dsp_addr &&
154 map_obj->size == size;
155}
156
157static void remove_mapping_information(struct process_context *pr_ctxt,
158 u32 dsp_addr, u32 size)
159{
160 struct dmm_map_object *map_obj;
161
162 pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
163 dsp_addr, size);
164
165 spin_lock(&pr_ctxt->dmm_map_lock);
166 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
167 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
168 __func__,
169 map_obj->mpu_addr,
170 map_obj->dsp_addr,
171 map_obj->size);
172
173 if (match_exact_map_obj(map_obj, dsp_addr, size)) {
174 pr_debug("%s: match, deleting map info\n", __func__);
175 list_del(&map_obj->link);
176 kfree(map_obj->dma_info.sg);
177 kfree(map_obj->pages);
178 kfree(map_obj);
179 goto out;
180 }
181 pr_debug("%s: candidate didn't match\n", __func__);
182 }
183
184 pr_err("%s: failed to find given map info\n", __func__);
185out:
186 spin_unlock(&pr_ctxt->dmm_map_lock);
187}
188
189static int match_containing_map_obj(struct dmm_map_object *map_obj,
190 u32 mpu_addr, u32 size)
191{
192 u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
193
194 return mpu_addr >= map_obj->mpu_addr &&
195 mpu_addr + size <= map_obj_end;
196}
197
198static struct dmm_map_object *find_containing_mapping(
199 struct process_context *pr_ctxt,
200 u32 mpu_addr, u32 size)
201{
202 struct dmm_map_object *map_obj;
203 pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
204 mpu_addr, size);
205
206 spin_lock(&pr_ctxt->dmm_map_lock);
207 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
208 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
209 __func__,
210 map_obj->mpu_addr,
211 map_obj->dsp_addr,
212 map_obj->size);
213 if (match_containing_map_obj(map_obj, mpu_addr, size)) {
214 pr_debug("%s: match!\n", __func__);
215 goto out;
216 }
217
218 pr_debug("%s: no match!\n", __func__);
219 }
220
221 map_obj = NULL;
222out:
223 spin_unlock(&pr_ctxt->dmm_map_lock);
224 return map_obj;
225}
226
227static int find_first_page_in_cache(struct dmm_map_object *map_obj,
228 unsigned long mpu_addr)
229{
230 u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
231 u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
232 int pg_index = requested_base_page - mapped_base_page;
233
234 if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
235 pr_err("%s: failed (got %d)\n", __func__, pg_index);
236 return -1;
237 }
238
239 pr_debug("%s: first page is %d\n", __func__, pg_index);
240 return pg_index;
241}
242
243static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
244 int pg_i)
245{
246 pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
247 pg_i, map_obj->num_usr_pgs);
248
249 if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
250 pr_err("%s: requested pg_i %d is out of mapped range\n",
251 __func__, pg_i);
252 return NULL;
253 }
254
255 return map_obj->pages[pg_i];
256}
257
258
259
260
261
262
263
/*
 *  ======== proc_attach ========
 *  Purpose:
 *      Prepare for communication with a particular DSP processor and return
 *      a processor handle through ph_processor. The handle is cached in
 *      pr_ctxt, so a second attach from the same process context returns
 *      the cached handle immediately.
 *  Returns:
 *      0 on success, -ENODATA / -ENOMEM / bridge error codes on failure.
 */
int
proc_attach(u32 processor_id,
	    const struct dsp_processorattrin *attr_in,
	    void **ph_processor, struct process_context *pr_ctxt)
{
	int status = 0;
	struct dev_object *hdev_obj;
	struct proc_object *p_proc_object = NULL;
	struct mgr_object *hmgr_obj = NULL;
	struct drv_object *hdrv_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	u8 dev_type;

	/* Already attached in this process context? Reuse the handle. */
	if (pr_ctxt->processor) {
		*ph_processor = pr_ctxt->processor;
		return status;
	}

	/* Get the Driver and Manager Object Handles */
	if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) {
		status = -ENODATA;
		pr_err("%s: Failed to get object handles\n", __func__);
	} else {
		hdrv_obj = drv_datap->drv_object;
		hmgr_obj = drv_datap->mgr_object;
	}

	if (!status) {
		/* Get the Device Object for this processor id */
		status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
	}
	if (!status)
		status = dev_get_dev_type(hdev_obj, &dev_type);

	if (status)
		goto func_end;

	/* Create and populate the Processor Object */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);

	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->dev_obj = hdev_obj;
	p_proc_object->mgr_obj = hmgr_obj;
	p_proc_object->processor_id = dev_type;
	/* Store the TGID so the owning process can be identified later */
	p_proc_object->process = current->tgid;

	INIT_LIST_HEAD(&p_proc_object->proc_list);

	if (attr_in)
		p_proc_object->timeout = attr_in->timeout;
	else
		p_proc_object->timeout = PROC_DFLT_TIMEOUT;

	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status) {
		status = dev_get_bridge_context(hdev_obj,
					&p_proc_object->bridge_context);
		if (status)
			kfree(p_proc_object);
	} else
		kfree(p_proc_object);

	if (status)
		goto func_end;

	/* Create the notification object. It is created with no event mask
	 * and no valid handle; these get filled in when
	 * proc_register_notify() is called. A failed allocation is fatal
	 * for the attach. */
	p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
	if (p_proc_object->ntfy_obj)
		ntfy_init(p_proc_object->ntfy_obj);
	else
		status = -ENOMEM;

	if (!status) {
		/* Insert the Processor Object into the DEV list and find
		 * out whether a processor was already attached to this
		 * device (is_already_attached is set by the callee). */
		status = dev_insert_proc_object(p_proc_object->dev_obj,
						(u32) p_proc_object,
						&p_proc_object->
						is_already_attached);
		if (!status) {
			if (p_proc_object->is_already_attached)
				status = 0;
		} else {
			/* Insertion failed: tear down what was built */
			if (p_proc_object->ntfy_obj) {
				ntfy_delete(p_proc_object->ntfy_obj);
				kfree(p_proc_object->ntfy_obj);
			}

			kfree(p_proc_object);
		}
		if (!status) {
			*ph_processor = (void *)p_proc_object;
			pr_ctxt->processor = *ph_processor;
			(void)proc_notify_clients(p_proc_object,
						  DSP_PROCESSORATTACH);
		}
	} else {
		/* ntfy allocation failed: don't leak the proc object */
		kfree(p_proc_object);
	}
func_end:
	return status;
}
377
378static int get_exec_file(struct cfg_devnode *dev_node_obj,
379 struct dev_object *hdev_obj,
380 u32 size, char *exec_file)
381{
382 u8 dev_type;
383 struct drv_data *drv_datap = dev_get_drvdata(bridge);
384
385 dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
386
387 if (!exec_file)
388 return -EFAULT;
389
390 if (dev_type == DSP_UNIT) {
391 if (!drv_datap || !drv_datap->base_img)
392 return -EFAULT;
393
394 if (strlen(drv_datap->base_img) >= size)
395 return -EINVAL;
396
397 strcpy(exec_file, drv_datap->base_img);
398 } else {
399 return -ENOENT;
400 }
401
402 return 0;
403}
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
/*
 *  ======== proc_auto_start ========
 *  Purpose:
 *      Load and start the default base image on a device at boot/probe
 *      time. A temporary ("dummy") proc object is built just for the
 *      duration of the load and freed before returning.
 *  Parameters:
 *      dev_node_obj: device node handle (forwarded to get_exec_file)
 *      hdev_obj:     handle to the device to auto-start
 *  Returns:
 *      0 on successful load+start, negative error code otherwise.
 */
int proc_auto_start(struct cfg_devnode *dev_node_obj,
		    struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct proc_object *p_proc_object;
	char sz_exec_file[MAXCMDLINELEN];
	char *argv[2];
	struct mgr_object *hmgr_obj = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	u8 dev_type;

	/* Get the manager object handle needed by the dummy proc object */
	if (!drv_datap || !drv_datap->mgr_object) {
		status = -ENODATA;
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
		goto func_end;
	} else {
		hmgr_obj = drv_datap->mgr_object;
	}

	/* Create a temporary PROC object just for this load */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->dev_obj = hdev_obj;
	p_proc_object->mgr_obj = hmgr_obj;
	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status)
		status = dev_get_bridge_context(hdev_obj,
					&p_proc_object->bridge_context);
	if (status)
		goto func_cont;

	/* Stop the device, putting it into a known (monitor) state */
	status = proc_stop(p_proc_object);

	if (status)
		goto func_cont;

	/* Resolve the default executable for this board...
	 * NOTE(review): the return of dev_get_dev_type() is not checked
	 * here, unlike in proc_attach() — confirm dev_type is always set. */
	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
	p_proc_object->processor_id = dev_type;
	status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
			       sz_exec_file);
	if (!status) {
		argv[0] = sz_exec_file;
		argv[1] = NULL;
		/* ...and try to load and start it */
		status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
		if (!status)
			status = proc_start(p_proc_object);
	}
	/* Drop the COFF path recorded by proc_load on this dummy object */
	kfree(p_proc_object->last_coff);
	p_proc_object->last_coff = NULL;
func_cont:
	kfree(p_proc_object);
func_end:
	return status;
}
479
480
481
482
483
484
485
486
487
488
489
490
491int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata *arg)
492{
493 int status = 0;
494 struct proc_object *p_proc_object = hprocessor;
495 u32 timeout = 0;
496
497 if (p_proc_object) {
498
499 if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
500 timeout = arg->cb_data;
501 status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
502 }
503
504 else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
505 timeout = arg->cb_data;
506 status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
507 } else if (dw_cmd == PWR_DEEPSLEEP) {
508
509 status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
510 }
511
512 else if (dw_cmd == BRDIOCTL_WAKEUP) {
513 timeout = arg->cb_data;
514 status = pwr_wake_dsp(timeout);
515 } else if (dw_cmd == PWR_WAKEUP) {
516
517 status = pwr_wake_dsp(timeout);
518 } else
519 if (!((*p_proc_object->intf_fxns->dev_cntrl)
520 (p_proc_object->bridge_context, dw_cmd,
521 arg))) {
522 status = 0;
523 } else {
524 status = -EPERM;
525 }
526 } else {
527 status = -EFAULT;
528 }
529
530 return status;
531}
532
533
534
535
536
537
538
539int proc_detach(struct process_context *pr_ctxt)
540{
541 int status = 0;
542 struct proc_object *p_proc_object = NULL;
543
544 p_proc_object = (struct proc_object *)pr_ctxt->processor;
545
546 if (p_proc_object) {
547
548 ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
549
550 if (p_proc_object->ntfy_obj) {
551 ntfy_delete(p_proc_object->ntfy_obj);
552 kfree(p_proc_object->ntfy_obj);
553 }
554
555 kfree(p_proc_object->last_coff);
556 p_proc_object->last_coff = NULL;
557
558 (void)dev_remove_proc_object(p_proc_object->dev_obj,
559 (u32) p_proc_object);
560
561 kfree(p_proc_object);
562 pr_ctxt->processor = NULL;
563 } else {
564 status = -EFAULT;
565 }
566
567 return status;
568}
569
570
571
572
573
574
575
576int proc_enum_nodes(void *hprocessor, void **node_tab,
577 u32 node_tab_size, u32 *pu_num_nodes,
578 u32 *pu_allocated)
579{
580 int status = -EPERM;
581 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
582 struct node_mgr *hnode_mgr = NULL;
583
584 if (p_proc_object) {
585 if (!(dev_get_node_manager(p_proc_object->dev_obj,
586 &hnode_mgr))) {
587 if (hnode_mgr) {
588 status = node_enum_nodes(hnode_mgr, node_tab,
589 node_tab_size,
590 pu_num_nodes,
591 pu_allocated);
592 }
593 }
594 } else {
595 status = -EFAULT;
596 }
597
598 return status;
599}
600
601
602static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
603 ssize_t len, int pg_i)
604{
605 struct page *page;
606 unsigned long offset;
607 ssize_t rest;
608 int ret = 0, i = 0;
609 struct scatterlist *sg = map_obj->dma_info.sg;
610
611 while (len) {
612 page = get_mapping_page(map_obj, pg_i);
613 if (!page) {
614 pr_err("%s: no page for %08lx\n", __func__, start);
615 ret = -EINVAL;
616 goto out;
617 } else if (IS_ERR(page)) {
618 pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
619 PTR_ERR(page));
620 ret = PTR_ERR(page);
621 goto out;
622 }
623
624 offset = start & ~PAGE_MASK;
625 rest = min_t(ssize_t, PAGE_SIZE - offset, len);
626
627 sg_set_page(&sg[i], page, rest, offset);
628
629 len -= rest;
630 start += rest;
631 pg_i++, i++;
632 }
633
634 if (i != map_obj->dma_info.num_pages) {
635 pr_err("%s: bad number of sg iterations\n", __func__);
636 ret = -EFAULT;
637 goto out;
638 }
639
640out:
641 return ret;
642}
643
644static int memory_regain_ownership(struct dmm_map_object *map_obj,
645 unsigned long start, ssize_t len, enum dma_data_direction dir)
646{
647 int ret = 0;
648 unsigned long first_data_page = start >> PAGE_SHIFT;
649 unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
650
651 unsigned long num_pages = last_data_page - first_data_page + 1;
652 struct bridge_dma_map_info *dma_info = &map_obj->dma_info;
653
654 if (!dma_info->sg)
655 goto out;
656
657 if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
658 pr_err("%s: dma info doesn't match given params\n", __func__);
659 return -EINVAL;
660 }
661
662 dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);
663
664 pr_debug("%s: dma_map_sg unmapped\n", __func__);
665
666 kfree(dma_info->sg);
667
668 map_obj->dma_info.sg = NULL;
669
670out:
671 return ret;
672}
673
674
675static int memory_give_ownership(struct dmm_map_object *map_obj,
676 unsigned long start, ssize_t len, enum dma_data_direction dir)
677{
678 int pg_i, ret, sg_num;
679 struct scatterlist *sg;
680 unsigned long first_data_page = start >> PAGE_SHIFT;
681 unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
682
683 unsigned long num_pages = last_data_page - first_data_page + 1;
684
685 pg_i = find_first_page_in_cache(map_obj, start);
686 if (pg_i < 0) {
687 pr_err("%s: failed to find first page in cache\n", __func__);
688 ret = -EINVAL;
689 goto out;
690 }
691
692 sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
693 if (!sg) {
694 ret = -ENOMEM;
695 goto out;
696 }
697
698 sg_init_table(sg, num_pages);
699
700
701
702 kfree(map_obj->dma_info.sg);
703
704 map_obj->dma_info.sg = sg;
705 map_obj->dma_info.dir = dir;
706 map_obj->dma_info.num_pages = num_pages;
707
708 ret = build_dma_sg(map_obj, start, len, pg_i);
709 if (ret)
710 goto kfree_sg;
711
712 sg_num = dma_map_sg(bridge, sg, num_pages, dir);
713 if (sg_num < 1) {
714 pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
715 ret = -EFAULT;
716 goto kfree_sg;
717 }
718
719 pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
720 map_obj->dma_info.sg_num = sg_num;
721
722 return 0;
723
724kfree_sg:
725 kfree(sg);
726 map_obj->dma_info.sg = NULL;
727out:
728 return ret;
729}
730
731int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
732 enum dma_data_direction dir)
733{
734
735 int status = 0;
736 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
737 struct dmm_map_object *map_obj;
738
739 if (!pr_ctxt) {
740 status = -EFAULT;
741 goto err_out;
742 }
743
744 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
745 (u32)pmpu_addr,
746 ul_size, dir);
747
748 mutex_lock(&proc_lock);
749
750
751 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
752 if (!map_obj) {
753 pr_err("%s: find_containing_mapping failed\n", __func__);
754 status = -EFAULT;
755 goto no_map;
756 }
757
758 if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
759 pr_err("%s: InValid address parameters %p %x\n",
760 __func__, pmpu_addr, ul_size);
761 status = -EFAULT;
762 }
763
764no_map:
765 mutex_unlock(&proc_lock);
766err_out:
767
768 return status;
769}
770
771int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
772 enum dma_data_direction dir)
773{
774
775 int status = 0;
776 struct process_context *pr_ctxt = (struct process_context *) hprocessor;
777 struct dmm_map_object *map_obj;
778
779 if (!pr_ctxt) {
780 status = -EFAULT;
781 goto err_out;
782 }
783
784 pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
785 (u32)pmpu_addr,
786 ul_size, dir);
787
788 mutex_lock(&proc_lock);
789
790
791 map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
792 if (!map_obj) {
793 pr_err("%s: find_containing_mapping failed\n", __func__);
794 status = -EFAULT;
795 goto no_map;
796 }
797
798 if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
799 pr_err("%s: InValid address parameters %p %x\n",
800 __func__, pmpu_addr, ul_size);
801 status = -EFAULT;
802 }
803
804no_map:
805 mutex_unlock(&proc_lock);
806err_out:
807 return status;
808}
809
810
811
812
813
814
815int proc_flush_memory(void *hprocessor, void *pmpu_addr,
816 u32 ul_size, u32 ul_flags)
817{
818 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
819
820 return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
821}
822
823
824
825
826
827
828int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
829{
830 enum dma_data_direction dir = DMA_FROM_DEVICE;
831
832 return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
833}
834
835
836
837
838
839
/*
 *  ======== proc_get_resource_info ========
 *  Purpose:
 *      Query resource usage on the DSP: dynamic memory pool statistics
 *      (via the node loader's remote memory manager) or processor load
 *      (via the IO manager).
 *  Returns:
 *      0 on success; -EFAULT for bad handles; -EINVAL when rmm_stat fails;
 *      -EPERM for unknown resource types.
 */
int proc_get_resource_info(void *hprocessor, u32 resource_type,
			   struct dsp_resourceinfo *resource_info,
			   u32 resource_info_size)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;
	struct nldr_object *nldr_obj = NULL;
	struct rmm_target_obj *rmm = NULL;
	struct io_mgr *hio_mgr = NULL;

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	switch (resource_type) {
	case DSP_RESOURCE_DYNDARAM:
	case DSP_RESOURCE_DYNSARAM:
	case DSP_RESOURCE_DYNEXTERNAL:
	case DSP_RESOURCE_DYNSRAM:
		/* Memory pool stats live in the remote memory manager,
		 * reached via node manager -> node loader -> rmm */
		status = dev_get_node_manager(p_proc_object->dev_obj,
					      &hnode_mgr);
		if (!hnode_mgr) {
			status = -EFAULT;
			goto func_end;
		}

		status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
		if (!status) {
			status = nldr_get_rmm_manager(nldr_obj, &rmm);
			if (rmm) {
				/* rmm_stat returns a boolean-style success
				 * flag, hence the inverted test */
				if (!rmm_stat(rmm,
					      (enum dsp_memtype)resource_type,
					      (struct dsp_memstat *)
					      &(resource_info->result.
						mem_stat)))
					status = -EINVAL;
			} else {
				status = -EFAULT;
			}
		}
		break;
	case DSP_RESOURCE_PROCLOAD:
		/* Processor load statistics come from the IO manager */
		status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
		if (hio_mgr)
			status =
			    p_proc_object->intf_fxns->
			    io_get_proc_load(hio_mgr,
					     (struct dsp_procloadstat *)
					     &(resource_info->result.
					       proc_load_stat));
		else
			status = -EFAULT;
		break;
	default:
		status = -EPERM;
		break;
	}
func_end:
	return status;
}
901
902
903
904
905
906
907
908int proc_get_dev_object(void *hprocessor,
909 struct dev_object **device_obj)
910{
911 int status = -EPERM;
912 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
913
914 if (p_proc_object) {
915 *device_obj = p_proc_object->dev_obj;
916 status = 0;
917 } else {
918 *device_obj = NULL;
919 status = -EFAULT;
920 }
921
922 return status;
923}
924
925
926
927
928
929
930int proc_get_state(void *hprocessor,
931 struct dsp_processorstate *proc_state_obj,
932 u32 state_info_size)
933{
934 int status = 0;
935 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
936 int brd_status;
937
938 if (p_proc_object) {
939
940 status = (*p_proc_object->intf_fxns->brd_status)
941 (p_proc_object->bridge_context, &brd_status);
942 if (!status) {
943 switch (brd_status) {
944 case BRD_STOPPED:
945 proc_state_obj->proc_state = PROC_STOPPED;
946 break;
947 case BRD_SLEEP_TRANSITION:
948 case BRD_DSP_HIBERNATION:
949
950 case BRD_RUNNING:
951 proc_state_obj->proc_state = PROC_RUNNING;
952 break;
953 case BRD_LOADED:
954 proc_state_obj->proc_state = PROC_LOADED;
955 break;
956 case BRD_ERROR:
957 proc_state_obj->proc_state = PROC_ERROR;
958 break;
959 default:
960 proc_state_obj->proc_state = 0xFF;
961 status = -EPERM;
962 break;
963 }
964 }
965 } else {
966 status = -EFAULT;
967 }
968 dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
969 __func__, status, proc_state_obj->proc_state);
970 return status;
971}
972
973
974
975
976
977
978
979
980
981
982
983
984
985int proc_get_trace(void *hprocessor, u8 *pbuf, u32 max_size)
986{
987 int status;
988 status = -ENOSYS;
989 return status;
990}
991
992
993
994
995
996
997
998
/*
 *  ======== proc_load ========
 *  Purpose:
 *      Reset a processor and load a new base program image onto it:
 *      stop the board, put it into monitor state, (de)register COFF node
 *      data with the DCD, load the executable, mark the board LOADED and
 *      initialize the DMM pool from the image's _EXT_END symbol.
 *  Parameters:
 *      hprocessor: processor handle from proc_attach()
 *      argc_index: number of entries in user_args
 *      user_args:  argv-style array; user_args[0] is the COFF path
 *      user_envp:  optional envp-style array; PROC_ID=<n> is prepended
 *  Returns:
 *      0 on success, negative error code on failure (board is re-stopped
 *      on any failure).
 */
int proc_load(void *hprocessor, const s32 argc_index,
	      const char **user_args, const char **user_envp)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct io_mgr *hio_mgr;		/* IO manager handle */
	struct msg_mgr *hmsg_mgr;	/* Message manager handle */
	struct cod_manager *cod_mgr;	/* Code manager handle */
	char *pargv0;			/* Saved original argv[0] ptr */
	char **new_envp;		/* Updated envp[] array */
	char sz_proc_id[MAXPROCIDLEN];	/* Holds "PROC_ID=<n>" */
	s32 envp_elems;			/* Num elements in user_envp[] */
	s32 cnew_envp;			/* Num elements in new_envp[] */
	s32 nproc_id = 0;		/* Anticipates a MP version */
	struct dcd_manager *hdcd_handle;
	struct dmm_object *dmm_mgr;
	u32 dw_ext_end;
	u32 proc_id;
	int brd_state;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	struct timeval tv1;
	struct timeval tv2;
#endif

#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv1);
#endif
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EPERM;
		goto func_end;
	}
	/* Stop the board and place it in monitor state before loading */
	status = proc_stop(hprocessor);
	if (status)
		goto func_end;

	/* Place the board in the monitor state. */
	status = proc_monitor(hprocessor);
	if (status)
		goto func_end;

	/* Save ptr to original argv[0]; it is restored before returning */
	pargv0 = (char *)user_args[0];
	/* Build new_envp = user_envp + "PROC_ID=<nproc_id>" for the target */
	envp_elems = get_envp_count((char **)user_envp);
	cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
	new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
	if (new_envp) {
		/* NOTE(review): snprintf reports truncation by returning
		 * the would-be length (>= MAXPROCIDLEN), not -1, so this
		 * overflow check never fires on truncation — confirm. */
		status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
				  nproc_id);
		if (status == -1) {
			dev_dbg(bridge, "%s: Proc ID string overflow\n",
				__func__);
			status = -EPERM;
		} else {
			new_envp =
			    prepend_envp(new_envp, (char **)user_envp,
					 envp_elems, cnew_envp, sz_proc_id);
			/* Get the DCD Handle */
			status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
						    (u32 *) &hdcd_handle);
			if (!status) {
				/*  Before proceeding with the new load,
				 *  unregister any nodes from a previously
				 *  registered COFF, then forget its path
				 *  regardless of the unregister result. */
				if (p_proc_object->last_coff != NULL) {
					status =
					    dcd_auto_unregister(hdcd_handle,
								p_proc_object->
								last_coff);
					/* Regardless of auto unregister
					 * status, free the saved path. */
					kfree(p_proc_object->last_coff);
					p_proc_object->last_coff = NULL;
				}
			}
			/* Open the COFF so its symbols can be queried */
			status = cod_open_base(cod_mgr, (char *)user_args[0],
					       COD_SYMB);
		}
	} else {
		status = -ENOMEM;
	}
	if (!status) {
		/* Auto-register node data from the new COFF with the DCD */
		status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
					    (u32 *) &hdcd_handle);
		if (!status) {
			/*  Auto register nodes in the specified COFF file.
			 *  If registration did not fail (status == 0 or
			 *  -EACCES), save the COFF file name for future
			 *  de-registration. */
			status =
			    dcd_auto_register(hdcd_handle,
					      (char *)user_args[0]);
			if (status == -EACCES)
				status = 0;

			if (status) {
				status = -EPERM;
			} else {
				/* Save the COFF path in last_coff so the
				 * next load can auto-unregister it */
				p_proc_object->last_coff =
				    kzalloc((strlen(user_args[0]) +
							1), GFP_KERNEL);
				/* If memory allocated, save COFF file name */
				if (p_proc_object->last_coff) {
					strncpy(p_proc_object->last_coff,
						(char *)user_args[0],
						(strlen((char *)user_args[0]) +
						 1));
				}
			}
		}
	}
	if (!status) {
		/* Create the message manager if it does not exist yet.
		 * This must be done before io_on_loaded is called. */
		dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
		if (!hmsg_mgr) {
			status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
					    (msg_onexit) node_on_exit);
			dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
		}
	}
	if (!status) {
		/* Let the IO manager process the newly loaded image */
		status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
		if (hio_mgr)
			status = (*p_proc_object->intf_fxns->io_on_loaded)
			    (hio_mgr);
		else
			status = -EFAULT;
	}
	if (!status) {
		/* Now attempt to load the executable itself */

		/* Boost the OPP to the maximum for the duration of the load */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
#endif
		status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
				       dev_brd_write_fxn,
				       p_proc_object->dev_obj, NULL);
		if (status) {
			if (status == -EBADF) {
				dev_dbg(bridge, "%s: Failure to Load the EXE\n",
					__func__);
			}
			if (status == -ESPIPE) {
				pr_err("%s: Couldn't parse the file\n",
				       __func__);
			}
		}
		/* Drop back to the lowest supported OPP */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif

	}
	if (!status) {
		/* Update the processor state to loaded and notify clients */
		status = (*p_proc_object->intf_fxns->brd_set_state)
		    (p_proc_object->bridge_context, BRD_LOADED);
		if (!status) {
			p_proc_object->proc_state = PROC_LOADED;
			if (p_proc_object->ntfy_obj)
				proc_notify_clients(p_proc_object,
						    DSP_PROCESSORSTATECHANGE);
		}
	}
	if (!status) {
		status = proc_get_processor_id(hprocessor, &proc_id);
		if (proc_id == DSP_UNIT) {
			/* Use all available bytes in the device memory pool:
			 * look up the image's _EXT_END symbol to find where
			 * DMM-managed memory begins. */
			if (!status)
				status = cod_get_sym_value(cod_mgr, EXTEND,
							   &dw_ext_end);

			/* Reset DMM structs and add an initial free chunk */
			if (!status) {
				status =
				    dev_get_dmm_mgr(p_proc_object->dev_obj,
						    &dmm_mgr);
				if (dmm_mgr) {
					/* Convert the word address one past
					 * EXT_END into a byte address */
					dw_ext_end =
					    (dw_ext_end + 1) * DSPWORDSIZE;
					/* DMM memory starts at EXT_END */
					status = dmm_create_tables(dmm_mgr,
								   dw_ext_end,
								   DMMPOOLSIZE);
				} else {
					status = -EFAULT;
				}
			}
		}
	}
	/* Restore the original argv[0] and free the temp envp array */
	kfree(new_envp);
	user_args[0] = pargv0;
	if (!status) {
		if (!((*p_proc_object->intf_fxns->brd_status)
		      (p_proc_object->bridge_context, &brd_state))) {
			pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
			/* Remember the loaded image as the new base image */
			kfree(drv_datap->base_img);
			drv_datap->base_img = kstrdup(pargv0, GFP_KERNEL);
			if (!drv_datap->base_img)
				status = -ENOMEM;
		}
	}

func_end:
	if (status) {
		pr_err("%s: Processor failed to load\n", __func__);
		proc_stop(p_proc_object);
	}
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv2);
	if (tv2.tv_usec < tv1.tv_usec) {
		tv2.tv_usec += 1000000;
		tv2.tv_sec--;
	}
	dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
		tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
#endif
	return status;
}
1249
1250
1251
1252
1253
1254
1255int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1256 void *req_addr, void **pp_map_addr, u32 ul_map_attr,
1257 struct process_context *pr_ctxt)
1258{
1259 u32 va_align;
1260 u32 pa_align;
1261 struct dmm_object *dmm_mgr;
1262 u32 size_align;
1263 int status = 0;
1264 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1265 struct dmm_map_object *map_obj;
1266 u32 tmp_addr = 0;
1267
1268#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1269 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
1270 if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
1271 !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
1272 pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
1273 (u32)pmpu_addr, ul_size);
1274 return -EFAULT;
1275 }
1276 }
1277#endif
1278
1279
1280 va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
1281 pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
1282 size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
1283 PG_SIZE4K);
1284
1285 if (!p_proc_object) {
1286 status = -EFAULT;
1287 goto func_end;
1288 }
1289
1290 mutex_lock(&proc_lock);
1291 dmm_get_handle(p_proc_object, &dmm_mgr);
1292 if (dmm_mgr)
1293 status = dmm_map_memory(dmm_mgr, va_align, size_align);
1294 else
1295 status = -EFAULT;
1296
1297
1298 if (!status) {
1299
1300
1301 tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
1302
1303 map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
1304 size_align);
1305 if (!map_obj)
1306 status = -ENOMEM;
1307 else
1308 status = (*p_proc_object->intf_fxns->brd_mem_map)
1309 (p_proc_object->bridge_context, pa_align, va_align,
1310 size_align, ul_map_attr, map_obj->pages);
1311 }
1312 if (!status) {
1313
1314 *pp_map_addr = (void *) tmp_addr;
1315 } else {
1316 remove_mapping_information(pr_ctxt, tmp_addr, size_align);
1317 dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1318 }
1319 mutex_unlock(&proc_lock);
1320
1321 if (status)
1322 goto func_end;
1323
1324func_end:
1325 dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
1326 "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
1327 "pa_align %x, size_align %x status 0x%x\n", __func__,
1328 hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
1329 pp_map_addr, va_align, pa_align, size_align, status);
1330
1331 return status;
1332}
1333
1334
1335
1336
1337
1338
/*
 *  ======== proc_register_notify ========
 *  Purpose:
 *      Register to be notified of specific processor events. Ordinary
 *      state-change events are registered with the processor's ntfy
 *      object; fatal error events (DSP_SYSERROR, DSP_MMUFAULT,
 *      DSP_PWRERROR, DSP_WDTOVERFLOW) are registered with the DEH manager.
 *  Returns:
 *      0 on success; -EFAULT for a NULL handle; -EINVAL for an invalid
 *      event mask or notify type.
 */
int proc_register_notify(void *hprocessor, u32 event_mask,
			 u32 notify_type, struct dsp_notification
			 *hnotification)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct deh_mgr *hdeh_mgr;

	/* Check processor handle */
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Check if event mask is a valid processor-related event */
	if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
			   DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
			   DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
			   DSP_WDTOVERFLOW))
		status = -EINVAL;

	/* Check if notify type is valid */
	if (notify_type != DSP_SIGNALEVENT)
		status = -EINVAL;

	if (!status) {
		/* Non-fatal events (anything besides DSP_SYSERROR,
		 * DSP_MMUFAULT, DSP_PWRERROR, DSP_WDTOVERFLOW) register
		 * with the processor's ntfy object directly. */
		if (event_mask &
		    ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
		      DSP_WDTOVERFLOW)) {
			status = ntfy_register(p_proc_object->ntfy_obj,
					       hnotification, event_mask,
					       notify_type);
			/* Deregistration special case: an event_mask of 0
			 * means "deregister", but there is no record of
			 * which manager the notification was originally
			 * registered with. If ntfy_register failed on a
			 * deregister request, retry against the DEH
			 * manager. */
			if ((event_mask == 0) && status) {
				status =
				    dev_get_deh_mgr(p_proc_object->dev_obj,
						    &hdeh_mgr);
				status =
				    bridge_deh_register_notify(hdeh_mgr,
							       event_mask,
							       notify_type,
							       hnotification);
			}
		} else {
			/* Fatal error events go to the DEH manager */
			status = dev_get_deh_mgr(p_proc_object->dev_obj,
						 &hdeh_mgr);
			status =
			    bridge_deh_register_notify(hdeh_mgr,
						       event_mask,
						       notify_type,
						       hnotification);

		}
	}
func_end:
	return status;
}
1405
1406
1407
1408
1409
1410
1411int proc_reserve_memory(void *hprocessor, u32 ul_size,
1412 void **pp_rsv_addr,
1413 struct process_context *pr_ctxt)
1414{
1415 struct dmm_object *dmm_mgr;
1416 int status = 0;
1417 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1418 struct dmm_rsv_object *rsv_obj;
1419
1420 if (!p_proc_object) {
1421 status = -EFAULT;
1422 goto func_end;
1423 }
1424
1425 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1426 if (!dmm_mgr) {
1427 status = -EFAULT;
1428 goto func_end;
1429 }
1430
1431 status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1432 if (status != 0)
1433 goto func_end;
1434
1435
1436
1437
1438
1439
1440 rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1441 if (rsv_obj) {
1442 rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1443 spin_lock(&pr_ctxt->dmm_rsv_lock);
1444 list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1445 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1446 }
1447
1448func_end:
1449 dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1450 "status 0x%x\n", __func__, hprocessor,
1451 ul_size, pp_rsv_addr, status);
1452 return status;
1453}
1454
1455
1456
1457
1458
1459
/*
 *  ======== proc_start ========
 *  Purpose:
 *      Start the processor running: fetch the entry point of the image
 *      loaded by proc_load(), jump the board to it, and bring up the
 *      phase-2 device objects.  Requires the PROC_LOADED state.
 *  Returns 0 on success, -EFAULT on bad handles, -EBADR when not in the
 *  PROC_LOADED state, or the failure code from the COD/bridge layers.
 */
int proc_start(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct cod_manager *cod_mgr;
	u32 dw_dsp_addr;	/* entry point of the loaded DSP image */
	int brd_state;

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Only start from the PROC_LOADED state. */
	if (p_proc_object->proc_state != PROC_LOADED) {
		status = -EBADR;
		goto func_end;
	}
	status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EFAULT;
		goto func_cont;
	}

	/* Look up the image entry point recorded at load time. */
	status = cod_get_entry(cod_mgr, &dw_dsp_addr);
	if (status)
		goto func_cont;

	/* Release the board from reset at the entry point. */
	status = (*p_proc_object->intf_fxns->brd_start)
	    (p_proc_object->bridge_context, dw_dsp_addr);
	if (status)
		goto func_cont;

	/* Bring up phase-2 device objects (node/msg managers etc). */
	status = dev_create2(p_proc_object->dev_obj);
	if (!status) {
		p_proc_object->proc_state = PROC_RUNNING;
		/* Notify registered clients of the state change, if any
		 * notification object exists. */
		if (p_proc_object->ntfy_obj) {
			proc_notify_clients(p_proc_object,
					    DSP_PROCESSORSTATECHANGE);
		}
	} else {
		/* dev_create2 failed after the board started: stop the
		 * board again (return value deliberately ignored) so the
		 * recorded state stays consistent. */
		(void)(*p_proc_object->intf_fxns->
		       brd_stop) (p_proc_object->bridge_context);
		p_proc_object->proc_state = PROC_STOPPED;
	}
func_cont:
	if (!status) {
		if (!((*p_proc_object->intf_fxns->brd_status)
		      (p_proc_object->bridge_context, &brd_state))) {
			pr_info("%s: dsp in running state\n", __func__);
		}
	} else {
		/* NOTE(review): this runs proc_stop() even on paths where
		 * the board was never started, or where brd_stop was just
		 * called above (a second stop).  Preserved as-is; verify
		 * proc_stop() tolerates both before changing. */
		pr_err("%s: Failed to start the dsp\n", __func__);
		proc_stop(p_proc_object);
	}

func_end:
	return status;
}
1525
1526
1527
1528
1529
1530
1531int proc_stop(void *hprocessor)
1532{
1533 int status = 0;
1534 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1535 struct msg_mgr *hmsg_mgr;
1536 struct node_mgr *hnode_mgr;
1537 void *hnode;
1538 u32 node_tab_size = 1;
1539 u32 num_nodes = 0;
1540 u32 nodes_allocated = 0;
1541
1542 if (!p_proc_object) {
1543 status = -EFAULT;
1544 goto func_end;
1545 }
1546
1547 status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr);
1548 if (!status && hnode_mgr) {
1549 status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
1550 &num_nodes, &nodes_allocated);
1551 if ((status == -EINVAL) || (nodes_allocated > 0)) {
1552 pr_err("%s: Can't stop device, active nodes = %d\n",
1553 __func__, nodes_allocated);
1554 return -EBADR;
1555 }
1556 }
1557
1558
1559 status =
1560 (*p_proc_object->intf_fxns->
1561 brd_stop) (p_proc_object->bridge_context);
1562 if (!status) {
1563 dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
1564 p_proc_object->proc_state = PROC_STOPPED;
1565
1566 if (!(dev_destroy2(p_proc_object->dev_obj))) {
1567
1568 dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
1569 if (hmsg_mgr) {
1570 msg_delete(hmsg_mgr);
1571 dev_set_msg_mgr(p_proc_object->dev_obj, NULL);
1572 }
1573 }
1574 } else {
1575 pr_err("%s: Failed to stop the processor\n", __func__);
1576 }
1577func_end:
1578
1579 return status;
1580}
1581
1582
1583
1584
1585
1586
1587int proc_un_map(void *hprocessor, void *map_addr,
1588 struct process_context *pr_ctxt)
1589{
1590 int status = 0;
1591 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1592 struct dmm_object *dmm_mgr;
1593 u32 va_align;
1594 u32 size_align;
1595
1596 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1597 if (!p_proc_object) {
1598 status = -EFAULT;
1599 goto func_end;
1600 }
1601
1602 status = dmm_get_handle(hprocessor, &dmm_mgr);
1603 if (!dmm_mgr) {
1604 status = -EFAULT;
1605 goto func_end;
1606 }
1607
1608
1609 mutex_lock(&proc_lock);
1610
1611
1612
1613
1614 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
1615
1616 if (!status) {
1617 status = (*p_proc_object->intf_fxns->brd_mem_un_map)
1618 (p_proc_object->bridge_context, va_align, size_align);
1619 }
1620
1621 if (status)
1622 goto unmap_failed;
1623
1624
1625
1626
1627
1628
1629 remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
1630
1631unmap_failed:
1632 mutex_unlock(&proc_lock);
1633
1634func_end:
1635 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
1636 __func__, hprocessor, map_addr, status);
1637 return status;
1638}
1639
1640
1641
1642
1643
1644
1645int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1646 struct process_context *pr_ctxt)
1647{
1648 struct dmm_object *dmm_mgr;
1649 int status = 0;
1650 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1651 struct dmm_rsv_object *rsv_obj;
1652
1653 if (!p_proc_object) {
1654 status = -EFAULT;
1655 goto func_end;
1656 }
1657
1658 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1659 if (!dmm_mgr) {
1660 status = -EFAULT;
1661 goto func_end;
1662 }
1663
1664 status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1665 if (status != 0)
1666 goto func_end;
1667
1668
1669
1670
1671
1672
1673 spin_lock(&pr_ctxt->dmm_rsv_lock);
1674 list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1675 if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1676 list_del(&rsv_obj->link);
1677 kfree(rsv_obj);
1678 break;
1679 }
1680 }
1681 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1682
1683func_end:
1684 dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1685 __func__, hprocessor, prsv_addr, status);
1686 return status;
1687}
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706static int proc_monitor(struct proc_object *proc_obj)
1707{
1708 int status = -EPERM;
1709 struct msg_mgr *hmsg_mgr;
1710
1711
1712
1713
1714 if (!dev_destroy2(proc_obj->dev_obj)) {
1715
1716 dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr);
1717 if (hmsg_mgr) {
1718 msg_delete(hmsg_mgr);
1719 dev_set_msg_mgr(proc_obj->dev_obj, NULL);
1720 }
1721 }
1722
1723 if (!((*proc_obj->intf_fxns->brd_monitor)
1724 (proc_obj->bridge_context))) {
1725 status = 0;
1726 }
1727
1728 return status;
1729}
1730
1731
1732
1733
1734
1735
1736
1737static s32 get_envp_count(char **envp)
1738{
1739 s32 ret = 0;
1740 if (envp) {
1741 while (*envp++)
1742 ret++;
1743
1744 ret += 1;
1745 }
1746
1747 return ret;
1748}
1749
1750
1751
1752
1753
1754
1755
1756static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
1757 s32 cnew_envp, char *sz_var)
1758{
1759 char **pp_envp = new_envp;
1760
1761
1762 *new_envp++ = sz_var;
1763
1764
1765 while (envp_elems--)
1766 *new_envp++ = *envp++;
1767
1768
1769 if (envp_elems == 0)
1770 *new_envp = NULL;
1771
1772 return pp_envp;
1773}
1774
1775
1776
1777
1778
1779
1780int proc_notify_clients(void *proc, u32 events)
1781{
1782 int status = 0;
1783 struct proc_object *p_proc_object = (struct proc_object *)proc;
1784
1785 if (!p_proc_object) {
1786 status = -EFAULT;
1787 goto func_end;
1788 }
1789
1790 ntfy_notify(p_proc_object->ntfy_obj, events);
1791func_end:
1792 return status;
1793}
1794
1795
1796
1797
1798
1799
1800
1801int proc_notify_all_clients(void *proc, u32 events)
1802{
1803 int status = 0;
1804 struct proc_object *p_proc_object = (struct proc_object *)proc;
1805
1806 if (!p_proc_object) {
1807 status = -EFAULT;
1808 goto func_end;
1809 }
1810
1811 dev_notify_clients(p_proc_object->dev_obj, events);
1812
1813func_end:
1814 return status;
1815}
1816
1817
1818
1819
1820
1821
1822int proc_get_processor_id(void *proc, u32 *proc_id)
1823{
1824 int status = 0;
1825 struct proc_object *p_proc_object = (struct proc_object *)proc;
1826
1827 if (p_proc_object)
1828 *proc_id = p_proc_object->processor_id;
1829 else
1830 status = -EFAULT;
1831
1832 return status;
1833}
1834