1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/types.h>
20#include <linux/bitmap.h>
21#include <linux/list.h>
22
23
24#include <dspbridge/host_os.h>
25
26
27#include <dspbridge/dbdefs.h>
28
29
30#include <dspbridge/memdefs.h>
31#include <dspbridge/proc.h>
32#include <dspbridge/strm.h>
33#include <dspbridge/sync.h>
34#include <dspbridge/ntfy.h>
35
36
37#include <dspbridge/cmm.h>
38#include <dspbridge/cod.h>
39#include <dspbridge/dev.h>
40#include <dspbridge/msg.h>
41
42
43#include <dspbridge/dbdcd.h>
44#include <dspbridge/disp.h>
45#include <dspbridge/rms_sh.h>
46
47
48#include <dspbridge/dspdefs.h>
49#include <dspbridge/dspioctl.h>
50
51
52#include <dspbridge/uuidutil.h>
53
54
55#include <dspbridge/nodepriv.h>
56#include <dspbridge/node.h>
57#include <dspbridge/dmm.h>
58
59
60#include <dspbridge/dbll.h>
61#include <dspbridge/nldr.h>
62
63#include <dspbridge/drv.h>
64#include <dspbridge/resourcecleanup.h>
65#include <_tiomap.h>
66
67#include <dspbridge/dspdeh.h>
68
/* Prefixes used to build stream device names ("/host<n>", "/dbpipe<n>") */
#define HOSTPREFIX "/host"
#define PIPEPREFIX "/dbpipe"

/* Max stream counts taken from the node's DCD database properties */
#define MAX_INPUTS(h) \
	((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
#define MAX_OUTPUTS(h) \
	((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)

/* Accessors for a node's priority and run state */
#define NODE_GET_PRIORITY(h) ((h)->prio)
#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)

#define MAXPIPES 100		/* size of the node-to-node pipe bitmap */
#define MAXDEVSUFFIXLEN 2	/* max digits appended to a device prefix */

/* Buffer sizes for the generated pipe/host device name strings */
#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)

#define MAXDEVNAMELEN 32	/* presumably a device-name limit -- not used in this chunk, confirm */
/* Node phase identifiers passed to get_fxn_address() */
#define CREATEPHASE 1
#define EXECUTEPHASE 2
#define DELETEPHASE 3




/* Default stream attributes applied when the caller supplies none */
#define DEFAULTBUFSIZE 32
#define DEFAULTNBUFS 2
#define DEFAULTSEGID 0
#define DEFAULTALIGNMENT 0
#define DEFAULTTIMEOUT 10000

/* Indices into node_mgr.fxn_addrs[] for RMS functions resolved from the
 * DSP base image (see get_rms_fxns() and the disp_* call sites). */
#define RMSQUERYSERVER 0
#define RMSCONFIGURESERVER 1
#define RMSCREATENODE 2
#define RMSEXECUTENODE 3
#define RMSDELETENODE 4
#define RMSCHANGENODEPRIORITY 5
#define RMSREADMEMORY 6
#define RMSWRITEMEMORY 7
#define RMSCOPY 8
#define MAXTIMEOUT 2000

#define NUMRMSFXNS 9		/* number of entries in node_mgr.fxn_addrs[] */

#define PWR_TIMEOUT 500		/* used as cb_data in node_create()/node_delete() */

/* Section label checked in node_allocate() to place a node's stack
 * segment in L1D SRAM */
#define STACKSEGLABEL "L1DSRAM_HEAP"
119
120
121
122
/*
 * struct node_mgr - Per-device manager of all DSP nodes.
 *
 * One instance exists per bridge device (created by node_create_mgr()).
 * It owns the list of allocated nodes and the bitmaps used to hand out
 * pipes and channels for stream connections.
 */
struct node_mgr {
	struct dev_object *dev_obj;	/* device this manager belongs to */
	/* Function interface to the bridge driver (msg queue ops, etc.) */
	struct bridge_drv_interface *intf_fxns;
	struct dcd_manager *dcd_mgr;	/* node database (DCD) manager */
	struct disp_object *disp_obj;	/* node dispatcher */
	struct list_head node_list;	/* all allocated node_objects */
	u32 num_nodes;		/* number of nodes on node_list */
	u32 num_created;	/* number of nodes created on the DSP */
	DECLARE_BITMAP(pipe_map, MAXPIPES);	/* node-to-node pipes in use */
	/* NOTE(review): not referenced in this chunk; presumably pipes whose
	 * GPP side is done but that are still live on the DSP -- confirm */
	DECLARE_BITMAP(pipe_done_map, MAXPIPES);
	/* Channels in use for STRMMODE_PROCCOPY GPP connections */
	DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
	/* Channels in use for STRMMODE_RDMA GPP connections */
	DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
	/* Channels in use for STRMMODE_ZEROCOPY GPP connections */
	DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
	struct ntfy_object *ntfy_obj;	/* manager notification object */
	struct mutex node_mgr_lock;	/* serializes node list/bitmap access */
	u32 fxn_addrs[NUMRMSFXNS];	/* RMS function addresses on the DSP */
	struct msg_mgr *msg_mgr_obj;	/* message manager */

	/* Processor properties, filled in by get_proc_props() */
	u32 num_chnls;		/* total number of channels */
	u32 chnl_offset;	/* first channel index usable by nodes */
	u32 chnl_buf_size;	/* channel buffer size, handed to disp */
	int proc_family;	/* processor family -- passed to disp_create() */
	int proc_type;		/* processor type -- passed to disp_create() */
	u32 dsp_word_size;	/* DSP word size, handed to the loader */
	u32 dsp_data_mau_size;	/* DSP data MAU size */
	u32 dsp_mau_size;	/* DSP MAU size, handed to the loader */
	s32 min_pri;		/* minimum allowed node priority */
	s32 max_pri;		/* maximum allowed node priority */

	struct strm_mgr *strm_mgr_obj;	/* stream manager */

	/* Dynamic loader interface table and loader instance */
	struct nldr_object *nldr_obj;
	struct node_ldr_fxns nldr_fxns;
};
163
164
165
166
/* How one endpoint of a stream is connected (see struct stream_chnl) */
enum connecttype {
	NOTCONNECTED = 0,	/* endpoint not connected */
	NODECONNECT,		/* node-to-node, over a pipe */
	HOSTCONNECT,		/* node-to-GPP, over a channel */
	DEVICECONNECT,		/* node-to-device-node */
};
173
174
175
176
/* One endpoint of a stream connection owned by a node */
struct stream_chnl {
	enum connecttype type;	/* type of stream connection */
	u32 dev_id;		/* pipe or channel id assigned in node_connect() */
};
181
182
183
184
/*
 * struct node_object - GPP-side representation of a node on the DSP.
 *
 * Created by node_allocate(); lives on node_mgr.node_list.
 */
struct node_object {
	struct list_head list_elem;	/* entry in node_mgr.node_list */
	struct node_mgr *node_mgr;	/* manager that owns this node */
	struct proc_object *processor;	/* processor the node runs on */
	struct dsp_uuid node_uuid;	/* node UUID from the DCD database */
	s32 prio;		/* current priority */
	u32 timeout;		/* timeout from the DCD props or attr_in */
	u32 heap_size;		/* node heap size in bytes */
	u32 dsp_heap_virt_addr;	/* heap address, DSP virtual space */
	u32 gpp_heap_virt_addr;	/* heap address, GPP virtual space */
	enum node_type ntype;	/* task / DAIS socket / message / device */
	enum node_state node_state;	/* allocated, created, running, ... */
	u32 num_inputs;		/* number of connected input streams */
	u32 num_outputs;	/* number of connected output streams */
	u32 max_input_index;	/* highest connected input stream index */
	u32 max_output_index;	/* highest connected output stream index */
	struct stream_chnl *inputs;	/* input endpoints, MAX_INPUTS entries */
	struct stream_chnl *outputs;	/* output endpoints, MAX_OUTPUTS entries */
	struct node_createargs create_args;	/* args for the create phase */
	nodeenv node_env;	/* node environment handle set by disp_node_create() */
	struct dcd_genericobj dcd_props;	/* node properties from the DCD */
	struct dsp_cbdata *args;	/* optional node arguments -- TODO confirm use */
	struct ntfy_object *ntfy_obj;	/* per-node notification object */
	char *str_dev_name;	/* device name (device nodes only) */
	struct sync_object *sync_done;	/* NOTE(review): presumably signalled on node exit -- confirm */
	s32 exit_status;	/* presumably the node's exit code -- confirm */

	/* Device-node bookkeeping */
	void *device_owner;	/* node that owns this device node */
	u32 num_gpp_inputs;	/* inputs connected to the GPP */
	u32 num_gpp_outputs;	/* outputs connected to the GPP */
	/* Stream connection records, allocated in node_allocate() */
	struct dsp_streamconnect *stream_connect;

	/* Message queue created through intf_fxns->msg_create_queue */
	struct msg_queue *msg_queue_obj;

	/* Shared-memory address translator (see cmm_xlator_create()) */
	struct cmm_xlatorobject *xlator;

	/* Dynamic loader state */
	struct nldr_nodeobject *nldr_node_obj;	/* loader's handle for this node */
	bool loaded;		/* code for the current phase is loaded */
	bool phase_split;	/* create/execute/delete are separate phases */

};
231
232
/* Default buffer attributes used by node_alloc_msg_buf() when the caller
 * passes pattr == NULL: shared-memory segment 1, no extra alignment. */
static struct dsp_bufferattr node_dfltbufattrs = {
	.cb_struct = 0,
	.segment_id = 1,
	.buf_alignment = 0,
};
238
/* Forward declarations of file-local helpers (defined later in this file) */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt);
static void delete_node_mgr(struct node_mgr *hnode_mgr);
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2, u32 stream1,
				u32 stream2);
static void fill_stream_def(struct node_object *hnode,
			    struct node_strmdef *pstrm_def,
			    struct dsp_strmattr *pattrs);
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
			   u32 phase);
static int get_node_props(struct dcd_manager *hdcd_mgr,
			  struct node_object *hnode,
			  const struct dsp_uuid *node_uuid,
			  struct dcd_genericobj *dcd_prop);
static int get_proc_props(struct node_mgr *hnode_mgr,
			  struct dev_object *hdev_obj);
static int get_rms_fxns(struct node_mgr *hnode_mgr);
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
		u32 ul_num_bytes, u32 mem_space);
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
		     u32 ul_num_bytes, u32 mem_space);
262
263
264static struct node_ldr_fxns nldr_fxns = {
265 nldr_allocate,
266 nldr_create,
267 nldr_delete,
268 nldr_get_fxn_addr,
269 nldr_load,
270 nldr_unload,
271};
272
273enum node_state node_get_state(void *hnode)
274{
275 struct node_object *pnode = (struct node_object *)hnode;
276 if (!pnode)
277 return -1;
278 return pnode->node_state;
279}
280
281
282
283
284
285
/*
 * node_allocate - Allocate a node on the DSP and set up all GPP-side state.
 * @hprocessor: processor the node will run on.
 * @node_uuid:  UUID identifying the node in the DCD database.
 * @pargs:      optional argument block copied into the node's message args.
 * @attr_in:    optional attributes (priority, timeout, GPP heap address/size).
 * @noderes:    out: resource-tracking object for the new node.
 * @pr_ctxt:    process context used for resource cleanup tracking.
 *
 * Reads the node's properties from the DCD, optionally reserves and maps a
 * per-node heap, creates notification/sync/translator/message-queue objects,
 * registers the node with the dynamic loader, and finally links the node
 * into the manager's list.  Returns 0 or a negative errno; on failure the
 * partially-built node is torn down via delete_node().
 */
int node_allocate(struct proc_object *hprocessor,
		  const struct dsp_uuid *node_uuid,
		  const struct dsp_cbdata *pargs,
		  const struct dsp_nodeattrin *attr_in,
		  struct node_res_object **noderes,
		  struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* shared memory manager */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_val;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);
	/* NOTE(review): status is not checked before proc_id is read; if the
	 * call can fail, proc_id may be used uninitialized here -- confirm. */
	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;

	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;

	/* If processor is in error state then don't attempt to allocate */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address: the base image
	 * must have been loaded so the RMS functions are resolved */
	if (hnode_mgr->fxn_addrs[0] == 0) {
		/* No RMS in base image */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if priority is within the supported range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}

	if (status)
		goto func_end;

	/* Allocate node object and fill in */
	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->node_mgr = hnode_mgr;

	/* Hold the manager lock while reading DCD props and mapping the
	 * heap so the node's setup is not interleaved with other threads */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->processor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only a default heap (none) is supported unless the
	 * caller supplies one via attr_in below */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user-allocated GPP heap to map for the node */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* Heap size must be a multiple of the MMU page size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	/* Reserve DSP virtual space for the heap (one extra guard page) */
	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
						task_arg_obj.dsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	/* Map the heap into the reserved DSP virtual space */
	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of the defaults read from the DCD above */
		pnode->timeout = attr_in->timeout;
		pnode->prio = attr_in->prio;
	}
	/* Create the node's notification object */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/* Allocate the stream-connection records; message nodes
		 * have no streams */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (!status && (node_type == NODE_TASK ||
				node_type == NODE_DAISSOCKET)) {
			/* Allocate per-stream endpoint and definition arrays */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct node_strmdef),
					GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct node_strmdef),
					GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create the termination-sync event and the shared-memory
		 * address translator for this node */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
					   GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/* Get the shared mem mgr for this node's allocator */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * which is bound to default SM */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Copy the caller's create-phase argument block */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
							   GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node through the bridge
		 * driver interface */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
						    &pnode->msg_queue_obj,
						    0,
						    pnode->create_args.asa.
						    node_msg_args.max_msgs,
						    pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */
		status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
						       (void *)pnode,
						       &pnode->dcd_props.
						       obj_data.node_obj,
						       &pnode->
						       nldr_node_obj,
						       &pnode->phase_split);
	}

	/* If the node's stack segment is labelled L1DSRAM_HEAP, resolve the
	 * segment's physical location (relative to DYNEXT_BEG) and read the
	 * actual stack-segment value from that memory. */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			void __iomem *stack_seg;
			u32 stack_seg_pa;

			status =
			    hnode_mgr->nldr_fxns.
			    get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					 &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    get_fxn_addr(pnode->nldr_node_obj,
					 "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			off_set = pul_value - dynext_base;
			stack_seg_pa = host_res->mem_phys[1] + off_set;
			stack_seg = ioremap(stack_seg_pa, SZ_32);
			if (!stack_seg) {
				status = -ENOMEM;
				goto func_end;
			}

			ul_stack_seg_val = readl(stack_seg);

			iounmap(stack_seg);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				host_res->mem_base[1] + off_set);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Success: add the node to the manager's list and notify
		 * the node's clients */
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* NOTE(review): phase_split is forced true here regardless of
		 * what nldr_fxns.allocate() reported above -- confirm intent */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup partially allocated node */
		if (pnode)
			delete_node(pnode, pr_ctxt);

	}

	if (!status) {
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
678
679
680
681
682
683
684DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
685 struct dsp_bufferattr *pattr,
686 u8 **pbuffer)
687{
688 struct node_object *pnode = (struct node_object *)hnode;
689 int status = 0;
690 bool va_flag = false;
691 bool set_info;
692 u32 proc_id;
693
694 if (!pnode)
695 status = -EFAULT;
696 else if (node_get_type(pnode) == NODE_DEVICE)
697 status = -EPERM;
698
699 if (status)
700 goto func_end;
701
702 if (pattr == NULL)
703 pattr = &node_dfltbufattrs;
704
705 status = proc_get_processor_id(pnode->processor, &proc_id);
706 if (proc_id != DSP_UNIT) {
707 goto func_end;
708 }
709
710
711
712
713 if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
714 (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
715 va_flag = true;
716 set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
717 true : false;
718
719 pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
720
721 status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
722 pattr->segment_id, set_info);
723 }
724 if (!status && (!va_flag)) {
725 if (pattr->segment_id != 1) {
726
727 status = -EBADR;
728 }
729
730
731
732 switch (pattr->buf_alignment) {
733 case 0:
734 case 1:
735 case 2:
736 case 4:
737 break;
738 default:
739
740 status = -EPERM;
741 break;
742 }
743 if (!status) {
744
745
746 (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
747 usize);
748 if (*pbuffer == NULL) {
749 pr_err("%s: error - Out of shared memory\n",
750 __func__);
751 status = -ENOMEM;
752 }
753 }
754 }
755func_end:
756 return status;
757}
758
759
760
761
762
763
764
765int node_change_priority(struct node_object *hnode, s32 prio)
766{
767 struct node_object *pnode = (struct node_object *)hnode;
768 struct node_mgr *hnode_mgr = NULL;
769 enum node_type node_type;
770 enum node_state state;
771 int status = 0;
772 u32 proc_id;
773
774 if (!hnode || !hnode->node_mgr) {
775 status = -EFAULT;
776 } else {
777 hnode_mgr = hnode->node_mgr;
778 node_type = node_get_type(hnode);
779 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
780 status = -EPERM;
781 else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
782 status = -EDOM;
783 }
784 if (status)
785 goto func_end;
786
787
788 mutex_lock(&hnode_mgr->node_mgr_lock);
789
790 state = node_get_state(hnode);
791 if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
792 NODE_SET_PRIORITY(hnode, prio);
793 } else {
794 if (state != NODE_RUNNING) {
795 status = -EBADR;
796 goto func_cont;
797 }
798 status = proc_get_processor_id(pnode->processor, &proc_id);
799 if (proc_id == DSP_UNIT) {
800 status =
801 disp_node_change_priority(hnode_mgr->disp_obj,
802 hnode,
803 hnode_mgr->fxn_addrs
804 [RMSCHANGENODEPRIORITY],
805 hnode->node_env, prio);
806 }
807 if (status >= 0)
808 NODE_SET_PRIORITY(hnode, prio);
809
810 }
811func_cont:
812
813 mutex_unlock(&hnode_mgr->node_mgr_lock);
814func_end:
815 return status;
816}
817
818
819
820
821
822
/*
 * node_connect - Connect an output stream of one node to an input stream
 * of another (node-to-node, node-to-GPP, or node-to-device-node).
 * @node1:      source node, or DSP_HGPPNODE for the GPP.
 * @stream1:    output stream index on @node1.
 * @node2:      sink node, or DSP_HGPPNODE for the GPP.
 * @stream2:    input stream index on @node2.
 * @pattrs:     optional stream attributes; only STRMMODE_PROCCOPY is
 *              accepted for node-to-node, all modes for GPP connections.
 * @conn_param: optional extra data appended to a device-node name.
 *
 * Both nodes must still be in the NODE_ALLOCATED state.  Returns 0 or a
 * negative errno.
 */
int node_connect(struct node_object *node1, u32 stream1,
		 struct node_object *node2,
		 u32 stream2, struct dsp_strmattr *pattrs,
		 struct dsp_cbdata *conn_param)
{
	struct node_mgr *hnode_mgr;
	char *pstr_dev_name = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;
	enum dsp_strmmode strm_mode;
	struct node_strmdef *pstrm_def;
	struct node_strmdef *input = NULL;
	struct node_strmdef *output = NULL;
	struct node_object *dev_node_obj;
	struct node_object *hnode;
	struct stream_chnl *pstream;
	u32 pipe_id;
	u32 chnl_id;
	s8 chnl_mode;
	u32 dw_length;
	int status = 0;

	if (!node1 || !node2)
		return -EFAULT;

	/* The two nodes must belong to the same node manager (unless one
	 * side is the GPP) */
	if (node1 != (struct node_object *)DSP_HGPPNODE &&
	    node2 != (struct node_object *)DSP_HGPPNODE &&
	    node1->node_mgr != node2->node_mgr)
		return -EPERM;

	/* Cannot connect a node to itself */
	if (node1 == node2)
		return -EPERM;

	/* The stream indices must be within the nodes' declared ranges */
	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);

	if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
	     stream1 >= MAX_OUTPUTS(node1)) ||
	    (node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
	     stream2 >= MAX_INPUTS(node2)))
		return -EINVAL;

	/*
	 * Only the following connection types are allowed:
	 *   task/DAIS-socket ->  task/DAIS-socket
	 *   task/DAIS-socket ->  device node
	 *   task/DAIS-socket ->  GPP
	 *   device node      ->  task/DAIS-socket
	 *   GPP              ->  task/DAIS-socket
	 * i.e. message nodes never connect, and at least one end must be a
	 * task or DAIS socket.
	 */
	if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
	    (node1_type != NODE_TASK &&
	     node1_type != NODE_DAISSOCKET &&
	     node2_type != NODE_TASK &&
	     node2_type != NODE_DAISSOCKET))
		return -EPERM;

	/* Only processor-copy streams are supported between nodes (DMA and
	 * zero-copy modes are GPP-connection-only, handled below) */
	if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
		return -EPERM;

	if (node1_type != NODE_GPP) {
		hnode_mgr = node1->node_mgr;
	} else {
		hnode_mgr = node2->node_mgr;
	}

	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Both nodes must still be in the allocated state */
	if (node1_type != NODE_GPP &&
	    node_get_state(node1) != NODE_ALLOCATED) {
		status = -EBADR;
		goto out_unlock;
	}

	if (node2_type != NODE_GPP &&
	    node_get_state(node2) != NODE_ALLOCATED) {
		status = -EBADR;
		goto out_unlock;
	}

	/* Locate the stream definitions and refuse if either end is
	 * already connected (sz_device set) */
	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
		output = &(node1->create_args.asa.
			   task_arg_obj.strm_out_def[stream1]);
		if (output->sz_device) {
			status = -EISCONN;
			goto out_unlock;
		}

	}
	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
		input = &(node2->create_args.asa.
			  task_arg_obj.strm_in_def[stream2]);
		if (input->sz_device) {
			status = -EISCONN;
			goto out_unlock;
		}

	}

	/* Node-to-node connection: allocate a pipe and name both ends */
	if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
	    (node2_type == NODE_TASK ||
	     node2_type == NODE_DAISSOCKET)) {
		/* Find an available pipe */
		pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
		if (pipe_id == MAXPIPES) {
			status = -ECONNREFUSED;
			goto out_unlock;
		}
		set_bit(pipe_id, hnode_mgr->pipe_map);
		node1->outputs[stream1].type = NODECONNECT;
		node2->inputs[stream2].type = NODECONNECT;
		node1->outputs[stream1].dev_id = pipe_id;
		node2->inputs[stream2].dev_id = pipe_id;
		output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
		input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
		if (!output->sz_device || !input->sz_device) {
			/* Undo the connection.
			 * NOTE(review): the freed sz_device pointers are not
			 * reset to NULL here, so they remain dangling in the
			 * stream defs -- confirm no later path frees them
			 * again. */
			kfree(output->sz_device);
			kfree(input->sz_device);
			clear_bit(pipe_id, hnode_mgr->pipe_map);
			status = -ENOMEM;
			goto out_unlock;
		}
		/* Copy "/dbpipe<pipe_id>" name to both device names */
		sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
		strcpy(input->sz_device, output->sz_device);
	}

	/* GPP connection: allocate a channel in the requested stream mode */
	if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
		pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
		if (!pstr_dev_name) {
			status = -ENOMEM;
			goto out_unlock;
		}

		chnl_mode = (node1_type == NODE_GPP) ?
		    CHNL_MODETODSP : CHNL_MODEFROMDSP;

		/*
		 * Reserve a channel id.  Zero-copy and DMA channels live in
		 * separate id ranges stacked above the proc-copy channels:
		 *   proc-copy:  [0, num_chnls)
		 *   DMA:        [num_chnls, 2 * num_chnls)
		 *   zero-copy:  [2 * num_chnls, 3 * num_chnls)
		 */
		strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
		switch (strm_mode) {
		case STRMMODE_RDMA:
			chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS) {
				set_bit(chnl_id, hnode_mgr->dma_chnl_map);
				/* dma chans are 2nd transport chnl set
				 * ids(e.g. 16-31) */
				chnl_id = chnl_id + hnode_mgr->num_chnls;
			}
			break;
		case STRMMODE_ZEROCOPY:
			chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS) {
				set_bit(chnl_id, hnode_mgr->zc_chnl_map);
				/* zero-copy chans are 3rd transport set
				 * (e.g. 32-47) */
				chnl_id = chnl_id +
				    (2 * hnode_mgr->num_chnls);
			}
			break;
		case STRMMODE_PROCCOPY:
			chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS)
				set_bit(chnl_id, hnode_mgr->chnl_map);
			break;
		default:
			status = -EINVAL;
			goto out_unlock;
		}
		if (chnl_id == CHNL_MAXCHANNELS) {
			status = -ECONNREFUSED;
			goto out_unlock;
		}

		if (node1 == (struct node_object *)DSP_HGPPNODE) {
			node2->inputs[stream2].type = HOSTCONNECT;
			node2->inputs[stream2].dev_id = chnl_id;
			input->sz_device = pstr_dev_name;
		} else {
			node1->outputs[stream1].type = HOSTCONNECT;
			node1->outputs[stream1].dev_id = chnl_id;
			output->sz_device = pstr_dev_name;
		}
		sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
	}

	/* Device-node connection: name the stream after the device node
	 * (plus optional conn_param data) and record ownership */
	if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
		if (node2_type == NODE_DEVICE) {
			/* node1 == > device */
			dev_node_obj = node2;
			hnode = node1;
			pstream = &(node1->outputs[stream1]);
			pstrm_def = output;
		} else {
			/* device == > node2 */
			dev_node_obj = node1;
			hnode = node2;
			pstream = &(node2->inputs[stream2]);
			pstrm_def = input;
		}
		/* Set up create args */
		pstream->type = DEVICECONNECT;
		dw_length = strlen(dev_node_obj->str_dev_name);
		if (conn_param)
			pstrm_def->sz_device = kzalloc(dw_length + 1 +
						       conn_param->cb_data,
						       GFP_KERNEL);
		else
			pstrm_def->sz_device = kzalloc(dw_length + 1,
						       GFP_KERNEL);
		if (!pstrm_def->sz_device) {
			status = -ENOMEM;
			goto out_unlock;
		}
		/* Copy device name */
		strncpy(pstrm_def->sz_device,
			dev_node_obj->str_dev_name, dw_length);
		if (conn_param)
			strncat(pstrm_def->sz_device,
				(char *)conn_param->node_data,
				(u32) conn_param->cb_data);
		dev_node_obj->device_owner = hnode;
	}

	/* Fill in create args and bump the stream counts on both ends */
	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
		node1->create_args.asa.task_arg_obj.num_outputs++;
		fill_stream_def(node1, output, pattrs);
	}
	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
		node2->create_args.asa.task_arg_obj.num_inputs++;
		fill_stream_def(node2, input, pattrs);
	}
	/* Update the stream counts and the highest-index watermarks used
	 * by node_create() to verify the node is fully connected */
	if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
		node1->num_outputs++;
		if (stream1 > node1->max_output_index)
			node1->max_output_index = stream1;

	}
	if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
		node2->num_inputs++;
		if (stream2 > node2->max_input_index)
			node2->max_input_index = stream2;

	}
	fill_stream_connect(node1, node2, stream1, stream2);
	/* The only error path beyond this point would be catastrophic */

out_unlock:
	if (status && pstr_dev_name)
		kfree(pstr_dev_name);
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
		"pattrs: %p status: 0x%x\n", __func__, node1,
		stream1, node2, stream2, pattrs, status);
	return status;
}
1100
1101
1102
1103
1104
1105
/*
 * node_create - Run the create phase of a node on the DSP.
 * @hnode: node to create; must be in the NODE_ALLOCATED state.
 *
 * Loads the node's create-phase code via the dynamic loader, dispatches
 * the RMS create call, binds the node's message queue to the returned
 * node environment, then (for split-phase nodes) unloads the create code.
 * On success the node transitions to NODE_CREATED and clients are
 * notified.  Returns 0 or a negative errno.
 */
int node_create(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 ul_create_fxn;
	enum node_type node_type;
	int status = 0;
	int status1 = 0;	/* separate status for the unload step */
	struct dsp_cbdata cb_data;
	u32 proc_id = 255;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;

	/* If processor is in error state then don't attempt to create
	 * new node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	cb_data.cb_data = PWR_TIMEOUT;
	node_type = node_get_type(hnode);
	hnode_mgr = hnode->node_mgr;
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* The node must not have been created (or deleted) already */
	if (node_get_state(hnode) != NODE_ALLOCATED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont2;

	if (proc_id != DSP_UNIT)
		goto func_cont2;

	/* Make sure the streams are properly connected: every declared
	 * stream index up to the max connected index must be connected */
	if ((hnode->num_inputs && hnode->max_input_index >
	     hnode->num_inputs - 1) ||
	    (hnode->num_outputs && hnode->max_output_index >
	     hnode->num_outputs - 1))
		status = -ENOTCONN;

	if (!status) {
		/* If node's create code does not exist on the DSP yet,
		 * load it. Temporarily raise the MPU frequency for the
		 * load when DVFS is in use. */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
		status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						   NLDR_CREATE);
		/* Get the node's create function address */
		if (!status) {
			hnode->loaded = true;
			if (node_type != NODE_DEVICE) {
				status = get_fxn_address(hnode, &ul_create_fxn,
							 CREATEPHASE);
			}
		} else {
			pr_err("%s: failed to load create code: 0x%x\n",
			       __func__, status);
		}
		/* Restore the lower MPU frequency */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
		/* For DAIS sockets, also resolve the socket's algorithm
		 * interface address for the create args */
		if (!status) {
			if (node_type == NODE_DAISSOCKET) {
				status = hnode_mgr->nldr_fxns.get_fxn_addr
				    (hnode->nldr_node_obj,
				     hnode->dcd_props.obj_data.node_obj.
				     str_i_alg_name,
				     &hnode->create_args.asa.
				     task_arg_obj.dais_arg);
			}
		}
	}
	if (!status) {
		if (node_type != NODE_DEVICE) {
			/* Dispatch the RMS create call, then bind the node's
			 * message queue to the returned environment */
			status = disp_node_create(hnode_mgr->disp_obj, hnode,
						  hnode_mgr->fxn_addrs
						  [RMSCREATENODE],
						  ul_create_fxn,
						  &(hnode->create_args),
						  &(hnode->node_env));
			if (status >= 0) {
				/* Set the message queue id to the node env
				 * pointer */
				intf_fxns = hnode_mgr->intf_fxns;
				(*intf_fxns->msg_set_queue_id) (hnode->
							msg_queue_obj,
							hnode->node_env);
			}
		}
	}
	/* If the create code is still loaded and the phases are split,
	 * unload it now -- it is no longer needed */
	if (hnode->loaded && hnode->phase_split) {
		/* NOTE(review): unload failure is reported but does not
		 * affect the create status -- presumably intentional */
		status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
						      NLDR_CREATE);
		hnode->loaded = false;
	}
	if (status1)
		pr_err("%s: Failed to unload create code: 0x%x\n",
		       __func__, status1);
func_cont2:
	/* Update the node and manager state */
	if (status >= 0) {
		NODE_SET_STATE(hnode, NODE_CREATED);
		hnode_mgr->num_created++;
		goto func_cont;
	}
	if (status != -EBADR) {
		/* The node create failed, so set the state back to
		 * "allocated" */
		NODE_SET_STATE(hnode, NODE_ALLOCATED);
	}
func_cont:
	/* Leave critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}

	dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
		hnode, status);
	return status;
}
1258
1259
1260
1261
1262
1263
/*
 *  ======== node_create_mgr ========
 *  Purpose:
 *      Create a node manager for the given device object.  Builds, in
 *      order: the notification object, the DCD manager, the dispatcher,
 *      the stream manager, the channel allocation bitmaps and the node
 *      (dynamic) loader.  Any failure unwinds via delete_node_mgr(),
 *      which tolerates a partially constructed manager.
 *  Parameters:
 *      node_man:   out - receives the new manager (NULL on failure).
 *      hdev_obj:   device this manager serves.
 *  Returns:
 *      0 on success, -ENOMEM or a propagated error code otherwise.
 */
int node_create_mgr(struct node_mgr **node_man,
		    struct dev_object *hdev_obj)
{
	u32 i;
	struct node_mgr *node_mgr_obj = NULL;
	struct disp_attr disp_attr_obj;
	char *sz_zl_file = "";	/* empty base-image name for the DCD */
	struct nldr_attrs nldr_attrs_obj;
	int status = 0;
	u8 dev_type;

	*node_man = NULL;

	node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
	if (!node_mgr_obj)
		return -ENOMEM;

	node_mgr_obj->dev_obj = hdev_obj;

	node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					 GFP_KERNEL);
	if (!node_mgr_obj->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(node_mgr_obj->ntfy_obj);

	INIT_LIST_HEAD(&node_mgr_obj->node_list);

	/* NOTE(review): return status of dev_get_dev_type() is not
	 * checked; dev_type would be uninitialized on failure - confirm
	 * this call cannot fail for a valid hdev_obj. */
	dev_get_dev_type(hdev_obj, &dev_type);

	status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
	if (status)
		goto out_err;

	status = get_proc_props(node_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	/* Create the dispatcher used to issue RMS commands to the DSP. */
	disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
	disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
	disp_attr_obj.proc_family = node_mgr_obj->proc_family;
	disp_attr_obj.proc_type = node_mgr_obj->proc_type;

	status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
	if (status)
		goto out_err;

	/* Stream manager for node streams on this device. */
	status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
	/* Get the message manager for this device. */
	dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
	mutex_init(&node_mgr_obj->node_mgr_lock);

	/* Channels below chnl_offset are reserved; mark them allocated. */
	for (i = 0; i < node_mgr_obj->chnl_offset; i++)
		set_bit(i, node_mgr_obj->chnl_map);

	/* The next two channels are used by the dispatcher itself. */
	set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
	set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);

	/* Resolve RMS server function addresses (DSP only, not IVA). */
	if (dev_type != IVA_UNIT) {
		/* Get addresses of any RMS functions loaded */
		status = get_rms_fxns(node_mgr_obj);
		if (status)
			goto out_err;
	}

	/* Hook up the dynamic loader and create its per-device object. */
	node_mgr_obj->nldr_fxns = nldr_fxns;

	nldr_attrs_obj.ovly = ovly;
	nldr_attrs_obj.write = mem_write;
	nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
	nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
	status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
						hdev_obj,
						&nldr_attrs_obj);
	if (status)
		goto out_err;

	*node_man = node_mgr_obj;

	return status;
out_err:
	delete_node_mgr(node_mgr_obj);
	return status;
}
1359
1360
1361
1362
1363
1364
1365
1366
/*
 *  ======== node_delete ========
 *  Purpose:
 *      Delete a node: run the node's delete phase on the DSP (when it
 *      was actually created there), then free all GPP-side resources
 *      and remove it from the process context's resource tracking.
 *  Parameters:
 *      noderes:  node resource wrapper holding the node object.
 *      pr_ctxt:  owning process context (resource bookkeeping / id slot).
 *  Returns:
 *      0 on success, -EFAULT for a bad handle, or a propagated error.
 */
int node_delete(struct node_res_object *noderes,
		struct process_context *pr_ctxt)
{
	struct node_object *pnode = noderes->node;
	struct node_mgr *hnode_mgr;
	struct proc_object *hprocessor;
	struct disp_object *disp_obj;
	u32 ul_delete_fxn;
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	int status1 = 0;	/* secondary status for unload failures */
	struct dsp_cbdata cb_data;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;

	void *node_res = noderes;

	struct dsp_processorstate proc_state;

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}

	cb_data.cb_data = PWR_TIMEOUT;
	hnode_mgr = pnode->node_mgr;
	hprocessor = pnode->processor;
	disp_obj = hnode_mgr->disp_obj;
	node_type = node_get_type(pnode);
	intf_fxns = hnode_mgr->intf_fxns;

	/* Hold the manager lock across the whole teardown sequence. */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(pnode);

	/*
	 * Run the delete phase on the DSP unless the node was never
	 * created there (still ALLOCATED with no environment) or it is a
	 * device node (which has no DSP-side code to execute).
	 */
	if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
	    node_type != NODE_DEVICE) {
		status = proc_get_processor_id(pnode->processor, &proc_id);
		if (status)
			goto func_cont1;

		if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
			/*
			 * If the node is paused its execute phase may still
			 * be loaded; unload it before loading delete code.
			 * Unload failure is only logged, never propagated.
			 */
			if (state == NODE_PAUSED && pnode->loaded &&
			    pnode->phase_split) {
				/* Ok to ignore status */
				status1 =
				    hnode_mgr->nldr_fxns.
				    unload(pnode->nldr_node_obj,
					   NLDR_EXECUTE);
				pnode->loaded = false;
				NODE_SET_STATE(pnode, NODE_DONE);
			}

			/* Load delete-phase code if it is not resident. */
			if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
			    pnode->phase_split) {
				status =
				    hnode_mgr->nldr_fxns.
				    load(pnode->nldr_node_obj, NLDR_DELETE);
				if (!status)
					pnode->loaded = true;
				else
					pr_err("%s: fail - load delete code:"
					       " 0x%x\n", __func__, status);
			}
		}
func_cont1:
		if (!status) {
			/* Wake any thread waiting for node completion. */
			(void)sync_set_event(pnode->sync_done);
			if (proc_id == DSP_UNIT) {
				/* Get address of node's delete function */
				status = get_fxn_address(pnode, &ul_delete_fxn,
							 DELETEPHASE);
			} else if (proc_id == IVA_UNIT)
				ul_delete_fxn = (u32) pnode->node_env;
			if (!status) {
				status = proc_get_state(hprocessor,
						&proc_state,
						sizeof(struct
						       dsp_processorstate));
				if (proc_state.proc_state != PROC_ERROR) {
					/* Dispatch delete RPC to the RMS. */
					status =
					    disp_node_delete(disp_obj, pnode,
							     hnode_mgr->
							     fxn_addrs
							     [RMSDELETENODE],
							     ul_delete_fxn,
							     pnode->node_env);
				} else
					NODE_SET_STATE(pnode, NODE_DONE);

				/* Unload execute-phase, then delete-phase,
				 * code; failures are logged only. */
				if (state == NODE_RUNNING &&
				    pnode->phase_split) {
					status1 =
					    hnode_mgr->nldr_fxns.
					    unload(pnode->nldr_node_obj,
						   NLDR_EXECUTE);
				}
				if (status1)
					pr_err("%s: fail - unload execute code:"
					       " 0x%x\n", __func__, status1);

				status1 =
				    hnode_mgr->nldr_fxns.unload(pnode->
								nldr_node_obj,
								NLDR_DELETE);
				pnode->loaded = false;
				if (status1)
					pr_err("%s: fail - unload delete code: "
					       "0x%x\n", __func__, status1);
			}
		}
	}

	/* Free host-side resources even if the DSP-side delete failed. */
	list_del(&pnode->list_elem);
	hnode_mgr->num_nodes--;

	/* The node counted as "created" unless it never got an env. */
	if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
					  (pnode->node_env != (u32) NULL)))
		hnode_mgr->num_created--;

	/* Drop the node from process resource tracking, then free it. */
	drv_proc_node_update_status(node_res, false);
	delete_node(pnode, pr_ctxt);

	/* Release the id slot held for this node resource. */
	idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
	kfree(node_res);

	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
func_end:
	dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
	return status;
}
1522
1523
1524
1525
1526
1527
1528int node_delete_mgr(struct node_mgr *hnode_mgr)
1529{
1530 if (!hnode_mgr)
1531 return -EFAULT;
1532
1533 delete_node_mgr(hnode_mgr);
1534
1535 return 0;
1536}
1537
1538
1539
1540
1541
1542
1543int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1544 u32 node_tab_size, u32 *pu_num_nodes,
1545 u32 *pu_allocated)
1546{
1547 struct node_object *hnode;
1548 u32 i = 0;
1549 int status = 0;
1550
1551 if (!hnode_mgr) {
1552 status = -EFAULT;
1553 goto func_end;
1554 }
1555
1556 mutex_lock(&hnode_mgr->node_mgr_lock);
1557
1558 if (hnode_mgr->num_nodes > node_tab_size) {
1559 *pu_allocated = hnode_mgr->num_nodes;
1560 *pu_num_nodes = 0;
1561 status = -EINVAL;
1562 } else {
1563 list_for_each_entry(hnode, &hnode_mgr->node_list, list_elem)
1564 node_tab[i++] = hnode;
1565 *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1566 }
1567
1568
1569 mutex_unlock(&hnode_mgr->node_mgr_lock);
1570func_end:
1571 return status;
1572}
1573
1574
1575
1576
1577
1578
1579int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
1580 struct dsp_bufferattr *pattr)
1581{
1582 struct node_object *pnode = (struct node_object *)hnode;
1583 int status = 0;
1584 u32 proc_id;
1585
1586 if (!hnode) {
1587 status = -EFAULT;
1588 goto func_end;
1589 }
1590 status = proc_get_processor_id(pnode->processor, &proc_id);
1591 if (proc_id == DSP_UNIT) {
1592 if (!status) {
1593 if (pattr == NULL) {
1594
1595 pattr = &node_dfltbufattrs;
1596 }
1597
1598 if (pattr->segment_id != 1)
1599 status = -EBADR;
1600
1601
1602 status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
1603 }
1604 } else {
1605 }
1606func_end:
1607 return status;
1608}
1609
1610
1611
1612
1613
1614
1615
/*
 *  ======== node_get_attr ========
 *  Purpose:
 *      Copy the node's attributes (creation args, priority, timeout,
 *      heap info, GPP stream counts and database info) into pattr.
 *  Parameters:
 *      hnode:      node handle.
 *      pattr:      out - attribute structure to fill.
 *      attr_size:  size of the caller's structure.
 *                  NOTE(review): attr_size is currently unused - no
 *                  size validation is performed; confirm callers always
 *                  pass at least sizeof(struct dsp_nodeattr).
 *  Returns:
 *      0 on success, -EFAULT for a NULL node handle.
 */
int node_get_attr(struct node_object *hnode,
		  struct dsp_nodeattr *pattr, u32 attr_size)
{
	struct node_mgr *hnode_mgr;

	if (!hnode)
		return -EFAULT;

	hnode_mgr = hnode->node_mgr;
	/* Enter the manager critical section: the fields read below can
	 * be changed concurrently by other NODE functions. */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	pattr->cb_struct = sizeof(struct dsp_nodeattr);
	/* Fill in the dsp_nodeattrin sub-structure. */
	pattr->in_node_attr_in.cb_struct =
	    sizeof(struct dsp_nodeattrin);
	pattr->in_node_attr_in.prio = hnode->prio;
	pattr->in_node_attr_in.timeout = hnode->timeout;
	pattr->in_node_attr_in.heap_size =
	    hnode->create_args.asa.task_arg_obj.heap_size;
	pattr->in_node_attr_in.pgpp_virt_addr = (void *)
	    hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
	pattr->node_attr_inputs = hnode->num_gpp_inputs;
	pattr->node_attr_outputs = hnode->num_gpp_outputs;
	/* Fill in the dsp_nodeinfo sub-structure. */
	get_node_info(hnode, &(pattr->node_info));

	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);

	return 0;
}
1649
1650
1651
1652
1653
1654
1655
1656int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1657 u32 *chan_id)
1658{
1659 enum node_type node_type;
1660 int status = -EINVAL;
1661
1662 if (!hnode) {
1663 status = -EFAULT;
1664 return status;
1665 }
1666 node_type = node_get_type(hnode);
1667 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1668 status = -EPERM;
1669 return status;
1670 }
1671 if (dir == DSP_TONODE) {
1672 if (index < MAX_INPUTS(hnode)) {
1673 if (hnode->inputs[index].type == HOSTCONNECT) {
1674 *chan_id = hnode->inputs[index].dev_id;
1675 status = 0;
1676 }
1677 }
1678 } else {
1679 if (index < MAX_OUTPUTS(hnode)) {
1680 if (hnode->outputs[index].type == HOSTCONNECT) {
1681 *chan_id = hnode->outputs[index].dev_id;
1682 status = 0;
1683 }
1684 }
1685 }
1686 return status;
1687}
1688
1689
1690
1691
1692
1693
/*
 *  ======== node_get_message ========
 *  Purpose:
 *      Retrieve a message from the node's message queue, blocking for up
 *      to utimeout.  If the message carries a shared-memory buffer
 *      descriptor (DSP_RMSBUFDESC), translate its DSP address into a GPP
 *      virtual address and convert sizes from DSP words to bytes.
 *  Returns:
 *      0 on success; -EFAULT (bad handle), -EPERM (processor in error
 *      state or node type has no message queue), -ESRCH (address
 *      translation failed), or the queue's own error/timeout status.
 */
int node_get_message(struct node_object *hnode,
		     struct dsp_msg *message, u32 utimeout)
{
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	void *tmp_buf;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to get the
	 * message. */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	/* Only message, task and DAIS-socket nodes own a message queue. */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET) {
		status = -EPERM;
		goto func_end;
	}
	/* This call may block until a message arrives or the timeout
	 * expires; clients can use message-ready notification to avoid
	 * blocking here. */
	intf_fxns = hnode_mgr->intf_fxns;
	status =
	    (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
	/* Only SM buffer descriptors need address translation. */
	if (status || !(message->cmd & DSP_RMSBUFDESC))
		goto func_end;

	/* arg1 is in DSP words: convert to a byte address, then DSP
	 * physical -> GPP physical. */
	tmp_buf = cmm_xlator_translate(hnode->xlator,
				       (void *)(message->arg1 *
						hnode->node_mgr->
						dsp_word_size), CMM_DSPPA2PA);
	if (tmp_buf != NULL) {
		/* Now GPP physical -> GPP virtual. */
		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
					       CMM_PA2VA);
		if (tmp_buf != NULL) {
			/* Return the virtual address; scale the size
			 * (arg2) from DSP words to bytes. */
			message->arg1 = (u32) tmp_buf;
			message->arg2 *= hnode->node_mgr->dsp_word_size;
		} else {
			status = -ESRCH;
		}
	} else {
		status = -ESRCH;
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
		hnode, message, utimeout);
	return status;
}
1763
1764
1765
1766
1767int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1768 struct nldr_object **nldr_ovlyobj)
1769{
1770 int status = 0;
1771 struct node_mgr *node_mgr_obj = hnode_mgr;
1772
1773 if (!hnode_mgr)
1774 status = -EFAULT;
1775 else
1776 *nldr_ovlyobj = node_mgr_obj->nldr_obj;
1777
1778 return status;
1779}
1780
1781
1782
1783
1784
1785
1786int node_get_strm_mgr(struct node_object *hnode,
1787 struct strm_mgr **strm_man)
1788{
1789 int status = 0;
1790
1791 if (!hnode)
1792 status = -EFAULT;
1793 else
1794 *strm_man = hnode->node_mgr->strm_mgr_obj;
1795
1796 return status;
1797}
1798
1799
1800
1801
1802enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1803{
1804 if (!hnode) {
1805 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1806 return -1;
1807 } else {
1808 return hnode->dcd_props.obj_data.node_obj.load_type;
1809 }
1810}
1811
1812
1813
1814
1815
1816
1817u32 node_get_timeout(struct node_object *hnode)
1818{
1819 if (!hnode) {
1820 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1821 return 0;
1822 } else {
1823 return hnode->timeout;
1824 }
1825}
1826
1827
1828
1829
1830
1831
1832enum node_type node_get_type(struct node_object *hnode)
1833{
1834 enum node_type node_type;
1835
1836 if (hnode == (struct node_object *)DSP_HGPPNODE)
1837 node_type = NODE_GPP;
1838 else {
1839 if (!hnode)
1840 node_type = -1;
1841 else
1842 node_type = hnode->ntype;
1843 }
1844 return node_type;
1845}
1846
1847
1848
1849
1850
1851
1852void node_on_exit(struct node_object *hnode, s32 node_status)
1853{
1854 if (!hnode)
1855 return;
1856
1857
1858 NODE_SET_STATE(hnode, NODE_DONE);
1859 hnode->exit_status = node_status;
1860 if (hnode->loaded && hnode->phase_split) {
1861 (void)hnode->node_mgr->nldr_fxns.unload(hnode->
1862 nldr_node_obj,
1863 NLDR_EXECUTE);
1864 hnode->loaded = false;
1865 }
1866
1867 (void)sync_set_event(hnode->sync_done);
1868
1869 proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
1870 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1871}
1872
1873
1874
1875
1876
1877
1878int node_pause(struct node_object *hnode)
1879{
1880 struct node_object *pnode = (struct node_object *)hnode;
1881 enum node_type node_type;
1882 enum node_state state;
1883 struct node_mgr *hnode_mgr;
1884 int status = 0;
1885 u32 proc_id;
1886 struct dsp_processorstate proc_state;
1887 struct proc_object *hprocessor;
1888
1889 if (!hnode) {
1890 status = -EFAULT;
1891 } else {
1892 node_type = node_get_type(hnode);
1893 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
1894 status = -EPERM;
1895 }
1896 if (status)
1897 goto func_end;
1898
1899 status = proc_get_processor_id(pnode->processor, &proc_id);
1900
1901 if (proc_id == IVA_UNIT)
1902 status = -ENOSYS;
1903
1904 if (!status) {
1905 hnode_mgr = hnode->node_mgr;
1906
1907
1908 mutex_lock(&hnode_mgr->node_mgr_lock);
1909 state = node_get_state(hnode);
1910
1911 if (state != NODE_RUNNING)
1912 status = -EBADR;
1913
1914 if (status)
1915 goto func_cont;
1916 hprocessor = hnode->processor;
1917 status = proc_get_state(hprocessor, &proc_state,
1918 sizeof(struct dsp_processorstate));
1919 if (status)
1920 goto func_cont;
1921
1922
1923 if (proc_state.proc_state == PROC_ERROR) {
1924 status = -EPERM;
1925 goto func_cont;
1926 }
1927
1928 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
1929 hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
1930 hnode->node_env, NODE_SUSPENDEDPRI);
1931
1932
1933 if (status >= 0)
1934 NODE_SET_STATE(hnode, NODE_PAUSED);
1935
1936func_cont:
1937
1938
1939 mutex_unlock(&hnode_mgr->node_mgr_lock);
1940 if (status >= 0) {
1941 proc_notify_clients(hnode->processor,
1942 DSP_NODESTATECHANGE);
1943 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1944 }
1945 }
1946func_end:
1947 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
1948 return status;
1949}
1950
1951
1952
1953
1954
1955
1956
1957
/*
 *  ======== node_put_message ========
 *  Purpose:
 *      Send a message to a message/task/DAIS-socket node.  If the
 *      message references a shared-memory buffer (DSP_RMSBUFDESC),
 *      translate the GPP virtual address and sizes into DSP words
 *      before queueing.
 *  Returns:
 *      0 on success; -EFAULT (bad handle), -EPERM (wrong node type,
 *      processor in error state, or zero DSP word size), -EBADR (node
 *      terminating/done), -ESRCH (translation failed), or the queue's
 *      error status.
 */
int node_put_message(struct node_object *hnode,
		     const struct dsp_msg *pmsg, u32 utimeout)
{
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	int status = 0;
	void *tmp_buf;
	struct dsp_msg new_msg;	/* local copy; pmsg is const */
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to send the
	 * message. */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET)
		status = -EPERM;

	if (!status) {
		/* Check node state: messages cannot be sent once the node
		 * is terminating or done.  There is an unavoidable window
		 * where the node may still exit on its own between this
		 * check and the actual send. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state == NODE_TERMINATING || state == NODE_DONE)
			status = -EBADR;

		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (status)
		goto func_end;

	/* Work on a local copy so the caller's message stays untouched. */
	new_msg = *pmsg;
	/* Translate the payload if it is a SM buffer descriptor. */
	if (pmsg->cmd & DSP_RMSBUFDESC) {
		/* GPP virtual address -> DSP physical address. */
		tmp_buf = cmm_xlator_translate(hnode->xlator,
					       (void *)new_msg.arg1,
					       CMM_VA2DSPPA);
		if (tmp_buf != NULL) {
			/* Convert byte address and size to DSP words. */
			if (hnode->node_mgr->dsp_word_size != 0) {
				new_msg.arg1 =
				    (u32) tmp_buf /
				    hnode->node_mgr->dsp_word_size;

				new_msg.arg2 /= hnode->node_mgr->
				    dsp_word_size;
			} else {
				pr_err("%s: dsp_word_size is zero!\n",
				       __func__);
				status = -EPERM;
			}
		} else {	/* failed to translate buffer address */
			status = -ESRCH;
		}
	}
	if (!status) {
		intf_fxns = hnode_mgr->intf_fxns;
		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
						&new_msg, utimeout);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
	return status;
}
2047
2048
2049
2050
2051
2052
2053int node_register_notify(struct node_object *hnode, u32 event_mask,
2054 u32 notify_type,
2055 struct dsp_notification *hnotification)
2056{
2057 struct bridge_drv_interface *intf_fxns;
2058 int status = 0;
2059
2060 if (!hnode) {
2061 status = -EFAULT;
2062 } else {
2063
2064 if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2065 status = -EINVAL;
2066
2067
2068 if (notify_type != DSP_SIGNALEVENT)
2069 status = -EINVAL;
2070
2071
2072
2073 if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2074 status = -EINVAL;
2075 }
2076 if (!status) {
2077 if (event_mask == DSP_NODESTATECHANGE) {
2078 status = ntfy_register(hnode->ntfy_obj, hnotification,
2079 event_mask & DSP_NODESTATECHANGE,
2080 notify_type);
2081 } else {
2082
2083 intf_fxns = hnode->node_mgr->intf_fxns;
2084 status = (*intf_fxns->msg_register_notify)
2085 (hnode->msg_queue_obj,
2086 event_mask & DSP_NODEMESSAGEREADY, notify_type,
2087 hnotification);
2088 }
2089
2090 }
2091 dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2092 "hnotification: %p status 0x%x\n", __func__, hnode,
2093 event_mask, notify_type, hnotification, status);
2094 return status;
2095}
2096
2097
2098
2099
2100
2101
2102
2103
/*
 *  ======== node_run ========
 *  Purpose:
 *      Start a created node's execute phase, or resume a paused node by
 *      restoring its original priority.  Loads execute-phase code first
 *      when the node uses phase-split libraries.
 *  Returns:
 *      0 on success; -EFAULT (bad handle or manager), -EPERM (device
 *      node or processor in error state), -EBADR (node not CREATED or
 *      PAUSED), or a propagated error.
 */
int node_run(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	enum node_state state;
	u32 ul_execute_fxn;
	u32 ul_fxn_addr;
	int status = 0;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to run the
	 * node. */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Device nodes have no executable phase. */
	node_type = node_get_type(hnode);
	if (node_type == NODE_DEVICE)
		status = -EPERM;
	if (status)
		goto func_end;

	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(hnode);
	if (state != NODE_CREATED && state != NODE_PAUSED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont1;

	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
		goto func_cont1;

	if (state == NODE_CREATED) {
		/* Load execute-phase code if it is not yet resident. */
		if (!(hnode->loaded) && hnode->phase_split) {
			status =
			    hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						      NLDR_EXECUTE);
			if (!status) {
				hnode->loaded = true;
			} else {
				pr_err("%s: fail - load execute code: 0x%x\n",
				       __func__, status);
			}
		}
		if (!status) {
			/* Get the address of the execute-phase function. */
			if (proc_id == IVA_UNIT)
				ul_execute_fxn = (u32) hnode->node_env;
			else {
				status = get_fxn_address(hnode, &ul_execute_fxn,
							 EXECUTEPHASE);
			}
		}
		if (!status) {
			/* Dispatch the execute command to the RMS. */
			ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
			status =
			    disp_node_run(hnode_mgr->disp_obj, hnode,
					  ul_fxn_addr, ul_execute_fxn,
					  hnode->node_env);
		}
	} else if (state == NODE_PAUSED) {
		/* Resume by restoring the node's original priority. */
		ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
						   ul_fxn_addr, hnode->node_env,
						   NODE_GET_PRIORITY(hnode));
	} else {
		/* Unreachable: state was validated above. */
	}
func_cont1:
	/* Update node state; restore the previous state on failure. */
	if (status >= 0)
		NODE_SET_STATE(hnode, NODE_RUNNING);
	else
		NODE_SET_STATE(hnode, state);

	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
2214
2215
2216
2217
2218
2219
2220
/*
 *  ======== node_terminate ========
 *  Purpose:
 *      Ask a running task/DAIS-socket node to exit: send RMS_EXIT, wait
 *      for the node's done event, escalate to RMS_KILLTASK on timeout,
 *      and raise a DEH fatal-error notification if the node still does
 *      not terminate.  On success the node's exit status is returned
 *      through *pstatus.
 *  Returns:
 *      0 on success; -EFAULT (bad handle/processor), -EPERM (wrong node
 *      type or processor in error state), -EBADR (node not running), or
 *      an error from messaging/waiting.
 */
int node_terminate(struct node_object *hnode, int *pstatus)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	struct dsp_msg msg, killmsg;
	int status = 0;
	u32 proc_id, kill_time_out;
	struct deh_mgr *hdeh_mgr;
	struct dsp_processorstate proc_state;

	if (!hnode || !hnode->node_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (pnode->processor == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_processor_id(pnode->processor, &proc_id);

	if (!status) {
		hnode_mgr = hnode->node_mgr;
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (!status) {
		/* Check node state; mark it TERMINATING under the lock so
		 * other NODE calls see the transition. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state != NODE_RUNNING) {
			status = -EBADR;
			/* If the node already exited on its own, still
			 * hand back its exit status. */
			if (state == NODE_DONE)
				*pstatus = hnode->exit_status;

		} else {
			NODE_SET_STATE(hnode, NODE_TERMINATING);
		}

		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (!status) {
		/*
		 * Send the exit message.  The state is not set to
		 * NODE_DONE here; that happens in the exit callback.
		 */
		status = proc_get_state(pnode->processor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt to
		 * send a kill task command. */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		msg.cmd = RMS_EXIT;
		msg.arg1 = hnode->node_env;
		killmsg.cmd = RMS_KILLTASK;
		killmsg.arg1 = hnode->node_env;
		intf_fxns = hnode_mgr->intf_fxns;

		/* Total kill budget: twice the node timeout, capped at
		 * MAXTIMEOUT; it is split between the two waits below. */
		if (hnode->timeout > MAXTIMEOUT)
			kill_time_out = MAXTIMEOUT;
		else
			kill_time_out = (hnode->timeout) * 2;

		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
						hnode->timeout);
		if (status)
			goto func_cont;

		/*
		 * Wait on the sync object posted by the exit callback
		 * (or by node_delete()).  Anything but a timeout means
		 * the node finished or the wait failed.
		 */
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (status != ETIME)
			goto func_cont;

		/* First wait timed out: escalate to a kill command. */
		status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
					       &killmsg, hnode->timeout);
		if (status)
			goto func_cont;
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (status) {
			/*
			 * The node ignored even the kill command: report
			 * it as a (simulated) DSP exception via DEH.
			 */
			dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
			if (!hdeh_mgr)
				goto func_cont;

			bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
		}
	}
func_cont:
	if (!status) {
		/* Re-enter the critical section before reading the exit
		 * status, in case the node was deleted while we blocked. */
		mutex_lock(&hnode_mgr->node_mgr_lock);

		if (!hnode) {
			status = -EPERM;
		} else {
			*pstatus = hnode->exit_status;
			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
				__func__, hnode, hnode->node_env, status);
		}
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
func_end:
	return status;
}
2346
2347
2348
2349
2350
2351
/*
 *  ======== delete_node ========
 *  Purpose:
 *      Free all host-side resources owned by a node object: message
 *      queue, stream tables and definitions, mapped/reserved DSP heap,
 *      DCD phase-function strings, the address translator, the loader
 *      node and finally the node object itself.
 *  Note:
 *      pr_ctxt may be NULL when called from delete_node_mgr().
 */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 i;
	enum node_type node_type;
	struct stream_chnl stream;
	struct node_msgargs node_msg_args;
	struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object =
	    (struct proc_object *)hnode->processor;
#endif
	int status;
	if (!hnode)
		goto func_end;
	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr)
		goto func_end;

	node_type = node_get_type(hnode);
	/* Device nodes own none of the message/stream resources below. */
	if (node_type != NODE_DEVICE) {
		node_msg_args = hnode->create_args.asa.node_msg_args;
		kfree(node_msg_args.pdata);

		/* Delete the node's message queue. */
		if (hnode->msg_queue_obj) {
			intf_fxns = hnode_mgr->intf_fxns;
			(*intf_fxns->msg_delete_queue) (hnode->
							msg_queue_obj);
			hnode->msg_queue_obj = NULL;
		}

		kfree(hnode->sync_done);

		/* Release every input/output stream channel. */
		if (hnode->inputs) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				stream = hnode->inputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->inputs);
			hnode->inputs = NULL;
		}
		if (hnode->outputs) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				stream = hnode->outputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->outputs);
			hnode->outputs = NULL;
		}
		/* Free the stream definition tables (device name strings
		 * first, then the arrays). */
		task_arg_obj = hnode->create_args.asa.task_arg_obj;
		if (task_arg_obj.strm_in_def) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_in_def[i].sz_device);
				task_arg_obj.strm_in_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_in_def);
			task_arg_obj.strm_in_def = NULL;
		}
		if (task_arg_obj.strm_out_def) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_out_def[i].sz_device);
				task_arg_obj.strm_out_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_out_def);
			task_arg_obj.strm_out_def = NULL;
		}
		/* Unmap and un-reserve the node's DSP heap.
		 * NOTE(review): both statuses are discarded - this is
		 * best-effort cleanup. */
		if (task_arg_obj.dsp_heap_res_addr) {
			status = proc_un_map(hnode->processor, (void *)
					     task_arg_obj.dsp_heap_addr,
					     pr_ctxt);

			status = proc_un_reserve_memory(hnode->processor,
							(void *)
							task_arg_obj.
							dsp_heap_res_addr,
							pr_ctxt);
#ifdef DSP_DMM_DEBUG
			status = dmm_get_handle(p_proc_object, &dmm_mgr);
			if (dmm_mgr)
				dmm_mem_map_dump(dmm_mgr);
			else
				status = DSP_EHANDLE;
#endif
		}
	}
	if (node_type != NODE_MESSAGE) {
		kfree(hnode->stream_connect);
		hnode->stream_connect = NULL;
	}
	kfree(hnode->str_dev_name);
	hnode->str_dev_name = NULL;

	if (hnode->ntfy_obj) {
		ntfy_delete(hnode->ntfy_obj);
		kfree(hnode->ntfy_obj);
		hnode->ntfy_obj = NULL;
	}

	/* Free the DCD phase-function and algorithm name strings. */
	kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
	hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;

	/* Free the address translator and loader node, then the node. */
	kfree(hnode->xlator);
	kfree(hnode->nldr_node_obj);
	hnode->nldr_node_obj = NULL;
	hnode->node_mgr = NULL;
	kfree(hnode);
	hnode = NULL;
func_end:
	return;
}
2478
2479
2480
2481
2482
2483
/*
 *  ======== delete_node_mgr ========
 *  Purpose:
 *      Tear down a (possibly partially constructed) node manager:
 *      destroy the DCD manager, delete any remaining nodes, then the
 *      lock, notification object, dispatcher, stream manager and
 *      loader.  Each member is checked before use, so this is safe
 *      to call from node_create_mgr()'s error path.
 */
static void delete_node_mgr(struct node_mgr *hnode_mgr)
{
	struct node_object *hnode, *tmp;

	if (hnode_mgr) {
		/* Free resources */
		if (hnode_mgr->dcd_mgr)
			dcd_destroy_manager(hnode_mgr->dcd_mgr);

		/* Delete any nodes still on the manager's list. */
		list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
					 list_elem) {
			list_del(&hnode->list_elem);
			delete_node(hnode, NULL);
		}
		mutex_destroy(&hnode_mgr->node_mgr_lock);
		if (hnode_mgr->ntfy_obj) {
			ntfy_delete(hnode_mgr->ntfy_obj);
			kfree(hnode_mgr->ntfy_obj);
		}

		if (hnode_mgr->disp_obj)
			disp_delete(hnode_mgr->disp_obj);

		if (hnode_mgr->strm_mgr_obj)
			strm_delete(hnode_mgr->strm_mgr_obj);

		/* Delete the loader */
		if (hnode_mgr->nldr_obj)
			hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);

		kfree(hnode_mgr);
	}
}
2518
2519
2520
2521
2522
2523
/*
 *  ======== fill_stream_connect ========
 *  Purpose:
 *      Fill in the dsp_streamconnect record(s) for a new connection
 *      from node1 (stream1) to node2 (stream2).  Either end may be the
 *      GPP pseudo-node (DSP_HGPPNODE) or a device node; those ends keep
 *      no connection record.
 *  Note:
 *      The new record is taken at index (num_inputs + num_outputs - 1);
 *      presumably the caller has already incremented the node's stream
 *      counts for this connection - confirm against node_connect().
 */
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2,
				u32 stream1, u32 stream2)
{
	u32 strm_index;
	struct dsp_streamconnect *strm1 = NULL;
	struct dsp_streamconnect *strm2 = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;

	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);
	if (node1 != (struct node_object *)DSP_HGPPNODE) {

		if (node1_type != NODE_DEVICE) {
			strm_index = node1->num_inputs +
			    node1->num_outputs - 1;
			strm1 = &(node1->stream_connect[strm_index]);
			strm1->cb_struct = sizeof(struct dsp_streamconnect);
			strm1->this_node_stream_index = stream1;
		}

		if (node2 != (struct node_object *)DSP_HGPPNODE) {
			/* NODE ==> NODE connection */
			if (node1_type != NODE_DEVICE) {
				strm1->connected_node = node2;
				strm1->ui_connected_node_id = node2->node_uuid;
				strm1->connected_node_stream_index = stream2;
				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
			}
			if (node2_type != NODE_DEVICE) {
				strm_index = node2->num_inputs +
				    node2->num_outputs - 1;
				strm2 = &(node2->stream_connect[strm_index]);
				strm2->cb_struct =
				    sizeof(struct dsp_streamconnect);
				strm2->this_node_stream_index = stream2;
				strm2->connected_node = node1;
				strm2->ui_connected_node_id = node1->node_uuid;
				strm2->connected_node_stream_index = stream1;
				strm2->connect_type = CONNECTTYPE_NODEINPUT;
			}
		} else if (node1_type != NODE_DEVICE)
			/* NODE ==> GPP connection */
			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
	} else {
		/* GPP ==> NODE connection */
		strm_index = node2->num_inputs + node2->num_outputs - 1;
		strm2 = &(node2->stream_connect[strm_index]);
		strm2->cb_struct = sizeof(struct dsp_streamconnect);
		strm2->this_node_stream_index = stream2;
		strm2->connect_type = CONNECTTYPE_GPPINPUT;
	}
}
2577
2578
2579
2580
2581
2582
2583static void fill_stream_def(struct node_object *hnode,
2584 struct node_strmdef *pstrm_def,
2585 struct dsp_strmattr *pattrs)
2586{
2587 struct node_mgr *hnode_mgr = hnode->node_mgr;
2588
2589 if (pattrs != NULL) {
2590 pstrm_def->num_bufs = pattrs->num_bufs;
2591 pstrm_def->buf_size =
2592 pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
2593 pstrm_def->seg_id = pattrs->seg_id;
2594 pstrm_def->buf_alignment = pattrs->buf_alignment;
2595 pstrm_def->timeout = pattrs->timeout;
2596 } else {
2597 pstrm_def->num_bufs = DEFAULTNBUFS;
2598 pstrm_def->buf_size =
2599 DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
2600 pstrm_def->seg_id = DEFAULTSEGID;
2601 pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2602 pstrm_def->timeout = DEFAULTTIMEOUT;
2603 }
2604}
2605
2606
2607
2608
2609
2610
2611static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2612{
2613
2614 if (stream.type == NODECONNECT) {
2615 if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
2616
2617 clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2618 clear_bit(stream.dev_id, hnode_mgr->pipe_map);
2619 } else {
2620
2621 set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2622 }
2623 } else if (stream.type == HOSTCONNECT) {
2624 if (stream.dev_id < hnode_mgr->num_chnls) {
2625 clear_bit(stream.dev_id, hnode_mgr->chnl_map);
2626 } else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
2627
2628 clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
2629 hnode_mgr->dma_chnl_map);
2630 } else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
2631
2632 clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
2633 hnode_mgr->zc_chnl_map);
2634 }
2635 }
2636}
2637
2638
2639
2640
2641
2642
2643static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2644 u32 phase)
2645{
2646 char *pstr_fxn_name = NULL;
2647 struct node_mgr *hnode_mgr = hnode->node_mgr;
2648 int status = 0;
2649
2650 switch (phase) {
2651 case CREATEPHASE:
2652 pstr_fxn_name =
2653 hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
2654 break;
2655 case EXECUTEPHASE:
2656 pstr_fxn_name =
2657 hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
2658 break;
2659 case DELETEPHASE:
2660 pstr_fxn_name =
2661 hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
2662 break;
2663 default:
2664
2665 break;
2666 }
2667
2668 status =
2669 hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
2670 pstr_fxn_name, fxn_addr);
2671
2672 return status;
2673}
2674
2675
2676
2677
2678
2679
2680void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2681{
2682 u32 i;
2683
2684 node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2685 node_info->nb_node_database_props =
2686 hnode->dcd_props.obj_data.node_obj.ndb_props;
2687 node_info->execution_priority = hnode->prio;
2688 node_info->device_owner = hnode->device_owner;
2689 node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2690 node_info->node_env = hnode->node_env;
2691
2692 node_info->ns_execution_state = node_get_state(hnode);
2693
2694
2695 for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2696 node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2697
2698}
2699
2700
2701
2702
2703
2704
/*
 * get_node_props() - fetch a node's properties from the DCD database and
 * cache the relevant pieces (node type, message args, device name, task
 * args) into the node object.
 *
 * @hdcd_mgr:  DCD manager used to look up the node definition
 * @hnode:     node object to populate
 * @node_uuid: UUID identifying the node in the DCD database
 * @dcd_prop:  out: generic DCD object receiving the full node definition
 *
 * Returns 0 on success, -ENOMEM if the device-name copy fails, or the
 * error from dcd_get_object_def().
 */
static int get_node_props(struct dcd_manager *hdcd_mgr,
			  struct node_object *hnode,
			  const struct dsp_uuid *node_uuid,
			  struct dcd_genericobj *dcd_prop)
{
	u32 len;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *task_arg_obj;
	enum node_type node_type = NODE_TASK;
	struct dsp_ndbprops *pndb_props =
	    &(dcd_prop->obj_data.node_obj.ndb_props);
	int status = 0;
	char sz_uuid[MAXUUIDLEN];

	/* Pull the full node definition out of the DCD database. */
	status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
				    DSP_DCDNODETYPE, dcd_prop);

	if (!status) {
		hnode->ntype = node_type = pndb_props->ntype;

		/* Log the UUID we resolved (debug aid only). */
		snprintf(sz_uuid, MAXUUIDLEN, "%pUL", node_uuid);
		dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);

		/* Every node type except device nodes has messaging args. */
		if (node_type != NODE_DEVICE) {
			pmsg_args = &(hnode->create_args.asa.node_msg_args);
			pmsg_args->seg_id =
			    dcd_prop->obj_data.node_obj.msg_segid;
			pmsg_args->notify_type =
			    dcd_prop->obj_data.node_obj.msg_notify_type;
			pmsg_args->max_msgs = pndb_props->message_depth;
			dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
				pmsg_args->max_msgs);
		} else {
			/* Device node: cache a copy of its device name. */
			len = strlen(pndb_props->ac_name);
			hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
			if (hnode->str_dev_name == NULL) {
				status = -ENOMEM;
			} else {
				/*
				 * kzalloc zeroed the buffer, so copying
				 * exactly len bytes leaves it NUL-terminated.
				 */
				strncpy(hnode->str_dev_name,
					pndb_props->ac_name, len);
			}
		}
	}
	if (!status) {
		/* Task and DAIS-socket nodes also carry task arguments. */
		if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
			task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
			task_arg_obj->prio = pndb_props->prio;
			task_arg_obj->stack_size = pndb_props->stack_size;
			task_arg_obj->sys_stack_size =
			    pndb_props->sys_stack_size;
			task_arg_obj->stack_seg = pndb_props->stack_seg;
			dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
				"0x%x words System Stack Size: 0x%x words "
				"Stack Segment: 0x%x profile count : 0x%x\n",
				task_arg_obj->prio, task_arg_obj->stack_size,
				task_arg_obj->sys_stack_size,
				task_arg_obj->stack_seg,
				pndb_props->count_profiles);
		}
	}

	return status;
}
2772
2773
2774
2775
2776
2777
2778static int get_proc_props(struct node_mgr *hnode_mgr,
2779 struct dev_object *hdev_obj)
2780{
2781 struct cfg_hostres *host_res;
2782 struct bridge_dev_context *pbridge_context;
2783 int status = 0;
2784
2785 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2786 if (!pbridge_context)
2787 status = -EFAULT;
2788
2789 if (!status) {
2790 host_res = pbridge_context->resources;
2791 if (!host_res)
2792 return -EPERM;
2793 hnode_mgr->chnl_offset = host_res->chnl_offset;
2794 hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
2795 hnode_mgr->num_chnls = host_res->num_chnls;
2796
2797
2798
2799
2800
2801
2802 hnode_mgr->proc_family = 6000;
2803 hnode_mgr->proc_type = 6410;
2804 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2805 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2806 hnode_mgr->dsp_word_size = DSPWORDSIZE;
2807 hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
2808 hnode_mgr->dsp_mau_size = 1;
2809
2810 }
2811 return status;
2812}
2813
2814
2815
2816
2817
2818
2819int node_get_uuid_props(void *hprocessor,
2820 const struct dsp_uuid *node_uuid,
2821 struct dsp_ndbprops *node_props)
2822{
2823 struct node_mgr *hnode_mgr = NULL;
2824 struct dev_object *hdev_obj;
2825 int status = 0;
2826 struct dcd_nodeprops dcd_node_props;
2827 struct dsp_processorstate proc_state;
2828
2829 if (hprocessor == NULL || node_uuid == NULL) {
2830 status = -EFAULT;
2831 goto func_end;
2832 }
2833 status = proc_get_state(hprocessor, &proc_state,
2834 sizeof(struct dsp_processorstate));
2835 if (status)
2836 goto func_end;
2837
2838
2839 if (proc_state.proc_state == PROC_ERROR) {
2840 status = -EPERM;
2841 goto func_end;
2842 }
2843
2844 status = proc_get_dev_object(hprocessor, &hdev_obj);
2845 if (hdev_obj) {
2846 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
2847 if (hnode_mgr == NULL) {
2848 status = -EFAULT;
2849 goto func_end;
2850 }
2851 }
2852
2853
2854
2855
2856
2857
2858
2859 mutex_lock(&hnode_mgr->node_mgr_lock);
2860
2861 dcd_node_props.str_create_phase_fxn = NULL;
2862 dcd_node_props.str_execute_phase_fxn = NULL;
2863 dcd_node_props.str_delete_phase_fxn = NULL;
2864 dcd_node_props.str_i_alg_name = NULL;
2865
2866 status = dcd_get_object_def(hnode_mgr->dcd_mgr,
2867 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
2868 (struct dcd_genericobj *)&dcd_node_props);
2869
2870 if (!status) {
2871 *node_props = dcd_node_props.ndb_props;
2872 kfree(dcd_node_props.str_create_phase_fxn);
2873
2874 kfree(dcd_node_props.str_execute_phase_fxn);
2875
2876 kfree(dcd_node_props.str_delete_phase_fxn);
2877
2878 kfree(dcd_node_props.str_i_alg_name);
2879 }
2880
2881 mutex_unlock(&hnode_mgr->node_mgr_lock);
2882func_end:
2883 return status;
2884}
2885
2886
2887
2888
2889
2890
2891static int get_rms_fxns(struct node_mgr *hnode_mgr)
2892{
2893 s32 i;
2894 struct dev_object *dev_obj = hnode_mgr->dev_obj;
2895 int status = 0;
2896
2897 static char *psz_fxns[NUMRMSFXNS] = {
2898 "RMS_queryServer",
2899 "RMS_configureServer",
2900 "RMS_createNode",
2901 "RMS_executeNode",
2902 "RMS_deleteNode",
2903 "RMS_changeNodePriority",
2904 "RMS_readMemory",
2905 "RMS_writeMemory",
2906 "RMS_copy",
2907 };
2908
2909 for (i = 0; i < NUMRMSFXNS; i++) {
2910 status = dev_get_symbol(dev_obj, psz_fxns[i],
2911 &(hnode_mgr->fxn_addrs[i]));
2912 if (status) {
2913 if (status == -ESPIPE) {
2914
2915
2916
2917
2918 dev_dbg(bridge, "%s: RMS function: %s currently"
2919 " not loaded\n", __func__, psz_fxns[i]);
2920 } else {
2921 dev_dbg(bridge, "%s: Symbol not found: %s "
2922 "status = 0x%x\n", __func__,
2923 psz_fxns[i], status);
2924 break;
2925 }
2926 }
2927 }
2928
2929 return status;
2930}
2931
2932
2933
2934
2935
2936
2937static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
2938 u32 ul_num_bytes, u32 mem_space)
2939{
2940 struct node_object *hnode = (struct node_object *)priv_ref;
2941 struct node_mgr *hnode_mgr;
2942 u32 ul_bytes = 0;
2943 u32 ul_size;
2944 u32 ul_timeout;
2945 int status = 0;
2946 struct bridge_dev_context *hbridge_context;
2947
2948 struct bridge_drv_interface *intf_fxns;
2949
2950 hnode_mgr = hnode->node_mgr;
2951
2952 ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
2953 ul_timeout = hnode->timeout;
2954
2955
2956 intf_fxns = hnode_mgr->intf_fxns;
2957 status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
2958 if (!status) {
2959 status =
2960 (*intf_fxns->brd_mem_copy) (hbridge_context,
2961 dsp_run_addr, dsp_load_addr,
2962 ul_num_bytes, (u32) mem_space);
2963 if (!status)
2964 ul_bytes = ul_num_bytes;
2965 else
2966 pr_debug("%s: failed to copy brd memory, status 0x%x\n",
2967 __func__, status);
2968 } else {
2969 pr_debug("%s: failed to get Bridge context, status 0x%x\n",
2970 __func__, status);
2971 }
2972
2973 return ul_bytes;
2974}
2975
2976
2977
2978
2979static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
2980 u32 ul_num_bytes, u32 mem_space)
2981{
2982 struct node_object *hnode = (struct node_object *)priv_ref;
2983 struct node_mgr *hnode_mgr;
2984 u16 mem_sect_type;
2985 u32 ul_timeout;
2986 int status = 0;
2987 struct bridge_dev_context *hbridge_context;
2988
2989 struct bridge_drv_interface *intf_fxns;
2990
2991 hnode_mgr = hnode->node_mgr;
2992
2993 ul_timeout = hnode->timeout;
2994 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
2995
2996
2997 intf_fxns = hnode_mgr->intf_fxns;
2998 status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
2999 status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
3000 dsp_add, ul_num_bytes, mem_sect_type);
3001
3002 return ul_num_bytes;
3003}
3004
3005#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
3006
3007
3008
3009int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
3010 u32 offset_range, void *sym_addr_output, char *sym_name)
3011{
3012 struct node_object *node_obj;
3013 int status = -ENOENT;
3014
3015 list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) {
3016 status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
3017 offset_range, sym_addr_output, sym_name);
3018 if (!status) {
3019 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
3020 (unsigned int) node_mgr,
3021 sym_addr, offset_range,
3022 (unsigned int) sym_addr_output, sym_name);
3023 break;
3024 }
3025 }
3026
3027 return status;
3028}
3029#endif
3030