• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/staging/tidspbridge/rmgr/
1/*
2 * node.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP/BIOS Bridge Node Manager.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/types.h>
20/*  ----------------------------------- Host OS */
21#include <dspbridge/host_os.h>
22
23/*  ----------------------------------- DSP/BIOS Bridge */
24#include <dspbridge/dbdefs.h>
25
26/*  ----------------------------------- Trace & Debug */
27#include <dspbridge/dbc.h>
28
29/*  ----------------------------------- OS Adaptation Layer */
30#include <dspbridge/cfg.h>
31#include <dspbridge/list.h>
32#include <dspbridge/memdefs.h>
33#include <dspbridge/proc.h>
34#include <dspbridge/strm.h>
35#include <dspbridge/sync.h>
36#include <dspbridge/ntfy.h>
37
38/*  ----------------------------------- Platform Manager */
39#include <dspbridge/cmm.h>
40#include <dspbridge/cod.h>
41#include <dspbridge/dev.h>
42#include <dspbridge/msg.h>
43
44/*  ----------------------------------- Resource Manager */
45#include <dspbridge/dbdcd.h>
46#include <dspbridge/disp.h>
47#include <dspbridge/rms_sh.h>
48
49/*  ----------------------------------- Link Driver */
50#include <dspbridge/dspdefs.h>
51#include <dspbridge/dspioctl.h>
52
53/*  ----------------------------------- Others */
54#include <dspbridge/gb.h>
55#include <dspbridge/uuidutil.h>
56
57/*  ----------------------------------- This */
58#include <dspbridge/nodepriv.h>
59#include <dspbridge/node.h>
60#include <dspbridge/dmm.h>
61
62/* Static/Dynamic Loader includes */
63#include <dspbridge/dbll.h>
64#include <dspbridge/nldr.h>
65
66#include <dspbridge/drv.h>
67#include <dspbridge/drvdefs.h>
68#include <dspbridge/resourcecleanup.h>
69#include <_tiomap.h>
70
71#include <dspbridge/dspdeh.h>
72
73#define HOSTPREFIX	  "/host"
74#define PIPEPREFIX	  "/dbpipe"
75
76#define MAX_INPUTS(h)  \
77		((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
78#define MAX_OUTPUTS(h) \
79		((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
80
81#define NODE_GET_PRIORITY(h) ((h)->prio)
82#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
83#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
84
85#define MAXPIPES	100	/* Max # of /pipe connections (CSL limit) */
86#define MAXDEVSUFFIXLEN 2	/* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
87
88#define PIPENAMELEN     (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
89#define HOSTNAMELEN     (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
90
91#define MAXDEVNAMELEN	32	/* dsp_ndbprops.ac_name size */
92#define CREATEPHASE	1
93#define EXECUTEPHASE	2
94#define DELETEPHASE	3
95
96/* Define default STRM parameters */
97/*
98 *  TBD: Put in header file, make global DSP_STRMATTRS with defaults,
99 *  or make defaults configurable.
100 */
101#define DEFAULTBUFSIZE		32
102#define DEFAULTNBUFS		2
103#define DEFAULTSEGID		0
104#define DEFAULTALIGNMENT	0
105#define DEFAULTTIMEOUT		10000
106
107#define RMSQUERYSERVER		0
108#define RMSCONFIGURESERVER	1
109#define RMSCREATENODE		2
110#define RMSEXECUTENODE		3
111#define RMSDELETENODE		4
112#define RMSCHANGENODEPRIORITY	5
113#define RMSREADMEMORY		6
114#define RMSWRITEMEMORY		7
115#define RMSCOPY			8
116#define MAXTIMEOUT		2000
117
118#define NUMRMSFXNS		9
119
120#define PWR_TIMEOUT		500	/* default PWR timeout in msec */
121
122#define STACKSEGLABEL "L1DSRAM_HEAP"	/* Label for DSP Stack Segment Addr */
123
124/*
125 *  ======== node_mgr ========
126 */
struct node_mgr {
	struct dev_object *hdev_obj;	/* Device object */
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	struct dcd_manager *hdcd_mgr;	/* Proc/Node data manager */
	struct disp_object *disp_obj;	/* Node dispatcher */
	struct lst_list *node_list;	/* List of all allocated nodes */
	u32 num_nodes;		/* Number of nodes in node_list */
	u32 num_created;	/* Number of nodes *created* on DSP */
	struct gb_t_map *pipe_map;	/* Pipe connection bit map */
	struct gb_t_map *pipe_done_map;	/* Pipes that are half free */
	struct gb_t_map *chnl_map;	/* Channel allocation bit map */
	struct gb_t_map *dma_chnl_map;	/* DMA Channel allocation bit map */
	struct gb_t_map *zc_chnl_map;	/* Zero-Copy Channel alloc bit map */
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	struct mutex node_mgr_lock;	/* For critical sections */
	u32 ul_fxn_addrs[NUMRMSFXNS];	/* RMS function addresses */
	struct msg_mgr *msg_mgr_obj;	/* Message manager for this device */

	/* Processor properties needed by Node Dispatcher */
	u32 ul_num_chnls;	/* Total number of channels */
	u32 ul_chnl_offset;	/* Offset of chnl ids rsvd for RMS */
	u32 ul_chnl_buf_size;	/* Buffer size for data to RMS */
	int proc_family;	/* eg, 5000 */
	int proc_type;		/* eg, 5510 */
	u32 udsp_word_size;	/* Size of DSP word on host bytes */
	u32 udsp_data_mau_size;	/* Size of DSP data MAU */
	u32 udsp_mau_size;	/* Size of MAU */
	s32 min_pri;		/* Minimum runtime priority for node */
	s32 max_pri;		/* Maximum runtime priority for node */

	struct strm_mgr *strm_mgr_obj;	/* STRM manager */

	/* Loader properties */
	struct nldr_object *nldr_obj;	/* Handle to loader */
	struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
	bool loader_init;	/* Loader Init function succeeded? */
};
165
166/*
167 *  ======== connecttype ========
168 */
enum connecttype {
	NOTCONNECTED = 0,	/* Stream endpoint not yet connected */
	NODECONNECT,		/* Node-to-node link over a DSP pipe */
	HOSTCONNECT,		/* Node-to-GPP link over a host channel */
	DEVICECONNECT,		/* Link to a device node (see node_connect) */
};
175
176/*
177 *  ======== stream_chnl ========
178 */
struct stream_chnl {
	enum connecttype type;	/* Type of stream connection */
	u32 dev_id;		/* pipe or channel id (meaning depends on type) */
};
183
184/*
185 *  ======== node_object ========
186 */
struct node_object {
	struct list_head list_elem;
	struct node_mgr *hnode_mgr;	/* The manager of this node */
	struct proc_object *hprocessor;	/* Back pointer to processor */
	struct dsp_uuid node_uuid;	/* Node's ID */
	s32 prio;		/* Node's current priority */
	u32 utimeout;		/* Timeout for blocking NODE calls */
	u32 heap_size;		/* Heap Size */
	/* NOTE(review): the next two fields were both commented "Heap Size";
	 * per their names they presumably hold the DSP-side and GPP-side
	 * virtual addresses of the node heap — confirm against callers. */
	u32 udsp_heap_virt_addr;	/* Heap virt addr on DSP (presumed) */
	u32 ugpp_heap_virt_addr;	/* Heap virt addr on GPP (presumed) */
	enum node_type ntype;	/* Type of node: message, task, etc */
	enum node_state node_state;	/* NODE_ALLOCATED, NODE_CREATED, ... */
	u32 num_inputs;		/* Current number of inputs */
	u32 num_outputs;	/* Current number of outputs */
	u32 max_input_index;	/* Current max input stream index */
	u32 max_output_index;	/* Current max output stream index */
	struct stream_chnl *inputs;	/* Node's input streams */
	struct stream_chnl *outputs;	/* Node's output streams */
	struct node_createargs create_args;	/* Args for node create func */
	nodeenv node_env;	/* Environment returned by RMS */
	struct dcd_genericobj dcd_props;	/* Node properties from DCD */
	struct dsp_cbdata *pargs;	/* Optional args to pass to node */
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	char *pstr_dev_name;	/* device name, if device node */
	struct sync_object *sync_done;	/* Synchronize node_terminate */
	s32 exit_status;	/* execute function return status */

	/* Information needed for node_get_attr() */
	void *device_owner;	/* If dev node, task that owns it */
	u32 num_gpp_inputs;	/* Current # of from GPP streams */
	u32 num_gpp_outputs;	/* Current # of to GPP streams */
	/* Current stream connections */
	struct dsp_streamconnect *stream_connect;

	/* Message queue */
	struct msg_queue *msg_queue_obj;

	/* These fields used for SM messaging */
	struct cmm_xlatorobject *xlator;	/* Node's SM addr translator */

	/* Handle to pass to dynamic loader */
	struct nldr_nodeobject *nldr_node_obj;
	bool loaded;		/* Code is (dynamically) loaded */
	bool phase_split;	/* Phases split in many libs or ovly */

};
233
/* Default buffer attributes used by node_alloc_msg_buf() when the caller
 * passes pattr == NULL: SM segment 1, no alignment constraint. */
static struct dsp_bufferattr node_dfltbufattrs = {
	0,			/* cb_struct */
	1,			/* segment_id */
	0,			/* buf_alignment */
};
240
241static void delete_node(struct node_object *hnode,
242			struct process_context *pr_ctxt);
243static void delete_node_mgr(struct node_mgr *hnode_mgr);
244static void fill_stream_connect(struct node_object *node1,
245				struct node_object *node2, u32 stream1,
246				u32 stream2);
247static void fill_stream_def(struct node_object *hnode,
248			    struct node_strmdef *pstrm_def,
249			    struct dsp_strmattr *pattrs);
250static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
251static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
252				  u32 phase);
253static int get_node_props(struct dcd_manager *hdcd_mgr,
254				 struct node_object *hnode,
255				 const struct dsp_uuid *node_uuid,
256				 struct dcd_genericobj *dcd_prop);
257static int get_proc_props(struct node_mgr *hnode_mgr,
258				 struct dev_object *hdev_obj);
259static int get_rms_fxns(struct node_mgr *hnode_mgr);
260static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
261		u32 ul_num_bytes, u32 mem_space);
262static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
263		     u32 ul_num_bytes, u32 mem_space);
264
265static u32 refs;		/* module reference count */
266
/* Dynamic loader functions. Positional initializer: the order must match
 * the member order of struct node_ldr_fxns (declared elsewhere). */
static struct node_ldr_fxns nldr_fxns = {
	nldr_allocate,
	nldr_create,
	nldr_delete,
	nldr_exit,
	nldr_get_fxn_addr,
	nldr_init,
	nldr_load,
	nldr_unload,
};
278
279enum node_state node_get_state(void *hnode)
280{
281	struct node_object *pnode = (struct node_object *)hnode;
282	if (!pnode)
283		return -1;
284	else
285		return pnode->node_state;
286}
287
/*
 *  ======== node_allocate ========
 *  Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 *  Parameters:
 *      hprocessor: Processor the node will run on (must be DSP_UNIT).
 *      node_uuid:  UUID identifying the node in the DCD database.
 *      pargs:      Optional message args copied into the create args.
 *      attr_in:    Optional attributes (priority, timeout, user heap).
 *      noderes:    Out: node resource object; NULL on failure.
 *      pr_ctxt:    Process context, for resource-cleanup tracking.
 *  Returns:
 *      0 on success, negative errno-style code on failure.
 */
int node_allocate(struct proc_object *hprocessor,
			const struct dsp_uuid *node_uuid,
			const struct dsp_cbdata *pargs,
			const struct dsp_nodeattrin *attr_in,
			struct node_res_object **noderes,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(noderes != NULL);
	DBC_REQUIRE(node_uuid != NULL);

	*noderes = NULL;

	/* NOTE(review): status is not checked before proc_id is read; if
	 * proc_get_processor_id() fails, proc_id is used uninitialized. */
	status = proc_get_processor_id(hprocessor, &proc_id);

	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;

	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->ul_fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->hnode_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->hprocessor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support Node Dynamic * heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* check for page aligned Heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	/* Reserve heap_size + one guard page of DSP virtual address space */
	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
					task_arg_obj.udsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	/* Map the user heap into the reserved DSP virtual range */
	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->utimeout = attr_in->utimeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/*  Allocate dsp_streamconnect array for device, task, and
		 *  dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (!status && (node_type == NODE_TASK ||
					      node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintainig stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
								GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/*Get the shared mem mgr for this nodes dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
								GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
							&pnode->msg_queue_obj,
							0,
							pnode->create_args.asa.
							node_msg_args.max_msgs,
							pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */

		status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
							   (void *)pnode,
							   &pnode->dcd_props.
							   obj_data.node_obj,
							   &pnode->
							   nldr_node_obj,
							   &pnode->phase_split);
	}

	/* Compare value read from Node Properties and check if it is same as
	 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
	 * GPP Address, Read the value in that address and override the
	 * stack_seg value in task args */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					     &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    pfn_get_fxn_addr(pnode->nldr_node_obj,
					     "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			/* NOTE(review): readl() is given a plain u32, not an
			 * __iomem pointer — relies on the base being an
			 * already-ioremapped address; confirm. */
			ul_stack_seg_val = readl(ul_stack_seg_addr);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		lst_init_elem((struct list_head *)pnode);
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
			++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);

	}

	if (!status) {
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
	DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
683
684/*
685 *  ======== node_alloc_msg_buf ========
686 *  Purpose:
687 *      Allocates buffer for zero copy messaging.
688 */
689DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
690			 struct dsp_bufferattr *pattr,
691			 u8 **pbuffer)
692{
693	struct node_object *pnode = (struct node_object *)hnode;
694	int status = 0;
695	bool va_flag = false;
696	bool set_info;
697	u32 proc_id;
698
699	DBC_REQUIRE(refs > 0);
700	DBC_REQUIRE(pbuffer != NULL);
701
702	DBC_REQUIRE(usize > 0);
703
704	if (!pnode)
705		status = -EFAULT;
706	else if (node_get_type(pnode) == NODE_DEVICE)
707		status = -EPERM;
708
709	if (status)
710		goto func_end;
711
712	if (pattr == NULL)
713		pattr = &node_dfltbufattrs;	/* set defaults */
714
715	status = proc_get_processor_id(pnode->hprocessor, &proc_id);
716	if (proc_id != DSP_UNIT) {
717		DBC_ASSERT(NULL);
718		goto func_end;
719	}
720	/*  If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
721	 *  virt  address, so set this info in this node's translator
722	 *  object for  future ref. If MEM_GETVIRTUALSEGID then retrieve
723	 *  virtual address  from node's translator. */
724	if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
725	    (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
726		va_flag = true;
727		set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
728		    true : false;
729		/* Clear mask bits */
730		pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
731		/* Set/get this node's translators virtual address base/size */
732		status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
733					 pattr->segment_id, set_info);
734	}
735	if (!status && (!va_flag)) {
736		if (pattr->segment_id != 1) {
737			/* Node supports single SM segment only. */
738			status = -EBADR;
739		}
740		/*  Arbitrary SM buffer alignment not supported for host side
741		 *  allocs, but guaranteed for the following alignment
742		 *  values. */
743		switch (pattr->buf_alignment) {
744		case 0:
745		case 1:
746		case 2:
747		case 4:
748			break;
749		default:
750			/* alignment value not suportted */
751			status = -EPERM;
752			break;
753		}
754		if (!status) {
755			/* allocate physical buffer from seg_id in node's
756			 * translator */
757			(void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
758						   usize);
759			if (*pbuffer == NULL) {
760				pr_err("%s: error - Out of shared memory\n",
761				       __func__);
762				status = -ENOMEM;
763			}
764		}
765	}
766func_end:
767	return status;
768}
769
770/*
771 *  ======== node_change_priority ========
772 *  Purpose:
773 *      Change the priority of a node in the allocated state, or that is
774 *      currently running or paused on the target.
775 */
int node_change_priority(struct node_object *hnode, s32 prio)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	u32 proc_id;

	DBC_REQUIRE(refs > 0);

	/* Validate handle, node type (only task / DAIS-socket nodes carry a
	 * runtime priority) and the requested priority range. */
	if (!hnode || !hnode->hnode_mgr) {
		status = -EFAULT;
	} else {
		hnode_mgr = hnode->hnode_mgr;
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
		else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
			status = -EDOM;
	}
	if (status)
		goto func_end;

	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(hnode);
	if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
		/* Node not yet running on the DSP: a local update suffices. */
		NODE_SET_PRIORITY(hnode, prio);
	} else {
		if (state != NODE_RUNNING) {
			/* Priority can't change in any other state. */
			status = -EBADR;
			goto func_cont;
		}
		/* Node is running: dispatch the change to the DSP via RMS. */
		status = proc_get_processor_id(pnode->hprocessor, &proc_id);
		if (proc_id == DSP_UNIT) {
			status =
			    disp_node_change_priority(hnode_mgr->disp_obj,
						      hnode,
						      hnode_mgr->ul_fxn_addrs
						      [RMSCHANGENODEPRIORITY],
						      hnode->node_env, prio);
		}
		/* Mirror the new priority locally only if dispatch worked. */
		if (status >= 0)
			NODE_SET_PRIORITY(hnode, prio);

	}
func_cont:
	/* Leave critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	return status;
}
830
831/*
832 *  ======== node_connect ========
833 *  Purpose:
834 *      Connect two nodes on the DSP, or a node on the DSP to the GPP.
835 */
836int node_connect(struct node_object *node1, u32 stream1,
837			struct node_object *node2,
838			u32 stream2, struct dsp_strmattr *pattrs,
839			struct dsp_cbdata *conn_param)
840{
841	struct node_mgr *hnode_mgr;
842	char *pstr_dev_name = NULL;
843	enum node_type node1_type = NODE_TASK;
844	enum node_type node2_type = NODE_TASK;
845	struct node_strmdef *pstrm_def;
846	struct node_strmdef *input = NULL;
847	struct node_strmdef *output = NULL;
848	struct node_object *dev_node_obj;
849	struct node_object *hnode;
850	struct stream_chnl *pstream;
851	u32 pipe_id = GB_NOBITS;
852	u32 chnl_id = GB_NOBITS;
853	s8 chnl_mode;
854	u32 dw_length;
855	int status = 0;
856	DBC_REQUIRE(refs > 0);
857
858	if ((node1 != (struct node_object *)DSP_HGPPNODE && !node1) ||
859	    (node2 != (struct node_object *)DSP_HGPPNODE && !node2))
860		status = -EFAULT;
861
862	if (!status) {
863		/* The two nodes must be on the same processor */
864		if (node1 != (struct node_object *)DSP_HGPPNODE &&
865		    node2 != (struct node_object *)DSP_HGPPNODE &&
866		    node1->hnode_mgr != node2->hnode_mgr)
867			status = -EPERM;
868		/* Cannot connect a node to itself */
869		if (node1 == node2)
870			status = -EPERM;
871
872	}
873	if (!status) {
874		/* node_get_type() will return NODE_GPP if hnode =
875		 * DSP_HGPPNODE. */
876		node1_type = node_get_type(node1);
877		node2_type = node_get_type(node2);
878		/* Check stream indices ranges */
879		if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
880		     stream1 >= MAX_OUTPUTS(node1)) || (node2_type != NODE_GPP
881							  && node2_type !=
882							  NODE_DEVICE
883							  && stream2 >=
884							  MAX_INPUTS(node2)))
885			status = -EINVAL;
886	}
887	if (!status) {
888		/*
889		 *  Only the following types of connections are allowed:
890		 *      task/dais socket < == > task/dais socket
891		 *      task/dais socket < == > device
892		 *      task/dais socket < == > GPP
893		 *
894		 *  ie, no message nodes, and at least one task or dais
895		 *  socket node.
896		 */
897		if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
898		    (node1_type != NODE_TASK && node1_type != NODE_DAISSOCKET &&
899		     node2_type != NODE_TASK && node2_type != NODE_DAISSOCKET))
900			status = -EPERM;
901	}
902	/*
903	 * Check stream mode. Default is STRMMODE_PROCCOPY.
904	 */
905	if (!status && pattrs) {
906		if (pattrs->strm_mode != STRMMODE_PROCCOPY)
907			status = -EPERM;	/* illegal stream mode */
908
909	}
910	if (status)
911		goto func_end;
912
913	if (node1_type != NODE_GPP) {
914		hnode_mgr = node1->hnode_mgr;
915	} else {
916		DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
917		hnode_mgr = node2->hnode_mgr;
918	}
919	/* Enter critical section */
920	mutex_lock(&hnode_mgr->node_mgr_lock);
921
922	/* Nodes must be in the allocated state */
923	if (node1_type != NODE_GPP && node_get_state(node1) != NODE_ALLOCATED)
924		status = -EBADR;
925
926	if (node2_type != NODE_GPP && node_get_state(node2) != NODE_ALLOCATED)
927		status = -EBADR;
928
929	if (!status) {
930		/*  Check that stream indices for task and dais socket nodes
931		 *  are not already be used. (Device nodes checked later) */
932		if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
933			output =
934			    &(node1->create_args.asa.
935			      task_arg_obj.strm_out_def[stream1]);
936			if (output->sz_device != NULL)
937				status = -EISCONN;
938
939		}
940		if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
941			input =
942			    &(node2->create_args.asa.
943			      task_arg_obj.strm_in_def[stream2]);
944			if (input->sz_device != NULL)
945				status = -EISCONN;
946
947		}
948	}
949	/* Connecting two task nodes? */
950	if (!status && ((node1_type == NODE_TASK ||
951				       node1_type == NODE_DAISSOCKET)
952				      && (node2_type == NODE_TASK
953					  || node2_type == NODE_DAISSOCKET))) {
954		/* Find available pipe */
955		pipe_id = gb_findandset(hnode_mgr->pipe_map);
956		if (pipe_id == GB_NOBITS) {
957			status = -ECONNREFUSED;
958		} else {
959			node1->outputs[stream1].type = NODECONNECT;
960			node2->inputs[stream2].type = NODECONNECT;
961			node1->outputs[stream1].dev_id = pipe_id;
962			node2->inputs[stream2].dev_id = pipe_id;
963			output->sz_device = kzalloc(PIPENAMELEN + 1,
964							GFP_KERNEL);
965			input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
966			if (output->sz_device == NULL ||
967			    input->sz_device == NULL) {
968				/* Undo the connection */
969				kfree(output->sz_device);
970
971				kfree(input->sz_device);
972
973				output->sz_device = NULL;
974				input->sz_device = NULL;
975				gb_clear(hnode_mgr->pipe_map, pipe_id);
976				status = -ENOMEM;
977			} else {
978				/* Copy "/dbpipe<pipId>" name to device names */
979				sprintf(output->sz_device, "%s%d",
980					PIPEPREFIX, pipe_id);
981				strcpy(input->sz_device, output->sz_device);
982			}
983		}
984	}
985	/* Connecting task node to host? */
986	if (!status && (node1_type == NODE_GPP ||
987				      node2_type == NODE_GPP)) {
988		if (node1_type == NODE_GPP) {
989			chnl_mode = CHNL_MODETODSP;
990		} else {
991			DBC_ASSERT(node2_type == NODE_GPP);
992			chnl_mode = CHNL_MODEFROMDSP;
993		}
994		/*  Reserve a channel id. We need to put the name "/host<id>"
995		 *  in the node's create_args, but the host
996		 *  side channel will not be opened until DSPStream_Open is
997		 *  called for this node. */
998		if (pattrs) {
999			if (pattrs->strm_mode == STRMMODE_RDMA) {
1000				chnl_id =
1001				    gb_findandset(hnode_mgr->dma_chnl_map);
1002				/* dma chans are 2nd transport chnl set
1003				 * ids(e.g. 16-31) */
1004				(chnl_id != GB_NOBITS) ?
1005				    (chnl_id =
1006				     chnl_id +
1007				     hnode_mgr->ul_num_chnls) : chnl_id;
1008			} else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1009				chnl_id = gb_findandset(hnode_mgr->zc_chnl_map);
				/* zero-copy chans are 3rd transport set
				 * (e.g. 32-47) */
1012				(chnl_id != GB_NOBITS) ? (chnl_id = chnl_id +
1013							  (2 *
1014							   hnode_mgr->
1015							   ul_num_chnls))
1016				    : chnl_id;
1017			} else {	/* must be PROCCOPY */
1018				DBC_ASSERT(pattrs->strm_mode ==
1019					   STRMMODE_PROCCOPY);
1020				chnl_id = gb_findandset(hnode_mgr->chnl_map);
1021				/* e.g. 0-15 */
1022			}
1023		} else {
1024			/* default to PROCCOPY */
1025			chnl_id = gb_findandset(hnode_mgr->chnl_map);
1026		}
1027		if (chnl_id == GB_NOBITS) {
1028			status = -ECONNREFUSED;
1029			goto func_cont2;
1030		}
1031		pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
1032		if (pstr_dev_name != NULL)
1033			goto func_cont2;
1034
1035		if (pattrs) {
1036			if (pattrs->strm_mode == STRMMODE_RDMA) {
1037				gb_clear(hnode_mgr->dma_chnl_map, chnl_id -
1038					 hnode_mgr->ul_num_chnls);
1039			} else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1040				gb_clear(hnode_mgr->zc_chnl_map, chnl_id -
1041					 (2 * hnode_mgr->ul_num_chnls));
1042			} else {
1043				DBC_ASSERT(pattrs->strm_mode ==
1044					   STRMMODE_PROCCOPY);
1045				gb_clear(hnode_mgr->chnl_map, chnl_id);
1046			}
1047		} else {
1048			gb_clear(hnode_mgr->chnl_map, chnl_id);
1049		}
1050		status = -ENOMEM;
1051func_cont2:
1052		if (!status) {
1053			if (node1 == (struct node_object *)DSP_HGPPNODE) {
1054				node2->inputs[stream2].type = HOSTCONNECT;
1055				node2->inputs[stream2].dev_id = chnl_id;
1056				input->sz_device = pstr_dev_name;
1057			} else {
1058				node1->outputs[stream1].type = HOSTCONNECT;
1059				node1->outputs[stream1].dev_id = chnl_id;
1060				output->sz_device = pstr_dev_name;
1061			}
1062			sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1063		}
1064	}
1065	/* Connecting task node to device node? */
1066	if (!status && ((node1_type == NODE_DEVICE) ||
1067				      (node2_type == NODE_DEVICE))) {
1068		if (node2_type == NODE_DEVICE) {
1069			/* node1 == > device */
1070			dev_node_obj = node2;
1071			hnode = node1;
1072			pstream = &(node1->outputs[stream1]);
1073			pstrm_def = output;
1074		} else {
1075			/* device == > node2 */
1076			dev_node_obj = node1;
1077			hnode = node2;
1078			pstream = &(node2->inputs[stream2]);
1079			pstrm_def = input;
1080		}
1081		/* Set up create args */
1082		pstream->type = DEVICECONNECT;
1083		dw_length = strlen(dev_node_obj->pstr_dev_name);
1084		if (conn_param != NULL) {
1085			pstrm_def->sz_device = kzalloc(dw_length + 1 +
1086							conn_param->cb_data,
1087							GFP_KERNEL);
1088		} else {
1089			pstrm_def->sz_device = kzalloc(dw_length + 1,
1090							GFP_KERNEL);
1091		}
1092		if (pstrm_def->sz_device == NULL) {
1093			status = -ENOMEM;
1094		} else {
1095			/* Copy device name */
1096			strncpy(pstrm_def->sz_device,
1097				dev_node_obj->pstr_dev_name, dw_length);
1098			if (conn_param != NULL) {
1099				strncat(pstrm_def->sz_device,
1100					(char *)conn_param->node_data,
1101					(u32) conn_param->cb_data);
1102			}
1103			dev_node_obj->device_owner = hnode;
1104		}
1105	}
1106	if (!status) {
1107		/* Fill in create args */
1108		if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1109			node1->create_args.asa.task_arg_obj.num_outputs++;
1110			fill_stream_def(node1, output, pattrs);
1111		}
1112		if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1113			node2->create_args.asa.task_arg_obj.num_inputs++;
1114			fill_stream_def(node2, input, pattrs);
1115		}
1116		/* Update node1 and node2 stream_connect */
1117		if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1118			node1->num_outputs++;
1119			if (stream1 > node1->max_output_index)
1120				node1->max_output_index = stream1;
1121
1122		}
1123		if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1124			node2->num_inputs++;
1125			if (stream2 > node2->max_input_index)
1126				node2->max_input_index = stream2;
1127
1128		}
1129		fill_stream_connect(node1, node2, stream1, stream2);
1130	}
1131	/* end of sync_enter_cs */
1132	/* Exit critical section */
1133	mutex_unlock(&hnode_mgr->node_mgr_lock);
1134func_end:
1135	dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1136		"pattrs: %p status: 0x%x\n", __func__, node1,
1137		stream1, node2, stream2, pattrs, status);
1138	return status;
1139}
1140
1141/*
1142 *  ======== node_create ========
1143 *  Purpose:
1144 *      Create a node on the DSP by remotely calling the node's create function.
1145 */
int node_create(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 ul_create_fxn;
	enum node_type node_type;
	int status = 0;
	/* status1 tracks non-fatal unload errors separately so they are
	 * logged without clobbering the primary create status */
	int status1 = 0;
	/* NOTE(review): cb_data is initialized below but never passed to any
	 * call visible in this function - presumably PWR plumbing; verify */
	struct dsp_cbdata cb_data;
	/* 255 == "not yet known"; set by proc_get_processor_id() below */
	u32 proc_id = 255;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	DBC_REQUIRE(refs > 0);
	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->hprocessor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to create
	   new node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR calls */
	cb_data.cb_data = PWR_TIMEOUT;
	node_type = node_get_type(hnode);
	hnode_mgr = hnode->hnode_mgr;
	intf_fxns = hnode_mgr->intf_fxns;
	/* Get access to node dispatcher */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Check node state: only a node still in NODE_ALLOCATED may be
	 * created on the DSP */
	if (node_get_state(hnode) != NODE_ALLOCATED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->hprocessor, &proc_id);

	if (status)
		goto func_cont2;

	if (proc_id != DSP_UNIT)
		goto func_cont2;

	/* Make sure streams are properly connected: every stream index up
	 * to the max index used must correspond to a connected stream */
	if ((hnode->num_inputs && hnode->max_input_index >
	     hnode->num_inputs - 1) ||
	    (hnode->num_outputs && hnode->max_output_index >
	     hnode->num_outputs - 1))
		status = -ENOTCONN;

	if (!status) {
		/* If node's create function is not loaded, load it */
		/* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
		status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
						       NLDR_CREATE);
		/* Get address of node's create function */
		if (!status) {
			hnode->loaded = true;
			if (node_type != NODE_DEVICE) {
				status = get_fxn_address(hnode, &ul_create_fxn,
							 CREATEPHASE);
			}
		} else {
			pr_err("%s: failed to load create code: 0x%x\n",
			       __func__, status);
		}
		/* Request the lowest OPP level */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
		/* Get address of iAlg functions, if socket node */
		if (!status) {
			if (node_type == NODE_DAISSOCKET) {
				status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr
				    (hnode->nldr_node_obj,
				     hnode->dcd_props.obj_data.node_obj.
				     pstr_i_alg_name,
				     &hnode->create_args.asa.
				     task_arg_obj.ul_dais_arg);
			}
		}
	}
	/* Dispatch the remote create call (device nodes have no create
	 * phase on the DSP side) */
	if (!status) {
		if (node_type != NODE_DEVICE) {
			status = disp_node_create(hnode_mgr->disp_obj, hnode,
						  hnode_mgr->ul_fxn_addrs
						  [RMSCREATENODE],
						  ul_create_fxn,
						  &(hnode->create_args),
						  &(hnode->node_env));
			if (status >= 0) {
				/* Set the message queue id to the node env
				 * pointer */
				intf_fxns = hnode_mgr->intf_fxns;
				(*intf_fxns->pfn_msg_set_queue_id) (hnode->
							msg_queue_obj,
							hnode->node_env);
			}
		}
	}
	/*  Phase II/Overlays: Create, execute, delete phases  possibly in
	 *  different files/sections. */
	if (hnode->loaded && hnode->phase_split) {
		/* If create code was dynamically loaded, we can now unload
		 * it. */
		status1 = hnode_mgr->nldr_fxns.pfn_unload(hnode->nldr_node_obj,
							  NLDR_CREATE);
		hnode->loaded = false;
	}
	if (status1)
		pr_err("%s: Failed to unload create code: 0x%x\n",
		       __func__, status1);
func_cont2:
	/* Update node state and node manager state */
	if (status >= 0) {
		NODE_SET_STATE(hnode, NODE_CREATED);
		hnode_mgr->num_created++;
		goto func_cont;
	}
	if (status != -EBADR) {
		/* Put back in NODE_ALLOCATED state if error occurred */
		NODE_SET_STATE(hnode, NODE_ALLOCATED);
	}
func_cont:
	/* Free access to node dispatcher */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	/* Notify registered clients of the state change on success */
	if (status >= 0) {
		proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}

	dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
		hnode, status);
	return status;
}
1299
1300/*
1301 *  ======== node_create_mgr ========
1302 *  Purpose:
1303 *      Create a NODE Manager object.
1304 */
int node_create_mgr(struct node_mgr **node_man,
			   struct dev_object *hdev_obj)
{
	u32 i;
	struct node_mgr *node_mgr_obj = NULL;
	struct disp_attr disp_attr_obj;
	/* Empty base-image name handed to the DCD manager */
	char *sz_zl_file = "";
	struct nldr_attrs nldr_attrs_obj;
	int status = 0;
	u8 dev_type;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_man != NULL);
	DBC_REQUIRE(hdev_obj != NULL);

	*node_man = NULL;
	/* Allocate Node manager object */
	node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
	if (node_mgr_obj) {
		node_mgr_obj->hdev_obj = hdev_obj;
		node_mgr_obj->node_list = kzalloc(sizeof(struct lst_list),
							GFP_KERNEL);
		/* Bitmaps tracking in-use / completed pipes */
		node_mgr_obj->pipe_map = gb_create(MAXPIPES);
		node_mgr_obj->pipe_done_map = gb_create(MAXPIPES);
		if (node_mgr_obj->node_list == NULL
		    || node_mgr_obj->pipe_map == NULL
		    || node_mgr_obj->pipe_done_map == NULL) {
			status = -ENOMEM;
		} else {
			INIT_LIST_HEAD(&node_mgr_obj->node_list->head);
			node_mgr_obj->ntfy_obj = kmalloc(
				sizeof(struct ntfy_object), GFP_KERNEL);
			if (node_mgr_obj->ntfy_obj)
				ntfy_init(node_mgr_obj->ntfy_obj);
			else
				status = -ENOMEM;
		}
		node_mgr_obj->num_created = 0;
	} else {
		status = -ENOMEM;
	}
	/* get devNodeType */
	if (!status)
		status = dev_get_dev_type(hdev_obj, &dev_type);

	/* Create the DCD Manager */
	if (!status) {
		status =
		    dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
		/* get_proc_props() fills in ul_chnl_offset, ul_num_chnls,
		 * word/mau sizes etc. used below */
		if (!status)
			status = get_proc_props(node_mgr_obj, hdev_obj);

	}
	/* Create NODE Dispatcher */
	if (!status) {
		disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset;
		disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size;
		disp_attr_obj.proc_family = node_mgr_obj->proc_family;
		disp_attr_obj.proc_type = node_mgr_obj->proc_type;
		status =
		    disp_create(&node_mgr_obj->disp_obj, hdev_obj,
				&disp_attr_obj);
	}
	/* Create a STRM Manager */
	if (!status)
		status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);

	if (!status) {
		dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
		/* Get msg_ctrl queue manager */
		dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
		mutex_init(&node_mgr_obj->node_mgr_lock);
		node_mgr_obj->chnl_map = gb_create(node_mgr_obj->ul_num_chnls);
		/* dma chnl map. ul_num_chnls is # per transport */
		node_mgr_obj->dma_chnl_map =
		    gb_create(node_mgr_obj->ul_num_chnls);
		node_mgr_obj->zc_chnl_map =
		    gb_create(node_mgr_obj->ul_num_chnls);
		if ((node_mgr_obj->chnl_map == NULL)
		    || (node_mgr_obj->dma_chnl_map == NULL)
		    || (node_mgr_obj->zc_chnl_map == NULL)) {
			status = -ENOMEM;
		} else {
			/* Block out reserved channels */
			for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++)
				gb_set(node_mgr_obj->chnl_map, i);

			/* Block out channels reserved for RMS */
			gb_set(node_mgr_obj->chnl_map,
			       node_mgr_obj->ul_chnl_offset);
			gb_set(node_mgr_obj->chnl_map,
			       node_mgr_obj->ul_chnl_offset + 1);
		}
	}
	if (!status) {
		/* NO RM Server on the IVA */
		if (dev_type != IVA_UNIT) {
			/* Get addresses of any RMS functions loaded */
			status = get_rms_fxns(node_mgr_obj);
		}
	}

	/* Get loader functions and create loader */
	if (!status)
		node_mgr_obj->nldr_fxns = nldr_fxns;	/* Dyn loader funcs */

	if (!status) {
		nldr_attrs_obj.pfn_ovly = ovly;
		nldr_attrs_obj.pfn_write = mem_write;
		nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
		nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
		node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.pfn_init();
		status =
		    node_mgr_obj->nldr_fxns.pfn_create(&node_mgr_obj->nldr_obj,
						       hdev_obj,
						       &nldr_attrs_obj);
	}
	/* On any failure delete_node_mgr() tears down whatever was built;
	 * NOTE(review): assumes it tolerates a NULL/partially-built object */
	if (!status)
		*node_man = node_mgr_obj;
	else
		delete_node_mgr(node_mgr_obj);

	DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));

	return status;
}
1430
1431/*
1432 *  ======== node_delete ========
1433 *  Purpose:
1434 *      Delete a node on the DSP by remotely calling the node's delete function.
1435 *      Loads the node's delete function if necessary. Free GPP side resources
1436 *      after node's delete function returns.
1437 */
int node_delete(struct node_res_object *noderes,
		       struct process_context *pr_ctxt)
{
	struct node_object *pnode = noderes->hnode;
	struct node_mgr *hnode_mgr;
	struct proc_object *hprocessor;
	struct disp_object *disp_obj;
	u32 ul_delete_fxn;
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	/* status1 collects non-fatal unload errors so they can be logged
	 * without overwriting the primary delete status */
	int status1 = 0;
	/* NOTE(review): cb_data is initialized below but never passed to any
	 * call visible in this function - presumably PWR plumbing; verify */
	struct dsp_cbdata cb_data;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;

	void *node_res = noderes;

	struct dsp_processorstate proc_state;
	DBC_REQUIRE(refs > 0);

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR call */
	cb_data.cb_data = PWR_TIMEOUT;
	hnode_mgr = pnode->hnode_mgr;
	hprocessor = pnode->hprocessor;
	disp_obj = hnode_mgr->disp_obj;
	node_type = node_get_type(pnode);
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(pnode);
	/*  Execute delete phase code for non-device node in all cases
	 *  except when the node was only allocated. Delete phase must be
	 *  executed even if create phase was executed, but failed.
	 *  If the node environment pointer is non-NULL, the delete phase
	 *  code must be  executed. */
	if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
	    node_type != NODE_DEVICE) {
		status = proc_get_processor_id(pnode->hprocessor, &proc_id);
		if (status)
			goto func_cont1;

		if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
			/*  If node has terminated, execute phase code will
			 *  have already been unloaded in node_on_exit(). If the
			 *  node is PAUSED, the execute phase is loaded, and it
			 *  is now ok to unload it. If the node is running, we
			 *  will unload the execute phase only after deleting
			 *  the node. */
			if (state == NODE_PAUSED && pnode->loaded &&
			    pnode->phase_split) {
				/* Ok to unload execute code as long as node
				 * is not * running */
				status1 =
				    hnode_mgr->nldr_fxns.
				    pfn_unload(pnode->nldr_node_obj,
					       NLDR_EXECUTE);
				pnode->loaded = false;
				NODE_SET_STATE(pnode, NODE_DONE);
			}
			/* Load delete phase code if not loaded or if haven't
			 * * unloaded EXECUTE phase */
			if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
			    pnode->phase_split) {
				status =
				    hnode_mgr->nldr_fxns.
				    pfn_load(pnode->nldr_node_obj, NLDR_DELETE);
				if (!status)
					pnode->loaded = true;
				else
					pr_err("%s: fail - load delete code:"
					       " 0x%x\n", __func__, status);
			}
		}
func_cont1:
		/* proc_id is only consumed below when status == 0, i.e. when
		 * proc_get_processor_id() above succeeded */
		if (!status) {
			/* Unblock a thread trying to terminate the node */
			(void)sync_set_event(pnode->sync_done);
			if (proc_id == DSP_UNIT) {
				/* ul_delete_fxn = address of node's delete
				 * function */
				status = get_fxn_address(pnode, &ul_delete_fxn,
							 DELETEPHASE);
			} else if (proc_id == IVA_UNIT)
				ul_delete_fxn = (u32) pnode->node_env;
			if (!status) {
				status = proc_get_state(hprocessor,
						&proc_state,
						sizeof(struct
						       dsp_processorstate));
				/* Skip the remote delete call if the DSP is
				 * already in an error state */
				if (proc_state.proc_state != PROC_ERROR) {
					status =
					    disp_node_delete(disp_obj, pnode,
							     hnode_mgr->
							     ul_fxn_addrs
							     [RMSDELETENODE],
							     ul_delete_fxn,
							     pnode->node_env);
				} else
					NODE_SET_STATE(pnode, NODE_DONE);

				/* Unload execute, if not unloaded, and delete
				 * function */
				if (state == NODE_RUNNING &&
				    pnode->phase_split) {
					status1 =
					    hnode_mgr->nldr_fxns.
					    pfn_unload(pnode->nldr_node_obj,
						       NLDR_EXECUTE);
				}
				if (status1)
					pr_err("%s: fail - unload execute code:"
					       " 0x%x\n", __func__, status1);

				status1 =
				    hnode_mgr->nldr_fxns.pfn_unload(pnode->
						    nldr_node_obj,
						    NLDR_DELETE);
				pnode->loaded = false;
				if (status1)
					pr_err("%s: fail - unload delete code: "
					       "0x%x\n", __func__, status1);
			}
		}
	}
	/* Free host side resources even if a failure occurred */
	/* Remove node from hnode_mgr->node_list */
	lst_remove_elem(hnode_mgr->node_list, (struct list_head *)pnode);
	hnode_mgr->num_nodes--;
	/* Decrement count of nodes created on DSP */
	if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
					  (pnode->node_env != (u32) NULL)))
		hnode_mgr->num_created--;
	/*  Free host-side resources allocated by node_create()
	 *  delete_node() fails if SM buffers not freed by client! */
	drv_proc_node_update_status(node_res, false);
	delete_node(pnode, pr_ctxt);

	/*
	 * Release all Node resources and its context
	 */
	idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
	kfree(node_res);

	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
func_end:
	dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
	return status;
}
1594
1595/*
1596 *  ======== node_delete_mgr ========
1597 *  Purpose:
1598 *      Delete the NODE Manager.
1599 */
1600int node_delete_mgr(struct node_mgr *hnode_mgr)
1601{
1602	int status = 0;
1603
1604	DBC_REQUIRE(refs > 0);
1605
1606	if (hnode_mgr)
1607		delete_node_mgr(hnode_mgr);
1608	else
1609		status = -EFAULT;
1610
1611	return status;
1612}
1613
1614/*
1615 *  ======== node_enum_nodes ========
1616 *  Purpose:
1617 *      Enumerate currently allocated nodes.
1618 */
1619int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1620			   u32 node_tab_size, u32 *pu_num_nodes,
1621			   u32 *pu_allocated)
1622{
1623	struct node_object *hnode;
1624	u32 i;
1625	int status = 0;
1626	DBC_REQUIRE(refs > 0);
1627	DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
1628	DBC_REQUIRE(pu_num_nodes != NULL);
1629	DBC_REQUIRE(pu_allocated != NULL);
1630
1631	if (!hnode_mgr) {
1632		status = -EFAULT;
1633		goto func_end;
1634	}
1635	/* Enter critical section */
1636	mutex_lock(&hnode_mgr->node_mgr_lock);
1637
1638	if (hnode_mgr->num_nodes > node_tab_size) {
1639		*pu_allocated = hnode_mgr->num_nodes;
1640		*pu_num_nodes = 0;
1641		status = -EINVAL;
1642	} else {
1643		hnode = (struct node_object *)lst_first(hnode_mgr->
1644			node_list);
1645		for (i = 0; i < hnode_mgr->num_nodes; i++) {
1646			DBC_ASSERT(hnode);
1647			node_tab[i] = hnode;
1648			hnode = (struct node_object *)lst_next
1649				(hnode_mgr->node_list,
1650				(struct list_head *)hnode);
1651		}
1652		*pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1653	}
1654	/* end of sync_enter_cs */
1655	/* Exit critical section */
1656	mutex_unlock(&hnode_mgr->node_mgr_lock);
1657func_end:
1658	return status;
1659}
1660
1661/*
1662 *  ======== node_exit ========
1663 *  Purpose:
1664 *      Discontinue usage of NODE module.
1665 */
1666void node_exit(void)
1667{
1668	DBC_REQUIRE(refs > 0);
1669
1670	refs--;
1671
1672	DBC_ENSURE(refs >= 0);
1673}
1674
1675/*
1676 *  ======== node_free_msg_buf ========
1677 *  Purpose:
1678 *      Frees the message buffer.
1679 */
1680int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
1681			     struct dsp_bufferattr *pattr)
1682{
1683	struct node_object *pnode = (struct node_object *)hnode;
1684	int status = 0;
1685	u32 proc_id;
1686	DBC_REQUIRE(refs > 0);
1687	DBC_REQUIRE(pbuffer != NULL);
1688	DBC_REQUIRE(pnode != NULL);
1689	DBC_REQUIRE(pnode->xlator != NULL);
1690
1691	if (!hnode) {
1692		status = -EFAULT;
1693		goto func_end;
1694	}
1695	status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1696	if (proc_id == DSP_UNIT) {
1697		if (!status) {
1698			if (pattr == NULL) {
1699				/* set defaults */
1700				pattr = &node_dfltbufattrs;
1701			}
1702			/* Node supports single SM segment only */
1703			if (pattr->segment_id != 1)
1704				status = -EBADR;
1705
1706			/* pbuffer is clients Va. */
1707			status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
1708		}
1709	} else {
1710		DBC_ASSERT(NULL);	/* BUG */
1711	}
1712func_end:
1713	return status;
1714}
1715
1716/*
1717 *  ======== node_get_attr ========
1718 *  Purpose:
1719 *      Copy the current attributes of the specified node into a dsp_nodeattr
1720 *      structure.
1721 */
1722int node_get_attr(struct node_object *hnode,
1723			 struct dsp_nodeattr *pattr, u32 attr_size)
1724{
1725	struct node_mgr *hnode_mgr;
1726	int status = 0;
1727	DBC_REQUIRE(refs > 0);
1728	DBC_REQUIRE(pattr != NULL);
1729	DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
1730
1731	if (!hnode) {
1732		status = -EFAULT;
1733	} else {
1734		hnode_mgr = hnode->hnode_mgr;
1735		/* Enter hnode_mgr critical section (since we're accessing
1736		 * data that could be changed by node_change_priority() and
1737		 * node_connect(). */
1738		mutex_lock(&hnode_mgr->node_mgr_lock);
1739		pattr->cb_struct = sizeof(struct dsp_nodeattr);
1740		/* dsp_nodeattrin */
1741		pattr->in_node_attr_in.cb_struct =
1742				 sizeof(struct dsp_nodeattrin);
1743		pattr->in_node_attr_in.prio = hnode->prio;
1744		pattr->in_node_attr_in.utimeout = hnode->utimeout;
1745		pattr->in_node_attr_in.heap_size =
1746			hnode->create_args.asa.task_arg_obj.heap_size;
1747		pattr->in_node_attr_in.pgpp_virt_addr = (void *)
1748			hnode->create_args.asa.task_arg_obj.ugpp_heap_addr;
1749		pattr->node_attr_inputs = hnode->num_gpp_inputs;
1750		pattr->node_attr_outputs = hnode->num_gpp_outputs;
1751		/* dsp_nodeinfo */
1752		get_node_info(hnode, &(pattr->node_info));
1753		/* end of sync_enter_cs */
1754		/* Exit critical section */
1755		mutex_unlock(&hnode_mgr->node_mgr_lock);
1756	}
1757	return status;
1758}
1759
1760/*
1761 *  ======== node_get_channel_id ========
1762 *  Purpose:
1763 *      Get the channel index reserved for a stream connection between the
1764 *      host and a node.
1765 */
1766int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1767			       u32 *chan_id)
1768{
1769	enum node_type node_type;
1770	int status = -EINVAL;
1771	DBC_REQUIRE(refs > 0);
1772	DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
1773	DBC_REQUIRE(chan_id != NULL);
1774
1775	if (!hnode) {
1776		status = -EFAULT;
1777		return status;
1778	}
1779	node_type = node_get_type(hnode);
1780	if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1781		status = -EPERM;
1782		return status;
1783	}
1784	if (dir == DSP_TONODE) {
1785		if (index < MAX_INPUTS(hnode)) {
1786			if (hnode->inputs[index].type == HOSTCONNECT) {
1787				*chan_id = hnode->inputs[index].dev_id;
1788				status = 0;
1789			}
1790		}
1791	} else {
1792		DBC_ASSERT(dir == DSP_FROMNODE);
1793		if (index < MAX_OUTPUTS(hnode)) {
1794			if (hnode->outputs[index].type == HOSTCONNECT) {
1795				*chan_id = hnode->outputs[index].dev_id;
1796				status = 0;
1797			}
1798		}
1799	}
1800	return status;
1801}
1802
1803/*
1804 *  ======== node_get_message ========
1805 *  Purpose:
1806 *      Retrieve a message from a node on the DSP.
1807 */
int node_get_message(struct node_object *hnode,
			    struct dsp_msg *message, u32 utimeout)
{
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	void *tmp_buf;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(message != NULL);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->hprocessor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to get the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->hnode_mgr;
	node_type = node_get_type(hnode);
	/* Only message, task and DAIS socket nodes have message queues */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET) {
		status = -EPERM;
		goto func_end;
	}
	/*  This function will block unless a message is available. Since
	 *  DSPNode_RegisterNotify() allows notification when a message
	 *  is available, the system can be designed so that
	 *  DSPNode_GetMessage() is only called when a message is
	 *  available. */
	intf_fxns = hnode_mgr->intf_fxns;
	status =
	    (*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, message, utimeout);
	/* Check if message contains SM descriptor */
	if (status || !(message->dw_cmd & DSP_RMSBUFDESC))
		goto func_end;

	/* Translate DSP byte addr to GPP Va. */
	/* Step 1: DSP word address (dw_arg1) -> GPP physical address */
	tmp_buf = cmm_xlator_translate(hnode->xlator,
				       (void *)(message->dw_arg1 *
						hnode->hnode_mgr->
						udsp_word_size), CMM_DSPPA2PA);
	if (tmp_buf != NULL) {
		/* now convert this GPP Pa to Va */
		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
					       CMM_PA2VA);
		if (tmp_buf != NULL) {
			/* Adjust SM size in msg */
			/* dw_arg1 becomes the GPP virtual address and
			 * dw_arg2 is rescaled from DSP words to bytes */
			message->dw_arg1 = (u32) tmp_buf;
			message->dw_arg2 *= hnode->hnode_mgr->udsp_word_size;
		} else {
			status = -ESRCH;
		}
	} else {
		status = -ESRCH;
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
		hnode, message, utimeout);
	return status;
}
1880
1881/*
1882 *   ======== node_get_nldr_obj ========
1883 */
1884int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1885			     struct nldr_object **nldr_ovlyobj)
1886{
1887	int status = 0;
1888	struct node_mgr *node_mgr_obj = hnode_mgr;
1889	DBC_REQUIRE(nldr_ovlyobj != NULL);
1890
1891	if (!hnode_mgr)
1892		status = -EFAULT;
1893	else
1894		*nldr_ovlyobj = node_mgr_obj->nldr_obj;
1895
1896	DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
1897	return status;
1898}
1899
1900/*
1901 *  ======== node_get_strm_mgr ========
1902 *  Purpose:
1903 *      Returns the Stream manager.
1904 */
1905int node_get_strm_mgr(struct node_object *hnode,
1906			     struct strm_mgr **strm_man)
1907{
1908	int status = 0;
1909
1910	DBC_REQUIRE(refs > 0);
1911
1912	if (!hnode)
1913		status = -EFAULT;
1914	else
1915		*strm_man = hnode->hnode_mgr->strm_mgr_obj;
1916
1917	return status;
1918}
1919
1920/*
1921 *  ======== node_get_load_type ========
1922 */
1923enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1924{
1925	DBC_REQUIRE(refs > 0);
1926	DBC_REQUIRE(hnode);
1927	if (!hnode) {
1928		dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1929		return -1;
1930	} else {
1931		return hnode->dcd_props.obj_data.node_obj.us_load_type;
1932	}
1933}
1934
1935/*
1936 *  ======== node_get_timeout ========
1937 *  Purpose:
1938 *      Returns the timeout value for this node.
1939 */
1940u32 node_get_timeout(struct node_object *hnode)
1941{
1942	DBC_REQUIRE(refs > 0);
1943	DBC_REQUIRE(hnode);
1944	if (!hnode) {
1945		dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1946		return 0;
1947	} else {
1948		return hnode->utimeout;
1949	}
1950}
1951
1952/*
1953 *  ======== node_get_type ========
1954 *  Purpose:
1955 *      Returns the node type.
1956 */
1957enum node_type node_get_type(struct node_object *hnode)
1958{
1959	enum node_type node_type;
1960
1961	if (hnode == (struct node_object *)DSP_HGPPNODE)
1962		node_type = NODE_GPP;
1963	else {
1964		if (!hnode)
1965			node_type = -1;
1966		else
1967			node_type = hnode->ntype;
1968	}
1969	return node_type;
1970}
1971
1972/*
1973 *  ======== node_init ========
1974 *  Purpose:
1975 *      Initialize the NODE module.
1976 */
1977bool node_init(void)
1978{
1979	DBC_REQUIRE(refs >= 0);
1980
1981	refs++;
1982
1983	return true;
1984}
1985
1986/*
1987 *  ======== node_on_exit ========
1988 *  Purpose:
1989 *      Gets called when RMS_EXIT is received for a node.
1990 */
void node_on_exit(struct node_object *hnode, s32 node_status)
{
	if (!hnode)
		return;

	/* Set node state to done */
	NODE_SET_STATE(hnode, NODE_DONE);
	/* Record the node's own exit code so node_terminate() can
	 * report it back through *pstatus. */
	hnode->exit_status = node_status;
	if (hnode->loaded && hnode->phase_split) {
		/* Execute-phase code is no longer needed; unload it.
		 * Failure is deliberately ignored (best effort). */
		(void)hnode->hnode_mgr->nldr_fxns.pfn_unload(hnode->
							     nldr_node_obj,
							     NLDR_EXECUTE);
		hnode->loaded = false;
	}
	/* Unblock call to node_terminate */
	(void)sync_set_event(hnode->sync_done);
	/* Notify clients */
	proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
	ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
2011
2012/*
2013 *  ======== node_pause ========
2014 *  Purpose:
2015 *      Suspend execution of a node currently running on the DSP.
2016 */
2017int node_pause(struct node_object *hnode)
2018{
2019	struct node_object *pnode = (struct node_object *)hnode;
2020	enum node_type node_type;
2021	enum node_state state;
2022	struct node_mgr *hnode_mgr;
2023	int status = 0;
2024	u32 proc_id;
2025	struct dsp_processorstate proc_state;
2026	struct proc_object *hprocessor;
2027
2028	DBC_REQUIRE(refs > 0);
2029
2030	if (!hnode) {
2031		status = -EFAULT;
2032	} else {
2033		node_type = node_get_type(hnode);
2034		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2035			status = -EPERM;
2036	}
2037	if (status)
2038		goto func_end;
2039
2040	status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2041
2042	if (proc_id == IVA_UNIT)
2043		status = -ENOSYS;
2044
2045	if (!status) {
2046		hnode_mgr = hnode->hnode_mgr;
2047
2048		/* Enter critical section */
2049		mutex_lock(&hnode_mgr->node_mgr_lock);
2050		state = node_get_state(hnode);
2051		/* Check node state */
2052		if (state != NODE_RUNNING)
2053			status = -EBADR;
2054
2055		if (status)
2056			goto func_cont;
2057		hprocessor = hnode->hprocessor;
2058		status = proc_get_state(hprocessor, &proc_state,
2059				sizeof(struct dsp_processorstate));
2060		if (status)
2061			goto func_cont;
2062		/* If processor is in error state then don't attempt
2063		   to send the message */
2064		if (proc_state.proc_state == PROC_ERROR) {
2065			status = -EPERM;
2066			goto func_cont;
2067		}
2068
2069		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2070			hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY],
2071			hnode->node_env, NODE_SUSPENDEDPRI);
2072
2073		/* Update state */
2074		if (status >= 0)
2075			NODE_SET_STATE(hnode, NODE_PAUSED);
2076
2077func_cont:
2078		/* End of sync_enter_cs */
2079		/* Leave critical section */
2080		mutex_unlock(&hnode_mgr->node_mgr_lock);
2081		if (status >= 0) {
2082			proc_notify_clients(hnode->hprocessor,
2083					    DSP_NODESTATECHANGE);
2084			ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2085		}
2086	}
2087func_end:
2088	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2089	return status;
2090}
2091
2092/*
2093 *  ======== node_put_message ========
2094 *  Purpose:
2095 *      Send a message to a message node, task node, or XDAIS socket node. This
2096 *      function will block until the message stream can accommodate the
2097 *      message, or a timeout occurs.
2098 */
int node_put_message(struct node_object *hnode,
			    const struct dsp_msg *pmsg, u32 utimeout)
{
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	int status = 0;
	void *tmp_buf;
	struct dsp_msg new_msg;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pmsg != NULL);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->hprocessor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in bad state then don't attempt sending the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Only message, task, and XDAIS socket nodes accept messages. */
	hnode_mgr = hnode->hnode_mgr;
	node_type = node_get_type(hnode);
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET)
		status = -EPERM;

	if (!status) {
		/*  Check node state. Can't send messages to a node after
		 *  we've sent the RMS_EXIT command. There is still the
		 *  possibility that node_terminate can be called after we've
		 *  checked the state. Could add another SYNC object to
		 *  prevent this (can't use node_mgr_lock, since we don't
		 *  want to block other NODE functions). However, the node may
		 *  still exit on its own, before this message is sent. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state == NODE_TERMINATING || state == NODE_DONE)
			status = -EBADR;

		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (status)
		goto func_end;

	/* assign pmsg values to new msg */
	new_msg = *pmsg;
	/* Now, check if message contains a SM buffer descriptor */
	if (pmsg->dw_cmd & DSP_RMSBUFDESC) {
		/* Translate GPP Va to DSP physical buf Ptr. */
		tmp_buf = cmm_xlator_translate(hnode->xlator,
					       (void *)new_msg.dw_arg1,
					       CMM_VA2DSPPA);
		if (tmp_buf != NULL) {
			/* got translation, convert to MAUs in msg */
			if (hnode->hnode_mgr->udsp_word_size != 0) {
				/* Rescale the buffer address (dw_arg1) and
				 * byte size (dw_arg2) into DSP word units. */
				new_msg.dw_arg1 =
				    (u32) tmp_buf /
				    hnode->hnode_mgr->udsp_word_size;
				/* MAUs */
				new_msg.dw_arg2 /= hnode->hnode_mgr->
				    udsp_word_size;
			} else {
				pr_err("%s: udsp_word_size is zero!\n",
				       __func__);
				status = -EPERM;	/* bad DSPWordSize */
			}
		} else {	/* failed to translate buffer address */
			status = -ESRCH;
		}
	}
	if (!status) {
		/* Queue the (possibly rewritten) message; blocks for up to
		 * utimeout if the message queue is full. */
		intf_fxns = hnode_mgr->intf_fxns;
		status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj,
						    &new_msg, utimeout);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
	return status;
}
2191
2192/*
2193 *  ======== node_register_notify ========
2194 *  Purpose:
2195 *      Register to be notified on specific events for this node.
2196 */
2197int node_register_notify(struct node_object *hnode, u32 event_mask,
2198				u32 notify_type,
2199				struct dsp_notification *hnotification)
2200{
2201	struct bridge_drv_interface *intf_fxns;
2202	int status = 0;
2203
2204	DBC_REQUIRE(refs > 0);
2205	DBC_REQUIRE(hnotification != NULL);
2206
2207	if (!hnode) {
2208		status = -EFAULT;
2209	} else {
2210		/* Check if event mask is a valid node related event */
2211		if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2212			status = -EINVAL;
2213
2214		/* Check if notify type is valid */
2215		if (notify_type != DSP_SIGNALEVENT)
2216			status = -EINVAL;
2217
2218		/* Only one Notification can be registered at a
2219		 * time - Limitation */
2220		if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2221			status = -EINVAL;
2222	}
2223	if (!status) {
2224		if (event_mask == DSP_NODESTATECHANGE) {
2225			status = ntfy_register(hnode->ntfy_obj, hnotification,
2226					       event_mask & DSP_NODESTATECHANGE,
2227					       notify_type);
2228		} else {
2229			/* Send Message part of event mask to msg_ctrl */
2230			intf_fxns = hnode->hnode_mgr->intf_fxns;
2231			status = (*intf_fxns->pfn_msg_register_notify)
2232			    (hnode->msg_queue_obj,
2233			     event_mask & DSP_NODEMESSAGEREADY, notify_type,
2234			     hnotification);
2235		}
2236
2237	}
2238	dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2239		"hnotification: %p status 0x%x\n", __func__, hnode,
2240		event_mask, notify_type, hnotification, status);
2241	return status;
2242}
2243
2244/*
2245 *  ======== node_run ========
2246 *  Purpose:
2247 *      Start execution of a node's execute phase, or resume execution of a node
2248 *      that has been suspended (via NODE_NodePause()) on the DSP. Load the
2249 *      node's execute function if necessary.
2250 */
int node_run(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	enum node_state state;
	u32 ul_execute_fxn;
	u32 ul_fxn_addr;
	int status = 0;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->hprocessor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to run the node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Device nodes have no execute phase that can be run. */
	node_type = node_get_type(hnode);
	if (node_type == NODE_DEVICE)
		status = -EPERM;
	if (status)
		goto func_end;

	hnode_mgr = hnode->hnode_mgr;
	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Only a freshly created or a paused node may be (re)started. */
	state = node_get_state(hnode);
	if (state != NODE_CREATED && state != NODE_PAUSED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->hprocessor, &proc_id);

	if (status)
		goto func_cont1;

	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
		goto func_cont1;

	if (state == NODE_CREATED) {
		/* If node's execute function is not loaded, load it */
		if (!(hnode->loaded) && hnode->phase_split) {
			status =
			    hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
							  NLDR_EXECUTE);
			if (!status) {
				hnode->loaded = true;
			} else {
				pr_err("%s: fail - load execute code: 0x%x\n",
				       __func__, status);
			}
		}
		if (!status) {
			/* Get address of node's execute function */
			if (proc_id == IVA_UNIT)
				ul_execute_fxn = (u32) hnode->node_env;
			else {
				status = get_fxn_address(hnode, &ul_execute_fxn,
							 EXECUTEPHASE);
			}
		}
		if (!status) {
			/* Dispatch an RMS "execute node" command to the DSP */
			ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE];
			status =
			    disp_node_run(hnode_mgr->disp_obj, hnode,
					  ul_fxn_addr, ul_execute_fxn,
					  hnode->node_env);
		}
	} else if (state == NODE_PAUSED) {
		/* Resume: restoring the node's own priority un-suspends
		 * it on the DSP (pause dropped it to NODE_SUSPENDEDPRI). */
		ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY];
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
						   ul_fxn_addr, hnode->node_env,
						   NODE_GET_PRIORITY(hnode));
	} else {
		/* We should never get here */
		DBC_ASSERT(false);
	}
func_cont1:
	/* Update node state. */
	if (status >= 0)
		NODE_SET_STATE(hnode, NODE_RUNNING);
	else			/* Set state back to previous value */
		NODE_SET_STATE(hnode, state);
	/*End of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (status >= 0) {
		proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
2364
2365/*
2366 *  ======== node_terminate ========
2367 *  Purpose:
2368 *      Signal a node running on the DSP that it should exit its execute phase
2369 *      function.
2370 */
int node_terminate(struct node_object *hnode, int *pstatus)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	struct dsp_msg msg, killmsg;
	int status = 0;
	u32 proc_id, kill_time_out;
	struct deh_mgr *hdeh_mgr;
	struct dsp_processorstate proc_state;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pstatus != NULL);

	if (!hnode || !hnode->hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (pnode->hprocessor == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_processor_id(pnode->hprocessor, &proc_id);

	if (!status) {
		/* Only task and XDAIS socket nodes have an execute phase
		 * that can be asked to exit. */
		hnode_mgr = hnode->hnode_mgr;
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (!status) {
		/* Check node state */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state != NODE_RUNNING) {
			status = -EBADR;
			/* Set the exit status if node terminated on
			 * its own. */
			if (state == NODE_DONE)
				*pstatus = hnode->exit_status;

		} else {
			NODE_SET_STATE(hnode, NODE_TERMINATING);
		}
		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (!status) {
		/*
		 *  Send exit message. Do not change state to NODE_DONE
		 *  here. That will be done in callback.
		 */
		status = proc_get_state(pnode->hprocessor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt to send
		 * A kill task command */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		/* Build a polite RMS_EXIT request plus a forcible
		 * RMS_KILLTASK follow-up in case the node ignores it. */
		msg.dw_cmd = RMS_EXIT;
		msg.dw_arg1 = hnode->node_env;
		killmsg.dw_cmd = RMS_KILLTASK;
		killmsg.dw_arg1 = hnode->node_env;
		intf_fxns = hnode_mgr->intf_fxns;

		/* Total kill budget: twice the node timeout, capped at
		 * MAXTIMEOUT; half is spent on each wait below. */
		if (hnode->utimeout > MAXTIMEOUT)
			kill_time_out = MAXTIMEOUT;
		else
			kill_time_out = (hnode->utimeout) * 2;

		status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, &msg,
						    hnode->utimeout);
		if (status)
			goto func_cont;

		/*
		 * Wait on synchronization object that will be
		 * posted in the callback on receiving RMS_EXIT
		 * message, or by node_delete. Check for valid hnode,
		 * in case posted by node_delete().
		 */
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (status != ETIME)
			goto func_cont;

		/* Exit request timed out: escalate to the kill command. */
		status = (*intf_fxns->pfn_msg_put)(hnode->msg_queue_obj,
						&killmsg, hnode->utimeout);
		if (status)
			goto func_cont;
		status = sync_wait_on_event(hnode->sync_done,
					     kill_time_out / 2);
		if (status) {
			/*
			 * Here it goes the part of the simulation of
			 * the DSP exception.
			 */
			dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr);
			if (!hdeh_mgr)
				goto func_cont;

			bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
		}
	}
func_cont:
	if (!status) {
		/* Enter CS before getting exit status, in case node was
		 * deleted. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		/* Make sure node wasn't deleted while we blocked */
		if (!hnode) {
			status = -EPERM;
		} else {
			*pstatus = hnode->exit_status;
			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
				__func__, hnode, hnode->node_env, status);
		}
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}			/*End of sync_enter_cs */
func_end:
	return status;
}
2499
2500/*
2501 *  ======== delete_node ========
2502 *  Purpose:
2503 *      Free GPP resources allocated in node_allocate() or node_connect().
2504 */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct cmm_xlatorobject *xlator;
	struct bridge_drv_interface *intf_fxns;
	u32 i;
	enum node_type node_type;
	struct stream_chnl stream;
	struct node_msgargs node_msg_args;
	struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object =
	    (struct proc_object *)hnode->hprocessor;
#endif
	int status;
	if (!hnode)
		goto func_end;
	hnode_mgr = hnode->hnode_mgr;
	if (!hnode_mgr)
		goto func_end;
	xlator = hnode->xlator;
	node_type = node_get_type(hnode);
	if (node_type != NODE_DEVICE) {
		/* Message/task/socket nodes own messaging and stream
		 * resources that device nodes do not have. */
		node_msg_args = hnode->create_args.asa.node_msg_args;
		kfree(node_msg_args.pdata);

		/* Free msg_ctrl queue */
		if (hnode->msg_queue_obj) {
			intf_fxns = hnode_mgr->intf_fxns;
			(*intf_fxns->pfn_msg_delete_queue) (hnode->
							    msg_queue_obj);
			hnode->msg_queue_obj = NULL;
		}

		kfree(hnode->sync_done);

		/* Free all stream info */
		if (hnode->inputs) {
			/* Release channel/pipe ids held by each stream. */
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				stream = hnode->inputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->inputs);
			hnode->inputs = NULL;
		}
		if (hnode->outputs) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				stream = hnode->outputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->outputs);
			hnode->outputs = NULL;
		}
		/* Free per-stream definition strings, then the arrays. */
		task_arg_obj = hnode->create_args.asa.task_arg_obj;
		if (task_arg_obj.strm_in_def) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_in_def[i].sz_device);
				task_arg_obj.strm_in_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_in_def);
			task_arg_obj.strm_in_def = NULL;
		}
		if (task_arg_obj.strm_out_def) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_out_def[i].sz_device);
				task_arg_obj.strm_out_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_out_def);
			task_arg_obj.strm_out_def = NULL;
		}
		/* Tear down the node's DSP heap mapping/reservation.
		 * NOTE(review): both unmap/unreserve statuses are
		 * overwritten and effectively ignored. */
		if (task_arg_obj.udsp_heap_res_addr) {
			status = proc_un_map(hnode->hprocessor, (void *)
					     task_arg_obj.udsp_heap_addr,
					     pr_ctxt);

			status = proc_un_reserve_memory(hnode->hprocessor,
							(void *)
							task_arg_obj.
							udsp_heap_res_addr,
							pr_ctxt);
#ifdef DSP_DMM_DEBUG
			status = dmm_get_handle(p_proc_object, &dmm_mgr);
			if (dmm_mgr)
				dmm_mem_map_dump(dmm_mgr);
			else
				status = DSP_EHANDLE;
#endif
		}
	}
	if (node_type != NODE_MESSAGE) {
		kfree(hnode->stream_connect);
		hnode->stream_connect = NULL;
	}
	kfree(hnode->pstr_dev_name);
	hnode->pstr_dev_name = NULL;

	if (hnode->ntfy_obj) {
		ntfy_delete(hnode->ntfy_obj);
		kfree(hnode->ntfy_obj);
		hnode->ntfy_obj = NULL;
	}

	/* These were allocated in dcd_get_object_def (via node_allocate) */
	kfree(hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name);
	hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name = NULL;

	/* Free all SM address translator resources */
	if (xlator) {
		(void)cmm_xlator_delete(xlator, true);	/* force free */
		xlator = NULL;
	}

	/* Finally release the loader node object and the node itself. */
	kfree(hnode->nldr_node_obj);
	hnode->nldr_node_obj = NULL;
	hnode->hnode_mgr = NULL;
	kfree(hnode);
	hnode = NULL;
func_end:
	return;
}
2636
2637/*
2638 *  ======== delete_node_mgr ========
2639 *  Purpose:
2640 *      Frees the node manager.
2641 */
static void delete_node_mgr(struct node_mgr *hnode_mgr)
{
	struct node_object *hnode;

	if (hnode_mgr) {
		/* Free resources */
		if (hnode_mgr->hdcd_mgr)
			dcd_destroy_manager(hnode_mgr->hdcd_mgr);

		/* Remove any elements remaining in lists */
		if (hnode_mgr->node_list) {
			/* Delete every node still registered with this
			 * manager before freeing the list itself. */
			while ((hnode = (struct node_object *)
				lst_get_head(hnode_mgr->node_list)))
				delete_node(hnode, NULL);

			DBC_ASSERT(LST_IS_EMPTY(hnode_mgr->node_list));
			kfree(hnode_mgr->node_list);
		}
		mutex_destroy(&hnode_mgr->node_mgr_lock);
		if (hnode_mgr->ntfy_obj) {
			ntfy_delete(hnode_mgr->ntfy_obj);
			kfree(hnode_mgr->ntfy_obj);
		}

		/* Release the channel/pipe allocation bitmaps. */
		if (hnode_mgr->pipe_map)
			gb_delete(hnode_mgr->pipe_map);

		if (hnode_mgr->pipe_done_map)
			gb_delete(hnode_mgr->pipe_done_map);

		if (hnode_mgr->chnl_map)
			gb_delete(hnode_mgr->chnl_map);

		if (hnode_mgr->dma_chnl_map)
			gb_delete(hnode_mgr->dma_chnl_map);

		if (hnode_mgr->zc_chnl_map)
			gb_delete(hnode_mgr->zc_chnl_map);

		if (hnode_mgr->disp_obj)
			disp_delete(hnode_mgr->disp_obj);

		if (hnode_mgr->strm_mgr_obj)
			strm_delete(hnode_mgr->strm_mgr_obj);

		/* Delete the loader */
		if (hnode_mgr->nldr_obj)
			hnode_mgr->nldr_fxns.pfn_delete(hnode_mgr->nldr_obj);

		if (hnode_mgr->loader_init)
			hnode_mgr->nldr_fxns.pfn_exit();

		kfree(hnode_mgr);
	}
}
2697
2698/*
2699 *  ======== fill_stream_connect ========
2700 *  Purpose:
2701 *      Fills stream information.
2702 */
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2,
				u32 stream1, u32 stream2)
{
	u32 strm_index;
	struct dsp_streamconnect *strm1 = NULL;
	struct dsp_streamconnect *strm2 = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;

	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);
	if (node1 != (struct node_object *)DSP_HGPPNODE) {

		if (node1_type != NODE_DEVICE) {
			/* The newest connection occupies the last used
			 * slot of node1's stream_connect array (counts
			 * are assumed already incremented by the caller
			 * — NOTE(review): confirm against node_connect). */
			strm_index = node1->num_inputs +
			    node1->num_outputs - 1;
			strm1 = &(node1->stream_connect[strm_index]);
			strm1->cb_struct = sizeof(struct dsp_streamconnect);
			strm1->this_node_stream_index = stream1;
		}

		if (node2 != (struct node_object *)DSP_HGPPNODE) {
			/* NODE == > NODE */
			if (node1_type != NODE_DEVICE) {
				strm1->connected_node = node2;
				strm1->ui_connected_node_id = node2->node_uuid;
				strm1->connected_node_stream_index = stream2;
				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
			}
			if (node2_type != NODE_DEVICE) {
				/* Mirror the connection on node2's side. */
				strm_index = node2->num_inputs +
				    node2->num_outputs - 1;
				strm2 = &(node2->stream_connect[strm_index]);
				strm2->cb_struct =
				    sizeof(struct dsp_streamconnect);
				strm2->this_node_stream_index = stream2;
				strm2->connected_node = node1;
				strm2->ui_connected_node_id = node1->node_uuid;
				strm2->connected_node_stream_index = stream1;
				strm2->connect_type = CONNECTTYPE_NODEINPUT;
			}
		} else if (node1_type != NODE_DEVICE)
			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
	} else {
		/* GPP == > NODE */
		DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
		strm_index = node2->num_inputs + node2->num_outputs - 1;
		strm2 = &(node2->stream_connect[strm_index]);
		strm2->cb_struct = sizeof(struct dsp_streamconnect);
		strm2->this_node_stream_index = stream2;
		strm2->connect_type = CONNECTTYPE_GPPINPUT;
	}
}
2757
2758/*
2759 *  ======== fill_stream_def ========
2760 *  Purpose:
2761 *      Fills Stream attributes.
2762 */
2763static void fill_stream_def(struct node_object *hnode,
2764			    struct node_strmdef *pstrm_def,
2765			    struct dsp_strmattr *pattrs)
2766{
2767	struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2768
2769	if (pattrs != NULL) {
2770		pstrm_def->num_bufs = pattrs->num_bufs;
2771		pstrm_def->buf_size =
2772		    pattrs->buf_size / hnode_mgr->udsp_data_mau_size;
2773		pstrm_def->seg_id = pattrs->seg_id;
2774		pstrm_def->buf_alignment = pattrs->buf_alignment;
2775		pstrm_def->utimeout = pattrs->utimeout;
2776	} else {
2777		pstrm_def->num_bufs = DEFAULTNBUFS;
2778		pstrm_def->buf_size =
2779		    DEFAULTBUFSIZE / hnode_mgr->udsp_data_mau_size;
2780		pstrm_def->seg_id = DEFAULTSEGID;
2781		pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2782		pstrm_def->utimeout = DEFAULTTIMEOUT;
2783	}
2784}
2785
2786/*
2787 *  ======== free_stream ========
2788 *  Purpose:
2789 *      Updates the channel mask and frees the pipe id.
2790 */
2791static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2792{
2793	/* Free up the pipe id unless other node has not yet been deleted. */
2794	if (stream.type == NODECONNECT) {
2795		if (gb_test(hnode_mgr->pipe_done_map, stream.dev_id)) {
2796			/* The other node has already been deleted */
2797			gb_clear(hnode_mgr->pipe_done_map, stream.dev_id);
2798			gb_clear(hnode_mgr->pipe_map, stream.dev_id);
2799		} else {
2800			/* The other node has not been deleted yet */
2801			gb_set(hnode_mgr->pipe_done_map, stream.dev_id);
2802		}
2803	} else if (stream.type == HOSTCONNECT) {
2804		if (stream.dev_id < hnode_mgr->ul_num_chnls) {
2805			gb_clear(hnode_mgr->chnl_map, stream.dev_id);
2806		} else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
2807			/* dsp-dma */
2808			gb_clear(hnode_mgr->dma_chnl_map, stream.dev_id -
2809				 (1 * hnode_mgr->ul_num_chnls));
2810		} else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
2811			/* zero-copy */
2812			gb_clear(hnode_mgr->zc_chnl_map, stream.dev_id -
2813				 (2 * hnode_mgr->ul_num_chnls));
2814		}
2815	}
2816}
2817
2818/*
2819 *  ======== get_fxn_address ========
2820 *  Purpose:
2821 *      Retrieves the address for create, execute or delete phase for a node.
2822 */
2823static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2824				  u32 phase)
2825{
2826	char *pstr_fxn_name = NULL;
2827	struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2828	int status = 0;
2829	DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
2830		    node_get_type(hnode) == NODE_DAISSOCKET ||
2831		    node_get_type(hnode) == NODE_MESSAGE);
2832
2833	switch (phase) {
2834	case CREATEPHASE:
2835		pstr_fxn_name =
2836		    hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn;
2837		break;
2838	case EXECUTEPHASE:
2839		pstr_fxn_name =
2840		    hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn;
2841		break;
2842	case DELETEPHASE:
2843		pstr_fxn_name =
2844		    hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn;
2845		break;
2846	default:
2847		/* Should never get here */
2848		DBC_ASSERT(false);
2849		break;
2850	}
2851
2852	status =
2853	    hnode_mgr->nldr_fxns.pfn_get_fxn_addr(hnode->nldr_node_obj,
2854						  pstr_fxn_name, fxn_addr);
2855
2856	return status;
2857}
2858
2859/*
2860 *  ======== get_node_info ========
2861 *  Purpose:
2862 *      Retrieves the node information.
2863 */
2864void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2865{
2866	u32 i;
2867
2868	DBC_REQUIRE(hnode);
2869	DBC_REQUIRE(node_info != NULL);
2870
2871	node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2872	node_info->nb_node_database_props =
2873	    hnode->dcd_props.obj_data.node_obj.ndb_props;
2874	node_info->execution_priority = hnode->prio;
2875	node_info->device_owner = hnode->device_owner;
2876	node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2877	node_info->node_env = hnode->node_env;
2878
2879	node_info->ns_execution_state = node_get_state(hnode);
2880
2881	/* Copy stream connect data */
2882	for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2883		node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2884
2885}
2886
2887/*
2888 *  ======== get_node_props ========
2889 *  Purpose:
2890 *      Retrieve node properties.
2891 */
static int get_node_props(struct dcd_manager *hdcd_mgr,
				 struct node_object *hnode,
				 const struct dsp_uuid *node_uuid,
				 struct dcd_genericobj *dcd_prop)
{
	u32 len;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *task_arg_obj;
	enum node_type node_type = NODE_TASK;
	struct dsp_ndbprops *pndb_props =
	    &(dcd_prop->obj_data.node_obj.ndb_props);
	int status = 0;
	char sz_uuid[MAXUUIDLEN];

	/* Look the node up in the DCD database by UUID. */
	status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
				    DSP_DCDNODETYPE, dcd_prop);

	if (!status) {
		hnode->ntype = node_type = pndb_props->ntype;

		/* Create UUID value to set in registry. */
		uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
				    MAXUUIDLEN);
		dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);

		/* Fill in message args that come from NDB */
		if (node_type != NODE_DEVICE) {
			pmsg_args = &(hnode->create_args.asa.node_msg_args);
			pmsg_args->seg_id =
			    dcd_prop->obj_data.node_obj.msg_segid;
			pmsg_args->notify_type =
			    dcd_prop->obj_data.node_obj.msg_notify_type;
			pmsg_args->max_msgs = pndb_props->message_depth;
			dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
				pmsg_args->max_msgs);
		} else {
			/* Copy device name */
			DBC_REQUIRE(pndb_props->ac_name);
			len = strlen(pndb_props->ac_name);
			DBC_ASSERT(len < MAXDEVNAMELEN);
			/* kzalloc zero-fills the len+1 buffer, so the
			 * strncpy of exactly len bytes below is always
			 * NUL-terminated. */
			hnode->pstr_dev_name = kzalloc(len + 1, GFP_KERNEL);
			if (hnode->pstr_dev_name == NULL) {
				status = -ENOMEM;
			} else {
				strncpy(hnode->pstr_dev_name,
					pndb_props->ac_name, len);
			}
		}
	}
	if (!status) {
		/* Fill in create args that come from NDB */
		if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
			task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
			task_arg_obj->prio = pndb_props->prio;
			task_arg_obj->stack_size = pndb_props->stack_size;
			task_arg_obj->sys_stack_size =
			    pndb_props->sys_stack_size;
			task_arg_obj->stack_seg = pndb_props->stack_seg;
			dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
				"0x%x words System Stack Size: 0x%x words "
				"Stack Segment: 0x%x profile count : 0x%x\n",
				task_arg_obj->prio, task_arg_obj->stack_size,
				task_arg_obj->sys_stack_size,
				task_arg_obj->stack_seg,
				pndb_props->count_profiles);
		}
	}

	return status;
}
2962
2963/*
2964 *  ======== get_proc_props ========
2965 *  Purpose:
2966 *      Retrieve the processor properties.
2967 */
2968static int get_proc_props(struct node_mgr *hnode_mgr,
2969				 struct dev_object *hdev_obj)
2970{
2971	struct cfg_hostres *host_res;
2972	struct bridge_dev_context *pbridge_context;
2973	int status = 0;
2974
2975	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2976	if (!pbridge_context)
2977		status = -EFAULT;
2978
2979	if (!status) {
2980		host_res = pbridge_context->resources;
2981		if (!host_res)
2982			return -EPERM;
2983		hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset;
2984		hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size;
2985		hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
2986
2987		/*
2988		 *  PROC will add an API to get dsp_processorinfo.
2989		 *  Fill in default values for now.
2990		 */
2991		/* TODO -- Instead of hard coding, take from registry */
2992		hnode_mgr->proc_family = 6000;
2993		hnode_mgr->proc_type = 6410;
2994		hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2995		hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2996		hnode_mgr->udsp_word_size = DSPWORDSIZE;
2997		hnode_mgr->udsp_data_mau_size = DSPWORDSIZE;
2998		hnode_mgr->udsp_mau_size = 1;
2999
3000	}
3001	return status;
3002}
3003
3004/*
3005 *  ======== node_get_uuid_props ========
3006 *  Purpose:
3007 *      Fetch Node UUID properties from DCD/DOF file.
3008 */
3009int node_get_uuid_props(void *hprocessor,
3010			       const struct dsp_uuid *node_uuid,
3011			       struct dsp_ndbprops *node_props)
3012{
3013	struct node_mgr *hnode_mgr = NULL;
3014	struct dev_object *hdev_obj;
3015	int status = 0;
3016	struct dcd_nodeprops dcd_node_props;
3017	struct dsp_processorstate proc_state;
3018
3019	DBC_REQUIRE(refs > 0);
3020	DBC_REQUIRE(hprocessor != NULL);
3021	DBC_REQUIRE(node_uuid != NULL);
3022
3023	if (hprocessor == NULL || node_uuid == NULL) {
3024		status = -EFAULT;
3025		goto func_end;
3026	}
3027	status = proc_get_state(hprocessor, &proc_state,
3028				sizeof(struct dsp_processorstate));
3029	if (status)
3030		goto func_end;
3031	/* If processor is in error state then don't attempt
3032	   to send the message */
3033	if (proc_state.proc_state == PROC_ERROR) {
3034		status = -EPERM;
3035		goto func_end;
3036	}
3037
3038	status = proc_get_dev_object(hprocessor, &hdev_obj);
3039	if (hdev_obj) {
3040		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
3041		if (hnode_mgr == NULL) {
3042			status = -EFAULT;
3043			goto func_end;
3044		}
3045	}
3046
3047	/*
3048	 * Enter the critical section. This is needed because
3049	 * dcd_get_object_def will ultimately end up calling dbll_open/close,
3050	 * which needs to be protected in order to not corrupt the zlib manager
3051	 * (COD).
3052	 */
3053	mutex_lock(&hnode_mgr->node_mgr_lock);
3054
3055	dcd_node_props.pstr_create_phase_fxn = NULL;
3056	dcd_node_props.pstr_execute_phase_fxn = NULL;
3057	dcd_node_props.pstr_delete_phase_fxn = NULL;
3058	dcd_node_props.pstr_i_alg_name = NULL;
3059
3060	status = dcd_get_object_def(hnode_mgr->hdcd_mgr,
3061		(struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
3062		(struct dcd_genericobj *)&dcd_node_props);
3063
3064	if (!status) {
3065		*node_props = dcd_node_props.ndb_props;
3066		kfree(dcd_node_props.pstr_create_phase_fxn);
3067
3068		kfree(dcd_node_props.pstr_execute_phase_fxn);
3069
3070		kfree(dcd_node_props.pstr_delete_phase_fxn);
3071
3072		kfree(dcd_node_props.pstr_i_alg_name);
3073	}
3074	/*  Leave the critical section, we're done. */
3075	mutex_unlock(&hnode_mgr->node_mgr_lock);
3076func_end:
3077	return status;
3078}
3079
3080/*
3081 *  ======== get_rms_fxns ========
3082 *  Purpose:
3083 *      Retrieve the RMS functions.
3084 */
3085static int get_rms_fxns(struct node_mgr *hnode_mgr)
3086{
3087	s32 i;
3088	struct dev_object *dev_obj = hnode_mgr->hdev_obj;
3089	int status = 0;
3090
3091	static char *psz_fxns[NUMRMSFXNS] = {
3092		"RMS_queryServer",	/* RMSQUERYSERVER */
3093		"RMS_configureServer",	/* RMSCONFIGURESERVER */
3094		"RMS_createNode",	/* RMSCREATENODE */
3095		"RMS_executeNode",	/* RMSEXECUTENODE */
3096		"RMS_deleteNode",	/* RMSDELETENODE */
3097		"RMS_changeNodePriority",	/* RMSCHANGENODEPRIORITY */
3098		"RMS_readMemory",	/* RMSREADMEMORY */
3099		"RMS_writeMemory",	/* RMSWRITEMEMORY */
3100		"RMS_copy",	/* RMSCOPY */
3101	};
3102
3103	for (i = 0; i < NUMRMSFXNS; i++) {
3104		status = dev_get_symbol(dev_obj, psz_fxns[i],
3105					&(hnode_mgr->ul_fxn_addrs[i]));
3106		if (status) {
3107			if (status == -ESPIPE) {
3108				/*
3109				 *  May be loaded dynamically (in the future),
3110				 *  but return an error for now.
3111				 */
3112				dev_dbg(bridge, "%s: RMS function: %s currently"
3113					" not loaded\n", __func__, psz_fxns[i]);
3114			} else {
3115				dev_dbg(bridge, "%s: Symbol not found: %s "
3116					"status = 0x%x\n", __func__,
3117					psz_fxns[i], status);
3118				break;
3119			}
3120		}
3121	}
3122
3123	return status;
3124}
3125
3126/*
3127 *  ======== ovly ========
3128 *  Purpose:
3129 *      Called during overlay.Sends command to RMS to copy a block of data.
3130 */
3131static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
3132		u32 ul_num_bytes, u32 mem_space)
3133{
3134	struct node_object *hnode = (struct node_object *)priv_ref;
3135	struct node_mgr *hnode_mgr;
3136	u32 ul_bytes = 0;
3137	u32 ul_size;
3138	u32 ul_timeout;
3139	int status = 0;
3140	struct bridge_dev_context *hbridge_context;
3141	/* Function interface to Bridge driver*/
3142	struct bridge_drv_interface *intf_fxns;
3143
3144	DBC_REQUIRE(hnode);
3145
3146	hnode_mgr = hnode->hnode_mgr;
3147
3148	ul_size = ul_num_bytes / hnode_mgr->udsp_word_size;
3149	ul_timeout = hnode->utimeout;
3150
3151	/* Call new MemCopy function */
3152	intf_fxns = hnode_mgr->intf_fxns;
3153	status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3154	if (!status) {
3155		status =
3156		    (*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
3157						dsp_run_addr, dsp_load_addr,
3158						ul_num_bytes, (u32) mem_space);
3159		if (!status)
3160			ul_bytes = ul_num_bytes;
3161		else
3162			pr_debug("%s: failed to copy brd memory, status 0x%x\n",
3163				 __func__, status);
3164	} else {
3165		pr_debug("%s: failed to get Bridge context, status 0x%x\n",
3166			 __func__, status);
3167	}
3168
3169	return ul_bytes;
3170}
3171
3172/*
3173 *  ======== mem_write ========
3174 */
3175static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
3176		     u32 ul_num_bytes, u32 mem_space)
3177{
3178	struct node_object *hnode = (struct node_object *)priv_ref;
3179	struct node_mgr *hnode_mgr;
3180	u16 mem_sect_type;
3181	u32 ul_timeout;
3182	int status = 0;
3183	struct bridge_dev_context *hbridge_context;
3184	/* Function interface to Bridge driver */
3185	struct bridge_drv_interface *intf_fxns;
3186
3187	DBC_REQUIRE(hnode);
3188	DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
3189
3190	hnode_mgr = hnode->hnode_mgr;
3191
3192	ul_timeout = hnode->utimeout;
3193	mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
3194
3195	/* Call new MemWrite function */
3196	intf_fxns = hnode_mgr->intf_fxns;
3197	status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3198	status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf,
3199					dsp_add, ul_num_bytes, mem_sect_type);
3200
3201	return ul_num_bytes;
3202}
3203
3204#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
3205/*
3206 *  ======== node_find_addr ========
3207 */
/*
 * Walk every node known to node_mgr and ask its dynamic loader record
 * (nldr_find_addr) whether sym_addr (within offset_range) resolves to a
 * symbol; the first hit is reported through sym_addr_output / sym_name.
 * Returns 0 on a match, otherwise -ENOENT (or the last nldr error).
 */
int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
		u32 offset_range, void *sym_addr_output, char *sym_name)
{
	struct node_object *node_obj;
	int status = -ENOENT;	/* default: not found in any node */
	u32 n;

	pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x,  %s)\n", __func__,
			(unsigned int) node_mgr,
			sym_addr, offset_range,
			(unsigned int) sym_addr_output, sym_name);

	/*
	 * NOTE(review): the list walk below casts raw list pointers
	 * straight to struct node_object, which assumes list_elem is the
	 * first member of the struct — verify before changing the layout.
	 * No lock is taken here either; presumably the caller serializes
	 * against node creation/deletion — TODO confirm.
	 */
	node_obj = (struct node_object *)(node_mgr->node_list->head.next);

	/* num_nodes bounds the walk; an empty list returns -ENOENT. */
	for (n = 0; n < node_mgr->num_nodes; n++) {
		status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
			offset_range, sym_addr_output, sym_name);

		if (!status)
			break;

		node_obj = (struct node_object *) (node_obj->list_elem.next);
	}

	return status;
}
3234#endif
3235