• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/staging/tidspbridge/rmgr/
1/*
2 * nldr.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/types.h>
20
21#include <dspbridge/host_os.h>
22
23#include <dspbridge/dbdefs.h>
24
25#include <dspbridge/dbc.h>
26
27/* Platform manager */
28#include <dspbridge/cod.h>
29#include <dspbridge/dev.h>
30
31/* Resource manager */
32#include <dspbridge/dbll.h>
33#include <dspbridge/dbdcd.h>
34#include <dspbridge/rmm.h>
35#include <dspbridge/uuidutil.h>
36
37#include <dspbridge/nldr.h>
38#include <linux/gcd.h>
39
40/* Name of section containing dynamic load mem */
41#define DYNMEMSECT  ".dspbridge_mem"
42
43/* Name of section containing dependent library information */
44#define DEPLIBSECT  ".dspbridge_deplibs"
45
46/* Max depth of recursion for loading node's dependent libraries */
47#define MAXDEPTH	    5
48
49/* Max number of persistent libraries kept by a node */
50#define MAXLIBS	 5
51
52/*
53 *  Defines for extracting packed dynamic load memory requirements from two
54 *  masks.
55 *  These defines must match node.cdb and dynm.cdb
56 *  Format of data/code mask is:
57 *   uuuuuuuu|fueeeeee|fudddddd|fucccccc|
58 *  where
59 *      u = unused
 *      cccccc = preferred/required dynamic mem segid for create phase data/code
 *      dddddd = preferred/required dynamic mem segid for delete phase data/code
 *      eeeeee = preferred/req. dynamic mem segid for execute phase data/code
63 *      f = flag indicating if memory is preferred or required:
64 *	  f = 1 if required, f = 0 if preferred.
65 *
66 *  The 6 bits of the segid are interpreted as follows:
67 *
68 *  If the 6th bit (bit 5) is not set, then this specifies a memory segment
69 *  between 0 and 31 (a maximum of 32 dynamic loading memory segments).
70 *  If the 6th bit (bit 5) is set, segid has the following interpretation:
71 *      segid = 32 - Any internal memory segment can be used.
72 *      segid = 33 - Any external memory segment can be used.
73 *      segid = 63 - Any memory segment can be used (in this case the
74 *		   required/preferred flag is irrelevant).
75 *
76 */
77/* Maximum allowed dynamic loading memory segments */
78#define MAXMEMSEGS      32
79
80#define MAXSEGID	3	/* Largest possible (real) segid */
81#define MEMINTERNALID   32	/* Segid meaning use internal mem */
82#define MEMEXTERNALID   33	/* Segid meaning use external mem */
83#define NULLID	  63		/* Segid meaning no memory req/pref */
84#define FLAGBIT	 7		/* 7th bit is pref./req. flag */
85#define SEGMASK	 0x3f		/* Bits 0 - 5 */
86
87#define CREATEBIT	0	/* Create segid starts at bit 0 */
88#define DELETEBIT	8	/* Delete segid starts at bit 8 */
89#define EXECUTEBIT      16	/* Execute segid starts at bit 16 */
90
91/*
92 *  Masks that define memory type.  Must match defines in dynm.cdb.
93 */
94#define DYNM_CODE	0x2
95#define DYNM_DATA	0x4
96#define DYNM_CODEDATA   (DYNM_CODE | DYNM_DATA)
97#define DYNM_INTERNAL   0x8
98#define DYNM_EXTERNAL   0x10
99
100/*
101 *  Defines for packing memory requirement/preference flags for code and
102 *  data of each of the node's phases into one mask.
103 *  The bit is set if the segid is required for loading code/data of the
104 *  given phase. The bit is not set, if the segid is preferred only.
105 *
 *  These defines are also used as indices into a segid array for the node.
107 *  eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
108 *  create phase data is required or preferred to be loaded into.
109 */
110#define CREATEDATAFLAGBIT   0
111#define CREATECODEFLAGBIT   1
112#define EXECUTEDATAFLAGBIT  2
113#define EXECUTECODEFLAGBIT  3
114#define DELETEDATAFLAGBIT   4
115#define DELETECODEFLAGBIT   5
116#define MAXFLAGS	    6
117
118    /*
119     *  These names may be embedded in overlay sections to identify which
120     *  node phase the section should be overlayed.
121 */
122#define PCREATE	 "create"
123#define PDELETE	 "delete"
124#define PEXECUTE	"execute"
125
126static inline bool is_equal_uuid(struct dsp_uuid *uuid1,
127							struct dsp_uuid *uuid2)
128{
129	return !memcmp(uuid1, uuid2, sizeof(struct dsp_uuid));
130}
131
/*
 *  ======== mem_seg_info ========
 *  Format of dynamic loading memory segment info in the COFF file.
 *  Must match dynm.h55.
 */
struct mem_seg_info {
	u32 segid;		/* Dynamic loading memory segment number */
	u32 base;		/* Base address of the segment */
	u32 len;		/* Length of the segment */
	u32 type;		/* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
};
143
/*
 *  ======== lib_node ========
 *  For maintaining a tree of library dependencies: each node holds a
 *  library handle plus an array of the libraries it depends on.
 */
struct lib_node {
	struct dbll_library_obj *lib;	/* The library */
	u16 dep_libs;		/* Number of dependent libraries */
	struct lib_node *dep_libs_tree;	/* Dependent libraries of lib */
};
153
/*
 *  ======== ovly_sect ========
 *  Information needed to overlay a section: singly linked list node
 *  recording where a section is loaded vs. where it runs.
 */
struct ovly_sect {
	struct ovly_sect *next_sect;	/* Next section in the phase list */
	u32 sect_load_addr;	/* Load address of section */
	u32 sect_run_addr;	/* Run address of section */
	u32 size;		/* Size of section */
	u16 page;		/* DBL_CODE, DBL_DATA */
};
165
/*
 *  ======== ovly_node ========
 *  For maintaining a list of overlay nodes, with sections that need to be
 *  overlayed for each of the node's phases.  *_sects hold the section
 *  count per phase; *_ref track how many times each phase's sections
 *  have been loaded (used for reference-counted overlay/unload).
 */
struct ovly_node {
	struct dsp_uuid uuid;		/* Node UUID from the DCD */
	char *node_name;		/* Heap copy of the node's name */
	struct ovly_sect *create_sects_list;	/* Create-phase sections */
	struct ovly_sect *delete_sects_list;	/* Delete-phase sections */
	struct ovly_sect *execute_sects_list;	/* Execute-phase sections */
	struct ovly_sect *other_sects_list;	/* Sections for no specific phase */
	u16 create_sects;		/* # of create-phase sections */
	u16 delete_sects;		/* # of delete-phase sections */
	u16 execute_sects;		/* # of execute-phase sections */
	u16 other_sects;		/* # of "other" sections */
	u16 create_ref;			/* Load refcount, create phase */
	u16 delete_ref;			/* Load refcount, delete phase */
	u16 execute_ref;		/* Load refcount, execute phase */
	u16 other_ref;			/* Load refcount, "other" sections */
};
187
/*
 *  ======== nldr_object ========
 *  Overlay loader object: one per device, created by nldr_create().
 */
struct nldr_object {
	struct dev_object *hdev_obj;	/* Device object */
	struct dcd_manager *hdcd_mgr;	/* Proc/Node data manager */
	struct dbll_tar_obj *dbll;	/* The DBL loader */
	struct dbll_library_obj *base_lib;	/* Base image library */
	struct rmm_target_obj *rmm;	/* Remote memory manager for DSP */
	struct dbll_fxns ldr_fxns;	/* Loader function table */
	struct dbll_attrs ldr_attrs;	/* attrs to pass to loader functions */
	nldr_ovlyfxn ovly_fxn;	/* "write" for overlay nodes */
	nldr_writefxn write_fxn;	/* "write" for dynamic nodes */
	struct ovly_node *ovly_table;	/* Table of overlay nodes */
	u16 ovly_nodes;		/* Number of overlay nodes in base */
	u16 ovly_nid;		/* Index for tracking overlay nodes */
	u16 dload_segs;		/* Number of dynamic load mem segs */
	u32 *seg_table;		/* memtypes of dynamic memory segs
				 * indexed by segid
				 */
	u16 us_dsp_mau_size;	/* Size of DSP MAU */
	u16 us_dsp_word_size;	/* Size of DSP word */
};
212
/*
 *  ======== nldr_nodeobject ========
 *  Dynamic node object. This object is created when a node is allocated
 *  (see nldr_allocate) and freed by nldr_free.
 */
struct nldr_nodeobject {
	struct nldr_object *nldr_obj;	/* Dynamic loader handle */
	void *priv_ref;		/* Handle to pass to dbl_write_fxn */
	struct dsp_uuid uuid;	/* Node's UUID */
	bool dynamic;		/* Dynamically loaded node? */
	bool overlay;		/* Overlay node? */
	bool *pf_phase_split;	/* Multiple phase libraries? (caller-owned) */
	struct lib_node root;	/* Library containing node phase */
	struct lib_node create_lib;	/* Library with create phase lib */
	struct lib_node execute_lib;	/* Library with execute phase lib */
	struct lib_node delete_lib;	/* Library with delete phase lib */
	/* libs remain loaded until Delete */
	struct lib_node pers_lib_table[MAXLIBS];
	s32 pers_libs;		/* Number of persistent libraries */
	/* Path in lib dependency tree (cycle detection while loading) */
	struct dbll_library_obj *lib_path[MAXDEPTH + 1];
	enum nldr_phase phase;	/* Node phase currently being loaded */

	/*
	 *  Dynamic loading memory segments for data and code of each phase,
	 *  indexed by the *FLAGBIT defines above.
	 */
	u16 seg_id[MAXFLAGS];

	/*
	 *  Mask indicating whether each mem segment specified in seg_id[]
	 *  is preferred or required.
	 *  For example
	 *  	if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
	 *  then it is required to load execute phase data into the memory
	 *  specified by seg_id[EXECUTEDATAFLAGBIT].
	 */
	u32 code_data_flag_mask;
};
250
/*
 *  Dynamic loader function table.
 *  NOTE(review): positional initializer — the order of entries must match
 *  the member order of struct dbll_fxns (the typedef casts indicate the
 *  intended slots); confirm against dspbridge/dbll.h before reordering.
 */
static struct dbll_fxns ldr_fxns = {
	(dbll_close_fxn) dbll_close,
	(dbll_create_fxn) dbll_create,
	(dbll_delete_fxn) dbll_delete,
	(dbll_exit_fxn) dbll_exit,
	(dbll_get_attrs_fxn) dbll_get_attrs,
	(dbll_get_addr_fxn) dbll_get_addr,
	(dbll_get_c_addr_fxn) dbll_get_c_addr,
	(dbll_get_sect_fxn) dbll_get_sect,
	(dbll_init_fxn) dbll_init,
	(dbll_load_fxn) dbll_load,
	(dbll_load_sect_fxn) dbll_load_sect,
	(dbll_open_fxn) dbll_open,
	(dbll_read_sect_fxn) dbll_read_sect,
	(dbll_set_attrs_fxn) dbll_set_attrs,
	(dbll_unload_fxn) dbll_unload,
	(dbll_unload_sect_fxn) dbll_unload_sect,
};
270
static u32 refs;		/* module reference count */

/*
 *  Forward declarations of file-local helpers; definitions follow the
 *  public nldr_* entry points below.
 */
static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
				u32 addr, u32 bytes);
static int add_ovly_node(struct dsp_uuid *uuid_obj,
				enum dsp_dcdobjtype obj_type, void *handle);
static int add_ovly_sect(struct nldr_object *nldr_obj,
				struct ovly_sect **lst,
				struct dbll_sect_info *sect_inf,
				bool *exists, u32 addr, u32 bytes);
static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
			   s32 mtype);
static void free_sects(struct nldr_object *nldr_obj,
		       struct ovly_sect *phase_sects, u16 alloc_num);
static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
			     char *sym_name, struct dbll_sym_val **sym);
static int load_lib(struct nldr_nodeobject *nldr_node_obj,
			   struct lib_node *root, struct dsp_uuid uuid,
			   bool root_prstnt,
			   struct dbll_library_obj **lib_path,
			   enum nldr_phase phase, u16 depth);
static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
			    enum nldr_phase phase);
static int remote_alloc(void **ref, u16 mem_sect, u32 size,
			       u32 align, u32 *dsp_address,
			       s32 segmnt_id,
			       s32 req, bool reserve);
static int remote_free(void **ref, u16 space, u32 dsp_address, u32 size,
			      bool reserve);

static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
		       struct lib_node *root);
static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
			enum nldr_phase phase);
static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
					 struct dbll_library_obj *lib);
static u32 find_lcm(u32 a, u32 b);
308
/*
 *  ======== nldr_allocate ========
 *  Purpose:
 *      Create a loader node object for a node being allocated, recording
 *      how the node is loaded (dynamic / overlay / part of base image)
 *      and, for dynamic nodes, the per-phase memory segment preferences
 *      unpacked from the DCD property masks.
 *  Parameters:
 *      nldr_obj:       Loader object created by nldr_create().
 *      priv_ref:       Opaque handle passed back through write callbacks.
 *      node_props:     DCD node properties (load type, segment masks).
 *      nldr_nodeobj:   Out: new node object; NULL on failure.
 *      pf_phase_split: Pointer to the caller's phase-split flag; the
 *                      pointer itself is stored (not the value), so the
 *                      caller retains ownership of the flag.
 *  Returns:
 *      0 on success, -ENOMEM if allocation of the node object fails.
 */
int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
			 const struct dcd_nodeprops *node_props,
			 struct nldr_nodeobject **nldr_nodeobj,
			 bool *pf_phase_split)
{
	struct nldr_nodeobject *nldr_node_obj = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_props != NULL);
	DBC_REQUIRE(nldr_nodeobj != NULL);
	DBC_REQUIRE(nldr_obj);

	/* Initialize handle in case of failure */
	*nldr_nodeobj = NULL;
	/* Allocate node object */
	nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);

	if (nldr_node_obj == NULL) {
		status = -ENOMEM;
	} else {
		nldr_node_obj->pf_phase_split = pf_phase_split;
		nldr_node_obj->pers_libs = 0;
		nldr_node_obj->nldr_obj = nldr_obj;
		nldr_node_obj->priv_ref = priv_ref;
		/* Save node's UUID. */
		nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
		/*
		 *  Determine if node is a dynamically loaded node from
		 *  ndb_props.
		 */
		if (node_props->us_load_type == NLDR_DYNAMICLOAD) {
			/* Dynamic node */
			nldr_node_obj->dynamic = true;
			/*
			 *  Extract memory requirements from ndb_props masks.
			 *  Each mask packs, per phase, a 6-bit segid plus a
			 *  required/preferred flag — see the packing
			 *  description at the top of this file.
			 */
			/* Create phase */
			nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
			    (node_props->ul_data_mem_seg_mask >> CREATEBIT) &
			    SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_data_mem_seg_mask >>
			      (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
			nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
			    (node_props->ul_code_mem_seg_mask >>
			     CREATEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_code_mem_seg_mask >>
			      (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
			/* Execute phase */
			nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
			    (node_props->ul_data_mem_seg_mask >>
			     EXECUTEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_data_mem_seg_mask >>
			      (EXECUTEBIT + FLAGBIT)) & 1) <<
			    EXECUTEDATAFLAGBIT;
			nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
			    (node_props->ul_code_mem_seg_mask >>
			     EXECUTEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_code_mem_seg_mask >>
			      (EXECUTEBIT + FLAGBIT)) & 1) <<
			    EXECUTECODEFLAGBIT;
			/* Delete phase */
			nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
			    (node_props->ul_data_mem_seg_mask >> DELETEBIT) &
			    SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_data_mem_seg_mask >>
			      (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
			nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
			    (node_props->ul_code_mem_seg_mask >>
			     DELETEBIT) & SEGMASK;
			nldr_node_obj->code_data_flag_mask |=
			    ((node_props->ul_code_mem_seg_mask >>
			      (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
		} else {
			/* Non-dynamically loaded nodes are part of the
			 * base image */
			nldr_node_obj->root.lib = nldr_obj->base_lib;
			/* Check for overlay node */
			if (node_props->us_load_type == NLDR_OVLYLOAD)
				nldr_node_obj->overlay = true;

		}
		*nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
	}
	/* Cleanup on failure */
	if (status && nldr_node_obj)
		kfree(nldr_node_obj);

	DBC_ENSURE((!status && *nldr_nodeobj)
		   || (status && *nldr_nodeobj == NULL));
	return status;
}
409
410/*
411 *  ======== nldr_create ========
412 */
413int nldr_create(struct nldr_object **nldr,
414		       struct dev_object *hdev_obj,
415		       const struct nldr_attrs *pattrs)
416{
417	struct cod_manager *cod_mgr;	/* COD manager */
418	char *psz_coff_buf = NULL;
419	char sz_zl_file[COD_MAXPATHLENGTH];
420	struct nldr_object *nldr_obj = NULL;
421	struct dbll_attrs save_attrs;
422	struct dbll_attrs new_attrs;
423	dbll_flags flags;
424	u32 ul_entry;
425	u16 dload_segs = 0;
426	struct mem_seg_info *mem_info_obj;
427	u32 ul_len = 0;
428	u32 ul_addr;
429	struct rmm_segment *rmm_segs = NULL;
430	u16 i;
431	int status = 0;
432	DBC_REQUIRE(refs > 0);
433	DBC_REQUIRE(nldr != NULL);
434	DBC_REQUIRE(hdev_obj != NULL);
435	DBC_REQUIRE(pattrs != NULL);
436	DBC_REQUIRE(pattrs->pfn_ovly != NULL);
437	DBC_REQUIRE(pattrs->pfn_write != NULL);
438
439	/* Allocate dynamic loader object */
440	nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
441	if (nldr_obj) {
442		nldr_obj->hdev_obj = hdev_obj;
443		/* warning, lazy status checking alert! */
444		dev_get_cod_mgr(hdev_obj, &cod_mgr);
445		if (cod_mgr) {
446			status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
447			DBC_ASSERT(!status);
448			status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
449			DBC_ASSERT(!status);
450			status =
451			    cod_get_base_name(cod_mgr, sz_zl_file,
452							COD_MAXPATHLENGTH);
453			DBC_ASSERT(!status);
454		}
455		status = 0;
456		/* end lazy status checking */
457		nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size;
458		nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size;
459		nldr_obj->ldr_fxns = ldr_fxns;
460		if (!(nldr_obj->ldr_fxns.init_fxn()))
461			status = -ENOMEM;
462
463	} else {
464		status = -ENOMEM;
465	}
466	/* Create the DCD Manager */
467	if (!status)
468		status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
469
470	/* Get dynamic loading memory sections from base lib */
471	if (!status) {
472		status =
473		    nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
474						    DYNMEMSECT, &ul_addr,
475						    &ul_len);
476		if (!status) {
477			psz_coff_buf =
478				kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
479								GFP_KERNEL);
480			if (!psz_coff_buf)
481				status = -ENOMEM;
482		} else {
483			/* Ok to not have dynamic loading memory */
484			status = 0;
485			ul_len = 0;
486			dev_dbg(bridge, "%s: failed - no dynamic loading mem "
487				"segments: 0x%x\n", __func__, status);
488		}
489	}
490	if (!status && ul_len > 0) {
491		/* Read section containing dynamic load mem segments */
492		status =
493		    nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
494						     DYNMEMSECT, psz_coff_buf,
495						     ul_len);
496	}
497	if (!status && ul_len > 0) {
498		/* Parse memory segment data */
499		dload_segs = (u16) (*((u32 *) psz_coff_buf));
500		if (dload_segs > MAXMEMSEGS)
501			status = -EBADF;
502	}
503	/* Parse dynamic load memory segments */
504	if (!status && dload_segs > 0) {
505		rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
506								GFP_KERNEL);
507		nldr_obj->seg_table =
508				kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
509		if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
510			status = -ENOMEM;
511		} else {
512			nldr_obj->dload_segs = dload_segs;
513			mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
514							       sizeof(u32));
515			for (i = 0; i < dload_segs; i++) {
516				rmm_segs[i].base = (mem_info_obj + i)->base;
517				rmm_segs[i].length = (mem_info_obj + i)->len;
518				rmm_segs[i].space = 0;
519				nldr_obj->seg_table[i] =
520				    (mem_info_obj + i)->type;
521				dev_dbg(bridge,
522					"(proc) DLL MEMSEGMENT: %d, "
523					"Base: 0x%x, Length: 0x%x\n", i,
524					rmm_segs[i].base, rmm_segs[i].length);
525			}
526		}
527	}
528	/* Create Remote memory manager */
529	if (!status)
530		status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
531
532	if (!status) {
533		/* set the alloc, free, write functions for loader */
534		nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
535		new_attrs = save_attrs;
536		new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
537		new_attrs.free = (dbll_free_fxn) remote_free;
538		new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
539		new_attrs.sym_handle = nldr_obj;
540		new_attrs.write = (dbll_write_fxn) pattrs->pfn_write;
541		nldr_obj->ovly_fxn = pattrs->pfn_ovly;
542		nldr_obj->write_fxn = pattrs->pfn_write;
543		nldr_obj->ldr_attrs = new_attrs;
544	}
545	kfree(rmm_segs);
546
547	kfree(psz_coff_buf);
548
549	/* Get overlay nodes */
550	if (!status) {
551		status =
552		    cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
553		/* lazy check */
554		DBC_ASSERT(!status);
555		/* First count number of overlay nodes */
556		status =
557		    dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
558				    add_ovly_node, (void *)nldr_obj);
559		/* Now build table of overlay nodes */
560		if (!status && nldr_obj->ovly_nodes > 0) {
561			/* Allocate table for overlay nodes */
562			nldr_obj->ovly_table =
563					kzalloc(sizeof(struct ovly_node) *
564					nldr_obj->ovly_nodes, GFP_KERNEL);
565			/* Put overlay nodes in the table */
566			nldr_obj->ovly_nid = 0;
567			status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
568						 add_ovly_node,
569						 (void *)nldr_obj);
570		}
571	}
572	/* Do a fake reload of the base image to get overlay section info */
573	if (!status && nldr_obj->ovly_nodes > 0) {
574		save_attrs.write = fake_ovly_write;
575		save_attrs.log_write = add_ovly_info;
576		save_attrs.log_write_handle = nldr_obj;
577		flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
578		status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
579						     &save_attrs, &ul_entry);
580	}
581	if (!status) {
582		*nldr = (struct nldr_object *)nldr_obj;
583	} else {
584		if (nldr_obj)
585			nldr_delete((struct nldr_object *)nldr_obj);
586
587		*nldr = NULL;
588	}
589	DBC_ENSURE((!status && *nldr) || (status && *nldr == NULL));
590	return status;
591}
592
593/*
594 *  ======== nldr_delete ========
595 */
596void nldr_delete(struct nldr_object *nldr_obj)
597{
598	struct ovly_sect *ovly_section;
599	struct ovly_sect *next;
600	u16 i;
601	DBC_REQUIRE(refs > 0);
602	DBC_REQUIRE(nldr_obj);
603
604	nldr_obj->ldr_fxns.exit_fxn();
605	if (nldr_obj->rmm)
606		rmm_delete(nldr_obj->rmm);
607
608	kfree(nldr_obj->seg_table);
609
610	if (nldr_obj->hdcd_mgr)
611		dcd_destroy_manager(nldr_obj->hdcd_mgr);
612
613	/* Free overlay node information */
614	if (nldr_obj->ovly_table) {
615		for (i = 0; i < nldr_obj->ovly_nodes; i++) {
616			ovly_section =
617			    nldr_obj->ovly_table[i].create_sects_list;
618			while (ovly_section) {
619				next = ovly_section->next_sect;
620				kfree(ovly_section);
621				ovly_section = next;
622			}
623			ovly_section =
624			    nldr_obj->ovly_table[i].delete_sects_list;
625			while (ovly_section) {
626				next = ovly_section->next_sect;
627				kfree(ovly_section);
628				ovly_section = next;
629			}
630			ovly_section =
631			    nldr_obj->ovly_table[i].execute_sects_list;
632			while (ovly_section) {
633				next = ovly_section->next_sect;
634				kfree(ovly_section);
635				ovly_section = next;
636			}
637			ovly_section = nldr_obj->ovly_table[i].other_sects_list;
638			while (ovly_section) {
639				next = ovly_section->next_sect;
640				kfree(ovly_section);
641				ovly_section = next;
642			}
643		}
644		kfree(nldr_obj->ovly_table);
645	}
646	kfree(nldr_obj);
647}
648
649/*
650 *  ======== nldr_exit ========
651 *  Discontinue usage of NLDR module.
652 */
653void nldr_exit(void)
654{
655	DBC_REQUIRE(refs > 0);
656
657	refs--;
658
659	if (refs == 0)
660		rmm_exit();
661
662	DBC_ENSURE(refs >= 0);
663}
664
/*
 *  ======== nldr_get_fxn_addr ========
 *  Purpose:
 *      Resolve the DSP address of a node phase function. The search order
 *      is: the library for the current phase (or the root library for
 *      overlay / non-split nodes), then that library's dependent
 *      libraries, then the node's persistent libraries.
 *  Parameters:
 *      nldr_node_obj:  Node whose symbol is being resolved.
 *      str_fxn:        Symbol name to look up.
 *      addr:           Out: resolved DSP address.
 *  Returns:
 *      0 on success, -ESPIPE if the symbol is not found anywhere.
 */
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
			     char *str_fxn, u32 * addr)
{
	struct dbll_sym_val *dbll_sym;
	struct nldr_object *nldr_obj;
	int status = 0;
	bool status1 = false;	/* true once the symbol has been found */
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(nldr_node_obj);
	DBC_REQUIRE(addr != NULL);
	DBC_REQUIRE(str_fxn != NULL);

	nldr_obj = nldr_node_obj->nldr_obj;
	/* Called from node_create(), node_delete(), or node_run(). */
	if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) {
		/* Pick the library that was loaded for the current phase */
		switch (nldr_node_obj->phase) {
		case NLDR_CREATE:
			root = nldr_node_obj->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node_obj->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node_obj->delete_lib;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node_obj->root;
	}
	/* Try the C symbol first, then the raw symbol, in the root lib */
	status1 =
	    nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, str_fxn, &dbll_sym);
	if (!status1)
		status1 =
		    nldr_obj->ldr_fxns.get_addr_fxn(root.lib, str_fxn,
						    &dbll_sym);

	/* If symbol not found, check dependent libraries */
	if (!status1) {
		for (i = 0; i < root.dep_libs; i++) {
			status1 =
			    nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
							    [i].lib, str_fxn,
							    &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(root.dep_libs_tree[i].lib,
						   str_fxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}
	/* Check persistent libraries */
	if (!status1) {
		for (i = 0; i < nldr_node_obj->pers_libs; i++) {
			status1 =
			    nldr_obj->ldr_fxns.
			    get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
					 str_fxn, &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(nldr_node_obj->pers_lib_table
						   [i].lib, str_fxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}

	if (status1)
		*addr = dbll_sym->value;
	else
		status = -ESPIPE;

	return status;
}
756
757/*
758 *  ======== nldr_get_rmm_manager ========
759 *  Given a NLDR object, retrieve RMM Manager Handle
760 */
761int nldr_get_rmm_manager(struct nldr_object *nldr,
762				struct rmm_target_obj **rmm_mgr)
763{
764	int status = 0;
765	struct nldr_object *nldr_obj = nldr;
766	DBC_REQUIRE(rmm_mgr != NULL);
767
768	if (nldr) {
769		*rmm_mgr = nldr_obj->rmm;
770	} else {
771		*rmm_mgr = NULL;
772		status = -EFAULT;
773	}
774
775	DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
776
777	return status;
778}
779
780/*
781 *  ======== nldr_init ========
782 *  Initialize the NLDR module.
783 */
784bool nldr_init(void)
785{
786	DBC_REQUIRE(refs >= 0);
787
788	if (refs == 0)
789		rmm_init();
790
791	refs++;
792
793	DBC_ENSURE(refs > 0);
794	return true;
795}
796
/*
 *  ======== nldr_load ========
 *  Purpose:
 *      Load the given phase of a node. Dynamic nodes have their phase
 *      library (and its dependents) loaded via load_lib(); overlay nodes
 *      have their phase sections copied in via load_ovly(). Static nodes
 *      need no loading and return 0 immediately.
 *  Returns:
 *      0 on success, or the error from load_lib()/load_ovly().
 */
int nldr_load(struct nldr_nodeobject *nldr_node_obj,
		     enum nldr_phase phase)
{
	struct nldr_object *nldr_obj;
	struct dsp_uuid lib_uuid;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(nldr_node_obj);

	nldr_obj = nldr_node_obj->nldr_obj;

	if (nldr_node_obj->dynamic) {
		nldr_node_obj->phase = phase;

		lib_uuid = nldr_node_obj->uuid;

		/* At this point, we may not know if node is split into
		 * different libraries. So we'll go ahead and load the
		 * library, and then save the pointer to the appropriate
		 * location after we know. */

		status =
		    load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
			     false, nldr_node_obj->lib_path, phase, 0);

		if (!status) {
			if (*nldr_node_obj->pf_phase_split) {
				/* Phase libraries are separate: remember
				 * which library serves this phase */
				switch (phase) {
				case NLDR_CREATE:
					nldr_node_obj->create_lib =
					    nldr_node_obj->root;
					break;

				case NLDR_EXECUTE:
					nldr_node_obj->execute_lib =
					    nldr_node_obj->root;
					break;

				case NLDR_DELETE:
					nldr_node_obj->delete_lib =
					    nldr_node_obj->root;
					break;

				default:
					DBC_ASSERT(false);
					break;
				}
			}
		}
	} else {
		if (nldr_node_obj->overlay)
			status = load_ovly(nldr_node_obj, phase);

	}

	return status;
}
858
/*
 *  ======== nldr_unload ========
 *  Purpose:
 *      Unload the given phase of a node, mirroring nldr_load(). For
 *      split dynamic nodes the per-phase library is unloaded; in the
 *      delete phase all persistent libraries are unloaded as well.
 *      Overlay nodes have their phase sections released via
 *      unload_ovly(). Static nodes are a no-op.
 *  Returns:
 *      Always 0 (kept as int for symmetry with nldr_load()).
 */
int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
		       enum nldr_phase phase)
{
	int status = 0;
	struct lib_node *root_lib = NULL;
	s32 i = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(nldr_node_obj);

	if (nldr_node_obj != NULL) {
		if (nldr_node_obj->dynamic) {
			if (*nldr_node_obj->pf_phase_split) {
				switch (phase) {
				case NLDR_CREATE:
					root_lib = &nldr_node_obj->create_lib;
					break;
				case NLDR_EXECUTE:
					root_lib = &nldr_node_obj->execute_lib;
					break;
				case NLDR_DELETE:
					root_lib = &nldr_node_obj->delete_lib;
					/* Unload persistent libraries */
					for (i = 0;
					     i < nldr_node_obj->pers_libs;
					     i++) {
						unload_lib(nldr_node_obj,
							   &nldr_node_obj->
							   pers_lib_table[i]);
					}
					nldr_node_obj->pers_libs = 0;
					break;
				default:
					DBC_ASSERT(false);
					break;
				}
			} else {
				/* Unload main library */
				root_lib = &nldr_node_obj->root;
			}
			if (root_lib)
				unload_lib(nldr_node_obj, root_lib);
		} else {
			if (nldr_node_obj->overlay)
				unload_ovly(nldr_node_obj, phase);

		}
	}
	return status;
}
912
913/*
914 *  ======== add_ovly_info ========
915 */
916static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
917				u32 addr, u32 bytes)
918{
919	char *node_name;
920	char *sect_name = (char *)sect_info->name;
921	bool sect_exists = false;
922	char seps = ':';
923	char *pch;
924	u16 i;
925	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
926	int status = 0;
927
928	/* Is this an overlay section (load address != run address)? */
929	if (sect_info->sect_load_addr == sect_info->sect_run_addr)
930		goto func_end;
931
932	/* Find the node it belongs to */
933	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
934		node_name = nldr_obj->ovly_table[i].node_name;
935		DBC_REQUIRE(node_name);
936		if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
937			/* Found the node */
938			break;
939		}
940	}
941	if (!(i < nldr_obj->ovly_nodes))
942		goto func_end;
943
944	/* Determine which phase this section belongs to */
945	for (pch = sect_name + 1; *pch && *pch != seps; pch++)
946		;;
947
948	if (*pch) {
949		pch++;		/* Skip over the ':' */
950		if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
951			status =
952			    add_ovly_sect(nldr_obj,
953					  &nldr_obj->
954					  ovly_table[i].create_sects_list,
955					  sect_info, &sect_exists, addr, bytes);
956			if (!status && !sect_exists)
957				nldr_obj->ovly_table[i].create_sects++;
958
959		} else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
960			status =
961			    add_ovly_sect(nldr_obj,
962					  &nldr_obj->
963					  ovly_table[i].delete_sects_list,
964					  sect_info, &sect_exists, addr, bytes);
965			if (!status && !sect_exists)
966				nldr_obj->ovly_table[i].delete_sects++;
967
968		} else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
969			status =
970			    add_ovly_sect(nldr_obj,
971					  &nldr_obj->
972					  ovly_table[i].execute_sects_list,
973					  sect_info, &sect_exists, addr, bytes);
974			if (!status && !sect_exists)
975				nldr_obj->ovly_table[i].execute_sects++;
976
977		} else {
978			/* Put in "other" sectins */
979			status =
980			    add_ovly_sect(nldr_obj,
981					  &nldr_obj->
982					  ovly_table[i].other_sects_list,
983					  sect_info, &sect_exists, addr, bytes);
984			if (!status && !sect_exists)
985				nldr_obj->ovly_table[i].other_sects++;
986
987		}
988	}
989func_end:
990	return status;
991}
992
/*
 *  ======== add_ovly_node =========
 *  Callback function passed to dcd_get_objects.
 *  Called once per object in the base image, in two passes: while
 *  ovly_table is still NULL, overlay nodes are only counted
 *  (ovly_nodes++); once the table has been allocated, each overlay
 *  node's UUID and a heap copy of its name are recorded at index
 *  ovly_nid. Non-node objects and non-overlay nodes are skipped.
 */
static int add_ovly_node(struct dsp_uuid *uuid_obj,
				enum dsp_dcdobjtype obj_type, void *handle)
{
	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
	char *node_name = NULL;
	char *pbuf = NULL;
	u32 len;
	struct dcd_genericobj obj_def;
	int status = 0;

	if (obj_type != DSP_DCDNODETYPE)
		goto func_end;

	status =
	    dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type,
			       &obj_def);
	if (status)
		goto func_end;

	/* If overlay node, add to the list */
	if (obj_def.obj_data.node_obj.us_load_type == NLDR_OVLYLOAD) {
		if (nldr_obj->ovly_table == NULL) {
			/* Counting pass */
			nldr_obj->ovly_nodes++;
		} else {
			/* Add node to table */
			nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
			    *uuid_obj;
			DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
				    ac_name);
			len =
			    strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
			node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
			/* kzalloc(len + 1) zero-fills, so the copy below
			 * is always NUL-terminated */
			pbuf = kzalloc(len + 1, GFP_KERNEL);
			if (pbuf == NULL) {
				status = -ENOMEM;
			} else {
				strncpy(pbuf, node_name, len);
				nldr_obj->ovly_table[nldr_obj->ovly_nid].
				    node_name = pbuf;
				nldr_obj->ovly_nid++;
			}
		}
	}
	/* These were allocated in dcd_get_object_def */
	kfree(obj_def.obj_data.node_obj.pstr_create_phase_fxn);

	kfree(obj_def.obj_data.node_obj.pstr_execute_phase_fxn);

	kfree(obj_def.obj_data.node_obj.pstr_delete_phase_fxn);

	kfree(obj_def.obj_data.node_obj.pstr_i_alg_name);

func_end:
	return status;
}
1052
1053/*
1054 *  ======== add_ovly_sect ========
1055 */
1056static int add_ovly_sect(struct nldr_object *nldr_obj,
1057				struct ovly_sect **lst,
1058				struct dbll_sect_info *sect_inf,
1059				bool *exists, u32 addr, u32 bytes)
1060{
1061	struct ovly_sect *new_sect = NULL;
1062	struct ovly_sect *last_sect;
1063	struct ovly_sect *ovly_section;
1064	int status = 0;
1065
1066	ovly_section = last_sect = *lst;
1067	*exists = false;
1068	while (ovly_section) {
1069		/*
1070		 *  Make sure section has not already been added. Multiple
1071		 *  'write' calls may be made to load the section.
1072		 */
1073		if (ovly_section->sect_load_addr == addr) {
1074			/* Already added */
1075			*exists = true;
1076			break;
1077		}
1078		last_sect = ovly_section;
1079		ovly_section = ovly_section->next_sect;
1080	}
1081
1082	if (!ovly_section) {
1083		/* New section */
1084		new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
1085		if (new_sect == NULL) {
1086			status = -ENOMEM;
1087		} else {
1088			new_sect->sect_load_addr = addr;
1089			new_sect->sect_run_addr = sect_inf->sect_run_addr +
1090			    (addr - sect_inf->sect_load_addr);
1091			new_sect->size = bytes;
1092			new_sect->page = sect_inf->type;
1093		}
1094
1095		/* Add to the list */
1096		if (!status) {
1097			if (*lst == NULL) {
1098				/* First in the list */
1099				*lst = new_sect;
1100			} else {
1101				last_sect->next_sect = new_sect;
1102			}
1103		}
1104	}
1105
1106	return status;
1107}
1108
1109/*
1110 *  ======== fake_ovly_write ========
1111 */
1112static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
1113			   s32 mtype)
1114{
1115	return (s32) bytes;
1116}
1117
1118/*
1119 *  ======== free_sects ========
1120 */
1121static void free_sects(struct nldr_object *nldr_obj,
1122		       struct ovly_sect *phase_sects, u16 alloc_num)
1123{
1124	struct ovly_sect *ovly_section = phase_sects;
1125	u16 i = 0;
1126	bool ret;
1127
1128	while (ovly_section && i < alloc_num) {
1129		/* 'Deallocate' */
1130		/* segid - page not supported yet */
1131		/* Reserved memory */
1132		ret =
1133		    rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
1134			     ovly_section->size, true);
1135		DBC_ASSERT(ret);
1136		ovly_section = ovly_section->next_sect;
1137		i++;
1138	}
1139}
1140
1141/*
1142 *  ======== get_symbol_value ========
1143 *  Find symbol in library's base image.  If not there, check dependent
1144 *  libraries.
1145 */
1146static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
1147			     char *sym_name, struct dbll_sym_val **sym)
1148{
1149	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1150	struct nldr_nodeobject *nldr_node_obj =
1151	    (struct nldr_nodeobject *)rmm_handle;
1152	struct lib_node *root = (struct lib_node *)parg;
1153	u16 i;
1154	bool status = false;
1155
1156	/* check the base image */
1157	status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib,
1158						 sym_name, sym);
1159	if (!status)
1160		status =
1161		    nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib,
1162							sym_name, sym);
1163
1164	/*
1165	 *  Check in root lib itself. If the library consists of
1166	 *  multiple object files linked together, some symbols in the
1167	 *  library may need to be resolved.
1168	 */
1169	if (!status) {
1170		status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, sym_name,
1171							 sym);
1172		if (!status) {
1173			status =
1174			    nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib,
1175							      sym_name, sym);
1176		}
1177	}
1178
1179	/*
1180	 *  Check in root lib's dependent libraries, but not dependent
1181	 *  libraries' dependents.
1182	 */
1183	if (!status) {
1184		for (i = 0; i < root->dep_libs; i++) {
1185			status =
1186			    nldr_obj->ldr_fxns.get_addr_fxn(root->
1187							    dep_libs_tree
1188							    [i].lib,
1189							    sym_name, sym);
1190			if (!status) {
1191				status =
1192				    nldr_obj->ldr_fxns.
1193				    get_c_addr_fxn(root->dep_libs_tree[i].lib,
1194						   sym_name, sym);
1195			}
1196			if (status) {
1197				/* Symbol found */
1198				break;
1199			}
1200		}
1201	}
1202	/*
1203	 * Check in persistent libraries
1204	 */
1205	if (!status) {
1206		for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1207			status =
1208			    nldr_obj->ldr_fxns.
1209			    get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
1210					 sym_name, sym);
1211			if (!status) {
1212				status = nldr_obj->ldr_fxns.get_c_addr_fxn
1213				    (nldr_node_obj->pers_lib_table[i].lib,
1214				     sym_name, sym);
1215			}
1216			if (status) {
1217				/* Symbol found */
1218				break;
1219			}
1220		}
1221	}
1222
1223	return status;
1224}
1225
1226/*
1227 *  ======== load_lib ========
1228 *  Recursively load library and all its dependent libraries. The library
1229 *  we're loading is specified by a uuid.
1230 */
1231static int load_lib(struct nldr_nodeobject *nldr_node_obj,
1232			   struct lib_node *root, struct dsp_uuid uuid,
1233			   bool root_prstnt,
1234			   struct dbll_library_obj **lib_path,
1235			   enum nldr_phase phase, u16 depth)
1236{
1237	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1238	u16 nd_libs = 0;	/* Number of dependent libraries */
1239	u16 np_libs = 0;	/* Number of persistent libraries */
1240	u16 nd_libs_loaded = 0;	/* Number of dep. libraries loaded */
1241	u16 i;
1242	u32 entry;
1243	u32 dw_buf_size = NLDR_MAXPATHLENGTH;
1244	dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
1245	struct dbll_attrs new_attrs;
1246	char *psz_file_name = NULL;
1247	struct dsp_uuid *dep_lib_uui_ds = NULL;
1248	bool *persistent_dep_libs = NULL;
1249	int status = 0;
1250	bool lib_status = false;
1251	struct lib_node *dep_lib;
1252
1253	if (depth > MAXDEPTH) {
1254		/* Error */
1255		DBC_ASSERT(false);
1256	}
1257	root->lib = NULL;
1258	/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
1259	psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
1260	if (psz_file_name == NULL)
1261		status = -ENOMEM;
1262
1263	if (!status) {
1264		/* Get the name of the library */
1265		if (depth == 0) {
1266			status =
1267			    dcd_get_library_name(nldr_node_obj->nldr_obj->
1268						 hdcd_mgr, &uuid, psz_file_name,
1269						 &dw_buf_size, phase,
1270						 nldr_node_obj->pf_phase_split);
1271		} else {
1272			/* Dependent libraries are registered with a phase */
1273			status =
1274			    dcd_get_library_name(nldr_node_obj->nldr_obj->
1275						 hdcd_mgr, &uuid, psz_file_name,
1276						 &dw_buf_size, NLDR_NOPHASE,
1277						 NULL);
1278		}
1279	}
1280	if (!status) {
1281		/* Open the library, don't load symbols */
1282		status =
1283		    nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
1284						DBLL_NOLOAD, &root->lib);
1285	}
1286	/* Done with file name */
1287	kfree(psz_file_name);
1288
1289	/* Check to see if library not already loaded */
1290	if (!status && root_prstnt) {
1291		lib_status =
1292		    find_in_persistent_lib_array(nldr_node_obj, root->lib);
1293		/* Close library */
1294		if (lib_status) {
1295			nldr_obj->ldr_fxns.close_fxn(root->lib);
1296			return 0;
1297		}
1298	}
1299	if (!status) {
1300		/* Check for circular dependencies. */
1301		for (i = 0; i < depth; i++) {
1302			if (root->lib == lib_path[i]) {
1303				/* This condition could be checked by a
1304				 * tool at build time. */
1305				status = -EILSEQ;
1306			}
1307		}
1308	}
1309	if (!status) {
1310		/* Add library to current path in dependency tree */
1311		lib_path[depth] = root->lib;
1312		depth++;
1313		/* Get number of dependent libraries */
1314		status =
1315		    dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr,
1316					 &uuid, &nd_libs, &np_libs, phase);
1317	}
1318	DBC_ASSERT(nd_libs >= np_libs);
1319	if (!status) {
1320		if (!(*nldr_node_obj->pf_phase_split))
1321			np_libs = 0;
1322
1323		/* nd_libs = #of dependent libraries */
1324		root->dep_libs = nd_libs - np_libs;
1325		if (nd_libs > 0) {
1326			dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
1327							nd_libs, GFP_KERNEL);
1328			persistent_dep_libs =
1329				kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
1330			if (!dep_lib_uui_ds || !persistent_dep_libs)
1331				status = -ENOMEM;
1332
1333			if (root->dep_libs > 0) {
1334				/* Allocate arrays for dependent lib UUIDs,
1335				 * lib nodes */
1336				root->dep_libs_tree = kzalloc
1337						(sizeof(struct lib_node) *
1338						(root->dep_libs), GFP_KERNEL);
1339				if (!(root->dep_libs_tree))
1340					status = -ENOMEM;
1341
1342			}
1343
1344			if (!status) {
1345				/* Get the dependent library UUIDs */
1346				status =
1347				    dcd_get_dep_libs(nldr_node_obj->
1348						     nldr_obj->hdcd_mgr, &uuid,
1349						     nd_libs, dep_lib_uui_ds,
1350						     persistent_dep_libs,
1351						     phase);
1352			}
1353		}
1354	}
1355
1356	/*
1357	 *  Recursively load dependent libraries.
1358	 */
1359	if (!status) {
1360		for (i = 0; i < nd_libs; i++) {
1361			/* If root library is NOT persistent, and dep library
1362			 * is, then record it.  If root library IS persistent,
1363			 * the deplib is already included */
1364			if (!root_prstnt && persistent_dep_libs[i] &&
1365			    *nldr_node_obj->pf_phase_split) {
1366				if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
1367					status = -EILSEQ;
1368					break;
1369				}
1370
1371				/* Allocate library outside of phase */
1372				dep_lib =
1373				    &nldr_node_obj->pers_lib_table
1374				    [nldr_node_obj->pers_libs];
1375			} else {
1376				if (root_prstnt)
1377					persistent_dep_libs[i] = true;
1378
1379				/* Allocate library within phase */
1380				dep_lib = &root->dep_libs_tree[nd_libs_loaded];
1381			}
1382
1383			status = load_lib(nldr_node_obj, dep_lib,
1384					  dep_lib_uui_ds[i],
1385					  persistent_dep_libs[i], lib_path,
1386					  phase, depth);
1387
1388			if (!status) {
1389				if ((status != 0) &&
1390				    !root_prstnt && persistent_dep_libs[i] &&
1391				    *nldr_node_obj->pf_phase_split) {
1392					(nldr_node_obj->pers_libs)++;
1393				} else {
1394					if (!persistent_dep_libs[i] ||
1395					    !(*nldr_node_obj->pf_phase_split)) {
1396						nd_libs_loaded++;
1397					}
1398				}
1399			} else {
1400				break;
1401			}
1402		}
1403	}
1404
1405	/* Now we can load the root library */
1406	if (!status) {
1407		new_attrs = nldr_obj->ldr_attrs;
1408		new_attrs.sym_arg = root;
1409		new_attrs.rmm_handle = nldr_node_obj;
1410		new_attrs.input_params = nldr_node_obj->priv_ref;
1411		new_attrs.base_image = false;
1412
1413		status =
1414		    nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
1415						&entry);
1416	}
1417
1418	/*
1419	 *  In case of failure, unload any dependent libraries that
1420	 *  were loaded, and close the root library.
1421	 *  (Persistent libraries are unloaded from the very top)
1422	 */
1423	if (status) {
1424		if (phase != NLDR_EXECUTE) {
1425			for (i = 0; i < nldr_node_obj->pers_libs; i++)
1426				unload_lib(nldr_node_obj,
1427					   &nldr_node_obj->pers_lib_table[i]);
1428
1429			nldr_node_obj->pers_libs = 0;
1430		}
1431		for (i = 0; i < nd_libs_loaded; i++)
1432			unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
1433
1434		if (root->lib)
1435			nldr_obj->ldr_fxns.close_fxn(root->lib);
1436
1437	}
1438
1439	/* Going up one node in the dependency tree */
1440	depth--;
1441
1442	kfree(dep_lib_uui_ds);
1443	dep_lib_uui_ds = NULL;
1444
1445	kfree(persistent_dep_libs);
1446	persistent_dep_libs = NULL;
1447
1448	return status;
1449}
1450
1451/*
1452 *  ======== load_ovly ========
1453 */
1454static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
1455			    enum nldr_phase phase)
1456{
1457	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1458	struct ovly_node *po_node = NULL;
1459	struct ovly_sect *phase_sects = NULL;
1460	struct ovly_sect *other_sects_list = NULL;
1461	u16 i;
1462	u16 alloc_num = 0;
1463	u16 other_alloc = 0;
1464	u16 *ref_count = NULL;
1465	u16 *other_ref = NULL;
1466	u32 bytes;
1467	struct ovly_sect *ovly_section;
1468	int status = 0;
1469
1470	/* Find the node in the table */
1471	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
1472		if (is_equal_uuid
1473		    (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
1474			/* Found it */
1475			po_node = &(nldr_obj->ovly_table[i]);
1476			break;
1477		}
1478	}
1479
1480	DBC_ASSERT(i < nldr_obj->ovly_nodes);
1481
1482	if (!po_node) {
1483		status = -ENOENT;
1484		goto func_end;
1485	}
1486
1487	switch (phase) {
1488	case NLDR_CREATE:
1489		ref_count = &(po_node->create_ref);
1490		other_ref = &(po_node->other_ref);
1491		phase_sects = po_node->create_sects_list;
1492		other_sects_list = po_node->other_sects_list;
1493		break;
1494
1495	case NLDR_EXECUTE:
1496		ref_count = &(po_node->execute_ref);
1497		phase_sects = po_node->execute_sects_list;
1498		break;
1499
1500	case NLDR_DELETE:
1501		ref_count = &(po_node->delete_ref);
1502		phase_sects = po_node->delete_sects_list;
1503		break;
1504
1505	default:
1506		DBC_ASSERT(false);
1507		break;
1508	}
1509
1510	if (ref_count == NULL)
1511		goto func_end;
1512
1513	if (*ref_count != 0)
1514		goto func_end;
1515
1516	/* 'Allocate' memory for overlay sections of this phase */
1517	ovly_section = phase_sects;
1518	while (ovly_section) {
1519		/* allocate *//* page not supported yet */
1520		/* reserve *//* align */
1521		status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
1522				   &(ovly_section->sect_run_addr), true);
1523		if (!status) {
1524			ovly_section = ovly_section->next_sect;
1525			alloc_num++;
1526		} else {
1527			break;
1528		}
1529	}
1530	if (other_ref && *other_ref == 0) {
1531		/* 'Allocate' memory for other overlay sections
1532		 * (create phase) */
1533		if (!status) {
1534			ovly_section = other_sects_list;
1535			while (ovly_section) {
1536				/* page not supported *//* align */
1537				/* reserve */
1538				status =
1539				    rmm_alloc(nldr_obj->rmm, 0,
1540					      ovly_section->size, 0,
1541					      &(ovly_section->sect_run_addr),
1542					      true);
1543				if (!status) {
1544					ovly_section = ovly_section->next_sect;
1545					other_alloc++;
1546				} else {
1547					break;
1548				}
1549			}
1550		}
1551	}
1552	if (*ref_count == 0) {
1553		if (!status) {
1554			/* Load sections for this phase */
1555			ovly_section = phase_sects;
1556			while (ovly_section && !status) {
1557				bytes =
1558				    (*nldr_obj->ovly_fxn) (nldr_node_obj->
1559							   priv_ref,
1560							   ovly_section->
1561							   sect_run_addr,
1562							   ovly_section->
1563							   sect_load_addr,
1564							   ovly_section->size,
1565							   ovly_section->page);
1566				if (bytes != ovly_section->size)
1567					status = -EPERM;
1568
1569				ovly_section = ovly_section->next_sect;
1570			}
1571		}
1572	}
1573	if (other_ref && *other_ref == 0) {
1574		if (!status) {
1575			/* Load other sections (create phase) */
1576			ovly_section = other_sects_list;
1577			while (ovly_section && !status) {
1578				bytes =
1579				    (*nldr_obj->ovly_fxn) (nldr_node_obj->
1580							   priv_ref,
1581							   ovly_section->
1582							   sect_run_addr,
1583							   ovly_section->
1584							   sect_load_addr,
1585							   ovly_section->size,
1586							   ovly_section->page);
1587				if (bytes != ovly_section->size)
1588					status = -EPERM;
1589
1590				ovly_section = ovly_section->next_sect;
1591			}
1592		}
1593	}
1594	if (status) {
1595		/* 'Deallocate' memory */
1596		free_sects(nldr_obj, phase_sects, alloc_num);
1597		free_sects(nldr_obj, other_sects_list, other_alloc);
1598	}
1599func_end:
1600	if (!status && (ref_count != NULL)) {
1601		*ref_count += 1;
1602		if (other_ref)
1603			*other_ref += 1;
1604
1605	}
1606
1607	return status;
1608}
1609
1610/*
1611 *  ======== remote_alloc ========
1612 */
1613static int remote_alloc(void **ref, u16 mem_sect, u32 size,
1614			       u32 align, u32 *dsp_address,
1615			       s32 segmnt_id, s32 req,
1616			       bool reserve)
1617{
1618	struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)ref;
1619	struct nldr_object *nldr_obj;
1620	struct rmm_target_obj *rmm;
1621	u16 mem_phase_bit = MAXFLAGS;
1622	u16 segid = 0;
1623	u16 i;
1624	u16 mem_sect_type;
1625	u32 word_size;
1626	struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
1627	bool mem_load_req = false;
1628	int status = -ENOMEM;	/* Set to fail */
1629	DBC_REQUIRE(hnode);
1630	DBC_REQUIRE(mem_sect == DBLL_CODE || mem_sect == DBLL_DATA ||
1631		    mem_sect == DBLL_BSS);
1632	nldr_obj = hnode->nldr_obj;
1633	rmm = nldr_obj->rmm;
1634	/* Convert size to DSP words */
1635	word_size =
1636	    (size + nldr_obj->us_dsp_word_size -
1637	     1) / nldr_obj->us_dsp_word_size;
1638	/* Modify memory 'align' to account for DSP cache line size */
1639	align = find_lcm(GEM_CACHE_LINE_SIZE, align);
1640	dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
1641	if (segmnt_id != -1) {
1642		rmm_addr_obj->segid = segmnt_id;
1643		segid = segmnt_id;
1644		mem_load_req = req;
1645	} else {
1646		switch (hnode->phase) {
1647		case NLDR_CREATE:
1648			mem_phase_bit = CREATEDATAFLAGBIT;
1649			break;
1650		case NLDR_DELETE:
1651			mem_phase_bit = DELETEDATAFLAGBIT;
1652			break;
1653		case NLDR_EXECUTE:
1654			mem_phase_bit = EXECUTEDATAFLAGBIT;
1655			break;
1656		default:
1657			DBC_ASSERT(false);
1658			break;
1659		}
1660		if (mem_sect == DBLL_CODE)
1661			mem_phase_bit++;
1662
1663		if (mem_phase_bit < MAXFLAGS)
1664			segid = hnode->seg_id[mem_phase_bit];
1665
1666		/* Determine if there is a memory loading requirement */
1667		if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
1668			mem_load_req = true;
1669
1670	}
1671	mem_sect_type = (mem_sect == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;
1672
1673	/* Find an appropriate segment based on mem_sect */
1674	if (segid == NULLID) {
1675		/* No memory requirements of preferences */
1676		DBC_ASSERT(!mem_load_req);
1677		goto func_cont;
1678	}
1679	if (segid <= MAXSEGID) {
1680		DBC_ASSERT(segid < nldr_obj->dload_segs);
1681		/* Attempt to allocate from segid first. */
1682		rmm_addr_obj->segid = segid;
1683		status =
1684		    rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
1685		if (status) {
1686			dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
1687				__func__, segid);
1688		}
1689	} else {
1690		/* segid > MAXSEGID ==> Internal or external memory */
1691		DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
1692		/*  Check for any internal or external memory segment,
1693		 *  depending on segid. */
1694		mem_sect_type |= segid == MEMINTERNALID ?
1695		    DYNM_INTERNAL : DYNM_EXTERNAL;
1696		for (i = 0; i < nldr_obj->dload_segs; i++) {
1697			if ((nldr_obj->seg_table[i] & mem_sect_type) !=
1698			    mem_sect_type)
1699				continue;
1700
1701			status = rmm_alloc(rmm, i, word_size, align,
1702					dsp_address, false);
1703			if (!status) {
1704				/* Save segid for freeing later */
1705				rmm_addr_obj->segid = i;
1706				break;
1707			}
1708		}
1709	}
1710func_cont:
1711	/* Haven't found memory yet, attempt to find any segment that works */
1712	if (status == -ENOMEM && !mem_load_req) {
1713		dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
1714			"another\n", __func__);
1715		for (i = 0; i < nldr_obj->dload_segs; i++) {
1716			/* All bits of mem_sect_type must be set */
1717			if ((nldr_obj->seg_table[i] & mem_sect_type) !=
1718			    mem_sect_type)
1719				continue;
1720
1721			status = rmm_alloc(rmm, i, word_size, align,
1722					   dsp_address, false);
1723			if (!status) {
1724				/* Save segid */
1725				rmm_addr_obj->segid = i;
1726				break;
1727			}
1728		}
1729	}
1730
1731	return status;
1732}
1733
1734static int remote_free(void **ref, u16 space, u32 dsp_address,
1735			      u32 size, bool reserve)
1736{
1737	struct nldr_object *nldr_obj = (struct nldr_object *)ref;
1738	struct rmm_target_obj *rmm;
1739	u32 word_size;
1740	int status = -ENOMEM;	/* Set to fail */
1741
1742	DBC_REQUIRE(nldr_obj);
1743
1744	rmm = nldr_obj->rmm;
1745
1746	/* Convert size to DSP words */
1747	word_size =
1748	    (size + nldr_obj->us_dsp_word_size -
1749	     1) / nldr_obj->us_dsp_word_size;
1750
1751	if (rmm_free(rmm, space, dsp_address, word_size, reserve))
1752		status = 0;
1753
1754	return status;
1755}
1756
1757/*
1758 *  ======== unload_lib ========
1759 */
1760static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
1761		       struct lib_node *root)
1762{
1763	struct dbll_attrs new_attrs;
1764	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1765	u16 i;
1766
1767	DBC_ASSERT(root != NULL);
1768
1769	/* Unload dependent libraries */
1770	for (i = 0; i < root->dep_libs; i++)
1771		unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
1772
1773	root->dep_libs = 0;
1774
1775	new_attrs = nldr_obj->ldr_attrs;
1776	new_attrs.rmm_handle = nldr_obj->rmm;
1777	new_attrs.input_params = nldr_node_obj->priv_ref;
1778	new_attrs.base_image = false;
1779	new_attrs.sym_arg = root;
1780
1781	if (root->lib) {
1782		/* Unload the root library */
1783		nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
1784		nldr_obj->ldr_fxns.close_fxn(root->lib);
1785	}
1786
1787	/* Free dependent library list */
1788	kfree(root->dep_libs_tree);
1789	root->dep_libs_tree = NULL;
1790}
1791
1792/*
1793 *  ======== unload_ovly ========
1794 */
1795static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
1796			enum nldr_phase phase)
1797{
1798	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1799	struct ovly_node *po_node = NULL;
1800	struct ovly_sect *phase_sects = NULL;
1801	struct ovly_sect *other_sects_list = NULL;
1802	u16 i;
1803	u16 alloc_num = 0;
1804	u16 other_alloc = 0;
1805	u16 *ref_count = NULL;
1806	u16 *other_ref = NULL;
1807
1808	/* Find the node in the table */
1809	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
1810		if (is_equal_uuid
1811		    (&nldr_node_obj->uuid, &nldr_obj->ovly_table[i].uuid)) {
1812			/* Found it */
1813			po_node = &(nldr_obj->ovly_table[i]);
1814			break;
1815		}
1816	}
1817
1818	DBC_ASSERT(i < nldr_obj->ovly_nodes);
1819
1820	if (!po_node)
1821		/* TODO: Should we print warning here? */
1822		return;
1823
1824	switch (phase) {
1825	case NLDR_CREATE:
1826		ref_count = &(po_node->create_ref);
1827		phase_sects = po_node->create_sects_list;
1828		alloc_num = po_node->create_sects;
1829		break;
1830	case NLDR_EXECUTE:
1831		ref_count = &(po_node->execute_ref);
1832		phase_sects = po_node->execute_sects_list;
1833		alloc_num = po_node->execute_sects;
1834		break;
1835	case NLDR_DELETE:
1836		ref_count = &(po_node->delete_ref);
1837		other_ref = &(po_node->other_ref);
1838		phase_sects = po_node->delete_sects_list;
1839		/* 'Other' overlay sections are unloaded in the delete phase */
1840		other_sects_list = po_node->other_sects_list;
1841		alloc_num = po_node->delete_sects;
1842		other_alloc = po_node->other_sects;
1843		break;
1844	default:
1845		DBC_ASSERT(false);
1846		break;
1847	}
1848	DBC_ASSERT(ref_count && (*ref_count > 0));
1849	if (ref_count && (*ref_count > 0)) {
1850		*ref_count -= 1;
1851		if (other_ref) {
1852			DBC_ASSERT(*other_ref > 0);
1853			*other_ref -= 1;
1854		}
1855	}
1856
1857	if (ref_count && *ref_count == 0) {
1858		/* 'Deallocate' memory */
1859		free_sects(nldr_obj, phase_sects, alloc_num);
1860	}
1861	if (other_ref && *other_ref == 0)
1862		free_sects(nldr_obj, other_sects_list, other_alloc);
1863}
1864
1865/*
1866 *  ======== find_in_persistent_lib_array ========
1867 */
1868static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
1869					 struct dbll_library_obj *lib)
1870{
1871	s32 i = 0;
1872
1873	for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1874		if (lib == nldr_node_obj->pers_lib_table[i].lib)
1875			return true;
1876
1877	}
1878
1879	return false;
1880}
1881
1882/*
1883 * ================ Find LCM (Least Common Multiplier ===
1884 */
1885static u32 find_lcm(u32 a, u32 b)
1886{
1887	u32 ret;
1888
1889	ret = a * b / gcd(a, b);
1890
1891	return ret;
1892}
1893
1894#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1895/**
1896 * nldr_find_addr() - Find the closest symbol to the given address based on
1897 *		dynamic node object.
1898 *
1899 * @nldr_node:		Dynamic node object
1900 * @sym_addr:		Given address to find the dsp symbol
1901 * @offset_range:		offset range to look for dsp symbol
1902 * @offset_output:		Symbol Output address
1903 * @sym_name:		String with the dsp symbol
1904 *
1905 * 	This function finds the node library for a given address and
1906 *	retrieves the dsp symbol by calling dbll_find_dsp_symbol.
1907 */
1908int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
1909			u32 offset_range, void *offset_output, char *sym_name)
1910{
1911	int status = 0;
1912	bool status1 = false;
1913	s32 i = 0;
1914	struct lib_node root = { NULL, 0, NULL };
1915	DBC_REQUIRE(refs > 0);
1916	DBC_REQUIRE(offset_output != NULL);
1917	DBC_REQUIRE(sym_name != NULL);
1918	pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x,  %s)\n", __func__, (u32) nldr_node,
1919			sym_addr, offset_range, (u32) offset_output, sym_name);
1920
1921	if (nldr_node->dynamic && *nldr_node->pf_phase_split) {
1922		switch (nldr_node->phase) {
1923		case NLDR_CREATE:
1924			root = nldr_node->create_lib;
1925			break;
1926		case NLDR_EXECUTE:
1927			root = nldr_node->execute_lib;
1928			break;
1929		case NLDR_DELETE:
1930			root = nldr_node->delete_lib;
1931			break;
1932		default:
1933			DBC_ASSERT(false);
1934			break;
1935		}
1936	} else {
1937		/* for Overlay nodes or non-split Dynamic nodes */
1938		root = nldr_node->root;
1939	}
1940
1941	status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
1942			offset_range, offset_output, sym_name);
1943
1944	/* If symbol not found, check dependent libraries */
1945	if (!status1)
1946		for (i = 0; i < root.dep_libs; i++) {
1947			status1 = dbll_find_dsp_symbol(
1948				root.dep_libs_tree[i].lib, sym_addr,
1949				offset_range, offset_output, sym_name);
1950			if (status1)
1951				/* Symbol found */
1952				break;
1953		}
1954	/* Check persistent libraries */
1955	if (!status1)
1956		for (i = 0; i < nldr_node->pers_libs; i++) {
1957			status1 = dbll_find_dsp_symbol(
1958				nldr_node->pers_lib_table[i].lib, sym_addr,
1959				offset_range, offset_output, sym_name);
1960			if (status1)
1961				/* Symbol found */
1962				break;
1963		}
1964
1965	if (!status1) {
1966		pr_debug("%s: Address 0x%x not found in range %d.\n",
1967					__func__, sym_addr, offset_range);
1968		status = -ESPIPE;
1969	}
1970
1971	return status;
1972}
1973#endif
1974