• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/staging/tidspbridge/rmgr/
1/*
2 * proc.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Processor interface at the driver level.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/types.h>
20/* ------------------------------------ Host OS */
21#include <linux/dma-mapping.h>
22#include <linux/scatterlist.h>
23#include <dspbridge/host_os.h>
24
25/*  ----------------------------------- DSP/BIOS Bridge */
26#include <dspbridge/dbdefs.h>
27
28/*  ----------------------------------- Trace & Debug */
29#include <dspbridge/dbc.h>
30
31/*  ----------------------------------- OS Adaptation Layer */
32#include <dspbridge/cfg.h>
33#include <dspbridge/list.h>
34#include <dspbridge/ntfy.h>
35#include <dspbridge/sync.h>
36/*  ----------------------------------- Bridge Driver */
37#include <dspbridge/dspdefs.h>
38#include <dspbridge/dspdeh.h>
39/*  ----------------------------------- Platform Manager */
40#include <dspbridge/cod.h>
41#include <dspbridge/dev.h>
42#include <dspbridge/procpriv.h>
43#include <dspbridge/dmm.h>
44
45/*  ----------------------------------- Resource Manager */
46#include <dspbridge/mgr.h>
47#include <dspbridge/node.h>
48#include <dspbridge/nldr.h>
49#include <dspbridge/rmm.h>
50
51/*  ----------------------------------- Others */
52#include <dspbridge/dbdcd.h>
53#include <dspbridge/msg.h>
54#include <dspbridge/dspioctl.h>
55#include <dspbridge/drv.h>
56
57/*  ----------------------------------- This */
58#include <dspbridge/proc.h>
59#include <dspbridge/pwr.h>
60
61#include <dspbridge/resourcecleanup.h>
62/*  ----------------------------------- Defines, Data Structures, Typedefs */
63#define MAXCMDLINELEN       255
64#define PROC_ENVPROCID      "PROC_ID=%d"
65#define MAXPROCIDLEN	(8 + 5)
66#define PROC_DFLT_TIMEOUT   10000	/* Time out in milliseconds */
67#define PWR_TIMEOUT	 500	/* Sleep/wake timout in msec */
68#define EXTEND	      "_EXT_END"	/* Extmem end addr in DSP binary */
69
70#define DSP_CACHE_LINE 128
71
72#define BUFMODE_MASK	(3 << 14)
73
74/* Buffer modes from DSP perspective */
75#define RBUF		0x4000		/* Input buffer */
76#define WBUF		0x8000		/* Output Buffer */
77
78extern struct device *bridge;
79
80/*  ----------------------------------- Globals */
81
82/* The proc_object structure. */
/*
 * Per-client processor state.  One proc_object exists for each GPP
 * process attached to a DSP; it is linked into the DEV object's list
 * via 'link' (see dev_insert_proc_object in proc_attach).
 */
struct proc_object {
	struct list_head link;	/* Link to next proc_object */
	struct dev_object *hdev_obj;	/* Device this PROC represents */
	u32 process;		/* Process owning this Processor (TGID) */
	struct mgr_object *hmgr_obj;	/* Manager Object Handle */
	u32 attach_count;	/* Processor attach count */
	u32 processor_id;	/* Processor number */
	u32 utimeout;		/* Time out count (ms, see PROC_DFLT_TIMEOUT) */
	enum dsp_procstate proc_state;	/* Processor state */
	u32 ul_unit;		/* DDSP unit number */
	bool is_already_attached;	/*
					 * True if the Device below has
					 * GPP Client attached
					 */
	struct ntfy_object *ntfy_obj;	/* Manages  notifications */
	/* Bridge Context Handle */
	struct bridge_dev_context *hbridge_context;
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	char *psz_last_coff;	/* Path of last loaded COFF; kfree'd on detach */
	struct list_head proc_list;
};
105
106static u32 refs;
107
108DEFINE_MUTEX(proc_lock);	/* For critical sections */
109
110/*  ----------------------------------- Function Prototypes */
111static int proc_monitor(struct proc_object *proc_obj);
112static s32 get_envp_count(char **envp);
113static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
114			   s32 cnew_envp, char *sz_var);
115
116/* remember mapping information */
117static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
118				u32 mpu_addr, u32 dsp_addr, u32 size)
119{
120	struct dmm_map_object *map_obj;
121
122	u32 num_usr_pgs = size / PG_SIZE4K;
123
124	pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n",
125						__func__, mpu_addr,
126						dsp_addr, size);
127
128	map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
129	if (!map_obj) {
130		pr_err("%s: kzalloc failed\n", __func__);
131		return NULL;
132	}
133	INIT_LIST_HEAD(&map_obj->link);
134
135	map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
136							GFP_KERNEL);
137	if (!map_obj->pages) {
138		pr_err("%s: kzalloc failed\n", __func__);
139		kfree(map_obj);
140		return NULL;
141	}
142
143	map_obj->mpu_addr = mpu_addr;
144	map_obj->dsp_addr = dsp_addr;
145	map_obj->size = size;
146	map_obj->num_usr_pgs = num_usr_pgs;
147
148	spin_lock(&pr_ctxt->dmm_map_lock);
149	list_add(&map_obj->link, &pr_ctxt->dmm_map_list);
150	spin_unlock(&pr_ctxt->dmm_map_lock);
151
152	return map_obj;
153}
154
155static int match_exact_map_obj(struct dmm_map_object *map_obj,
156					u32 dsp_addr, u32 size)
157{
158	if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
159		pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
160				__func__, dsp_addr, map_obj->size, size);
161
162	return map_obj->dsp_addr == dsp_addr &&
163		map_obj->size == size;
164}
165
166static void remove_mapping_information(struct process_context *pr_ctxt,
167						u32 dsp_addr, u32 size)
168{
169	struct dmm_map_object *map_obj;
170
171	pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
172							dsp_addr, size);
173
174	spin_lock(&pr_ctxt->dmm_map_lock);
175	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
176		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
177							__func__,
178							map_obj->mpu_addr,
179							map_obj->dsp_addr,
180							map_obj->size);
181
182		if (match_exact_map_obj(map_obj, dsp_addr, size)) {
183			pr_debug("%s: match, deleting map info\n", __func__);
184			list_del(&map_obj->link);
185			kfree(map_obj->dma_info.sg);
186			kfree(map_obj->pages);
187			kfree(map_obj);
188			goto out;
189		}
190		pr_debug("%s: candidate didn't match\n", __func__);
191	}
192
193	pr_err("%s: failed to find given map info\n", __func__);
194out:
195	spin_unlock(&pr_ctxt->dmm_map_lock);
196}
197
198static int match_containing_map_obj(struct dmm_map_object *map_obj,
199					u32 mpu_addr, u32 size)
200{
201	u32 map_obj_end = map_obj->mpu_addr + map_obj->size;
202
203	return mpu_addr >= map_obj->mpu_addr &&
204		mpu_addr + size <= map_obj_end;
205}
206
207static struct dmm_map_object *find_containing_mapping(
208				struct process_context *pr_ctxt,
209				u32 mpu_addr, u32 size)
210{
211	struct dmm_map_object *map_obj;
212	pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__,
213						mpu_addr, size);
214
215	spin_lock(&pr_ctxt->dmm_map_lock);
216	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
217		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
218						__func__,
219						map_obj->mpu_addr,
220						map_obj->dsp_addr,
221						map_obj->size);
222		if (match_containing_map_obj(map_obj, mpu_addr, size)) {
223			pr_debug("%s: match!\n", __func__);
224			goto out;
225		}
226
227		pr_debug("%s: no match!\n", __func__);
228	}
229
230	map_obj = NULL;
231out:
232	spin_unlock(&pr_ctxt->dmm_map_lock);
233	return map_obj;
234}
235
236static int find_first_page_in_cache(struct dmm_map_object *map_obj,
237					unsigned long mpu_addr)
238{
239	u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT;
240	u32 requested_base_page = mpu_addr >> PAGE_SHIFT;
241	int pg_index = requested_base_page - mapped_base_page;
242
243	if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
244		pr_err("%s: failed (got %d)\n", __func__, pg_index);
245		return -1;
246	}
247
248	pr_debug("%s: first page is %d\n", __func__, pg_index);
249	return pg_index;
250}
251
252static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
253								int pg_i)
254{
255	pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
256					pg_i, map_obj->num_usr_pgs);
257
258	if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
259		pr_err("%s: requested pg_i %d is out of mapped range\n",
260				__func__, pg_i);
261		return NULL;
262	}
263
264	return map_obj->pages[pg_i];
265}
266
267/*
268 *  ======== proc_attach ========
269 *  Purpose:
270 *      Prepare for communication with a particular DSP processor, and return
271 *      a handle to the processor object.
272 */
int
proc_attach(u32 processor_id,
	    const struct dsp_processorattrin *attr_in,
	    void **ph_processor, struct process_context *pr_ctxt)
{
	int status = 0;
	struct dev_object *hdev_obj;
	struct proc_object *p_proc_object = NULL;
	struct mgr_object *hmgr_obj = NULL;
	struct drv_object *hdrv_obj = NULL;
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_processor != NULL);

	/* Each process context attaches at most once; hand back the
	 * existing handle instead of creating a second proc_object. */
	if (pr_ctxt->hprocessor) {
		*ph_processor = pr_ctxt->hprocessor;
		return status;
	}

	/* Get the Driver and Manager Object Handles */
	status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
	if (!status)
		status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);

	if (!status) {
		/* Get the Device Object */
		status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
	}
	if (!status)
		status = dev_get_dev_type(hdev_obj, &dev_type);

	if (status)
		goto func_end;

	/* If we made it this far, create the Proceesor object: */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	/* Fill out the Processor Object: */
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->hdev_obj = hdev_obj;
	p_proc_object->hmgr_obj = hmgr_obj;
	/* NOTE(review): processor_id field is assigned the device *type*,
	 * not the processor_id argument — confirm this is intentional. */
	p_proc_object->processor_id = dev_type;
	/* Store TGID instead of process handle */
	p_proc_object->process = current->tgid;

	INIT_LIST_HEAD(&p_proc_object->proc_list);

	/* Use the caller-supplied timeout when attributes were given. */
	if (attr_in)
		p_proc_object->utimeout = attr_in->utimeout;
	else
		p_proc_object->utimeout = PROC_DFLT_TIMEOUT;

	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status) {
		status = dev_get_bridge_context(hdev_obj,
					     &p_proc_object->hbridge_context);
		if (status)
			kfree(p_proc_object);
	} else
		kfree(p_proc_object);

	if (status)
		goto func_end;

	/* Create the Notification Object */
	/* This is created with no event mask, no notify mask
	 * and no valid handle to the notification. They all get
	 * filled up when proc_register_notify is called */
	p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
	if (p_proc_object->ntfy_obj)
		ntfy_init(p_proc_object->ntfy_obj);
	else
		status = -ENOMEM;

	if (!status) {
		/* Insert the Processor Object into the DEV List.
		 * Return handle to this Processor Object:
		 * Find out if the Device is already attached to a
		 * Processor. If so, return AlreadyAttached status */
		lst_init_elem(&p_proc_object->link);
		status = dev_insert_proc_object(p_proc_object->hdev_obj,
						(u32) p_proc_object,
						&p_proc_object->
						is_already_attached);
		if (!status) {
			/* Attaching to an already-attached device is not
			 * treated as an error here. */
			if (p_proc_object->is_already_attached)
				status = 0;
		} else {
			/* Insert failed: tear down notification object and
			 * the proc_object itself. */
			if (p_proc_object->ntfy_obj) {
				ntfy_delete(p_proc_object->ntfy_obj);
				kfree(p_proc_object->ntfy_obj);
			}

			kfree(p_proc_object);
		}
		if (!status) {
			*ph_processor = (void *)p_proc_object;
			pr_ctxt->hprocessor = *ph_processor;
			(void)proc_notify_clients(p_proc_object,
						  DSP_PROCESSORATTACH);
		}
	} else {
		/* Don't leak memory if status is failed */
		kfree(p_proc_object);
	}
func_end:
	/* NOTE(review): on early failure paths *ph_processor is never
	 * written, so this postcondition may inspect an uninitialized
	 * caller-provided value — verify against DBC configuration. */
	DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
		   (!status && p_proc_object) ||
		   (status == 0 && p_proc_object));

	return status;
}
389
390static int get_exec_file(struct cfg_devnode *dev_node_obj,
391				struct dev_object *hdev_obj,
392				u32 size, char *exec_file)
393{
394	u8 dev_type;
395	s32 len;
396
397	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
398	if (dev_type == DSP_UNIT) {
399		return cfg_get_exec_file(dev_node_obj, size, exec_file);
400	} else if (dev_type == IVA_UNIT) {
401		if (iva_img) {
402			len = strlen(iva_img);
403			strncpy(exec_file, iva_img, len + 1);
404			return 0;
405		}
406	}
407	return -ENOENT;
408}
409
410/*
411 *  ======== proc_auto_start ======== =
412 *  Purpose:
413 *      A Particular device gets loaded with the default image
414 *      if the AutoStart flag is set.
415 *  Parameters:
416 *      hdev_obj:     Handle to the Device
417 *  Returns:
418 *      0:   On Successful Loading
419 *      -EPERM  General Failure
420 *  Requires:
421 *      hdev_obj != NULL
422 *  Ensures:
423 */
int proc_auto_start(struct cfg_devnode *dev_node_obj,
			   struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct proc_object *p_proc_object;
	char sz_exec_file[MAXCMDLINELEN];
	char *argv[2];
	struct mgr_object *hmgr_obj = NULL;
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dev_node_obj != NULL);
	DBC_REQUIRE(hdev_obj != NULL);

	/* Create a Dummy PROC Object */
	status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);
	if (status)
		goto func_end;

	/* This temporary proc_object exists only for the duration of the
	 * auto-start sequence and is freed at func_cont below. */
	p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL);
	if (p_proc_object == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	p_proc_object->hdev_obj = hdev_obj;
	p_proc_object->hmgr_obj = hmgr_obj;
	status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
	if (!status)
		status = dev_get_bridge_context(hdev_obj,
					     &p_proc_object->hbridge_context);
	if (status)
		goto func_cont;

	/* Stop the Device, put it into standby mode */
	status = proc_stop(p_proc_object);

	if (status)
		goto func_cont;

	/* Get the default executable for this board... */
	dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
	p_proc_object->processor_id = dev_type;
	status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
			       sz_exec_file);
	if (!status) {
		argv[0] = sz_exec_file;
		argv[1] = NULL;
		/* ...and try to load it: */
		status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
		if (!status)
			status = proc_start(p_proc_object);
	}
	/* proc_load may have recorded the COFF path; release it before
	 * the dummy object is freed. */
	kfree(p_proc_object->psz_last_coff);
	p_proc_object->psz_last_coff = NULL;
func_cont:
	kfree(p_proc_object);
func_end:
	return status;
}
483
484/*
485 *  ======== proc_ctrl ========
486 *  Purpose:
487 *      Pass control information to the GPP device driver managing the
488 *      DSP processor.
489 *
490 *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
491 *      application developer's API.
492 *      Call the bridge_dev_ctrl fxn with the Argument. This is a Synchronous
493 *      Operation. arg can be null.
494 */
495int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg)
496{
497	int status = 0;
498	struct proc_object *p_proc_object = hprocessor;
499	u32 timeout = 0;
500
501	DBC_REQUIRE(refs > 0);
502
503	if (p_proc_object) {
504		/* intercept PWR deep sleep command */
505		if (dw_cmd == BRDIOCTL_DEEPSLEEP) {
506			timeout = arg->cb_data;
507			status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
508		}
509		/* intercept PWR emergency sleep command */
510		else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) {
511			timeout = arg->cb_data;
512			status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout);
513		} else if (dw_cmd == PWR_DEEPSLEEP) {
514			/* timeout = arg->cb_data; */
515			status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout);
516		}
517		/* intercept PWR wake commands */
518		else if (dw_cmd == BRDIOCTL_WAKEUP) {
519			timeout = arg->cb_data;
520			status = pwr_wake_dsp(timeout);
521		} else if (dw_cmd == PWR_WAKEUP) {
522			/* timeout = arg->cb_data; */
523			status = pwr_wake_dsp(timeout);
524		} else
525		    if (!((*p_proc_object->intf_fxns->pfn_dev_cntrl)
526				      (p_proc_object->hbridge_context, dw_cmd,
527				       arg))) {
528			status = 0;
529		} else {
530			status = -EPERM;
531		}
532	} else {
533		status = -EFAULT;
534	}
535
536	return status;
537}
538
539/*
540 *  ======== proc_detach ========
541 *  Purpose:
542 *      Destroys the  Processor Object. Removes the notification from the Dev
543 *      List.
544 */
545int proc_detach(struct process_context *pr_ctxt)
546{
547	int status = 0;
548	struct proc_object *p_proc_object = NULL;
549
550	DBC_REQUIRE(refs > 0);
551
552	p_proc_object = (struct proc_object *)pr_ctxt->hprocessor;
553
554	if (p_proc_object) {
555		/* Notify the Client */
556		ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
557		/* Remove the notification memory */
558		if (p_proc_object->ntfy_obj) {
559			ntfy_delete(p_proc_object->ntfy_obj);
560			kfree(p_proc_object->ntfy_obj);
561		}
562
563		kfree(p_proc_object->psz_last_coff);
564		p_proc_object->psz_last_coff = NULL;
565		/* Remove the Proc from the DEV List */
566		(void)dev_remove_proc_object(p_proc_object->hdev_obj,
567					     (u32) p_proc_object);
568		/* Free the Processor Object */
569		kfree(p_proc_object);
570		pr_ctxt->hprocessor = NULL;
571	} else {
572		status = -EFAULT;
573	}
574
575	return status;
576}
577
578/*
579 *  ======== proc_enum_nodes ========
580 *  Purpose:
581 *      Enumerate and get configuration information about nodes allocated
582 *      on a DSP processor.
583 */
584int proc_enum_nodes(void *hprocessor, void **node_tab,
585			   u32 node_tab_size, u32 *pu_num_nodes,
586			   u32 *pu_allocated)
587{
588	int status = -EPERM;
589	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
590	struct node_mgr *hnode_mgr = NULL;
591
592	DBC_REQUIRE(refs > 0);
593	DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
594	DBC_REQUIRE(pu_num_nodes != NULL);
595	DBC_REQUIRE(pu_allocated != NULL);
596
597	if (p_proc_object) {
598		if (!(dev_get_node_manager(p_proc_object->hdev_obj,
599						       &hnode_mgr))) {
600			if (hnode_mgr) {
601				status = node_enum_nodes(hnode_mgr, node_tab,
602							 node_tab_size,
603							 pu_num_nodes,
604							 pu_allocated);
605			}
606		}
607	} else {
608		status = -EFAULT;
609	}
610
611	return status;
612}
613
614/* Cache operation against kernel address instead of users */
615static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
616						ssize_t len, int pg_i)
617{
618	struct page *page;
619	unsigned long offset;
620	ssize_t rest;
621	int ret = 0, i = 0;
622	struct scatterlist *sg = map_obj->dma_info.sg;
623
624	while (len) {
625		page = get_mapping_page(map_obj, pg_i);
626		if (!page) {
627			pr_err("%s: no page for %08lx\n", __func__, start);
628			ret = -EINVAL;
629			goto out;
630		} else if (IS_ERR(page)) {
631			pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
632			       PTR_ERR(page));
633			ret = PTR_ERR(page);
634			goto out;
635		}
636
637		offset = start & ~PAGE_MASK;
638		rest = min_t(ssize_t, PAGE_SIZE - offset, len);
639
640		sg_set_page(&sg[i], page, rest, offset);
641
642		len -= rest;
643		start += rest;
644		pg_i++, i++;
645	}
646
647	if (i != map_obj->dma_info.num_pages) {
648		pr_err("%s: bad number of sg iterations\n", __func__);
649		ret = -EFAULT;
650		goto out;
651	}
652
653out:
654	return ret;
655}
656
657static int memory_regain_ownership(struct dmm_map_object *map_obj,
658		unsigned long start, ssize_t len, enum dma_data_direction dir)
659{
660	int ret = 0;
661	unsigned long first_data_page = start >> PAGE_SHIFT;
662	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
663	/* calculating the number of pages this area spans */
664	unsigned long num_pages = last_data_page - first_data_page + 1;
665	struct bridge_dma_map_info *dma_info = &map_obj->dma_info;
666
667	if (!dma_info->sg)
668		goto out;
669
670	if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
671		pr_err("%s: dma info doesn't match given params\n", __func__);
672		return -EINVAL;
673	}
674
675	dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);
676
677	pr_debug("%s: dma_map_sg unmapped\n", __func__);
678
679	kfree(dma_info->sg);
680
681	map_obj->dma_info.sg = NULL;
682
683out:
684	return ret;
685}
686
687/* Cache operation against kernel address instead of users */
/*
 * Hand a user memory region over to the DSP for DMA: build a fresh
 * scatterlist over the cached pages covering (@start, @len) and map
 * it with dma_map_sg() in direction @dir.  On success the sg table is
 * stored in map_obj->dma_info (freed later by
 * memory_regain_ownership or remove_mapping_information).
 * Returns 0 on success or a negative errno.
 */
static int memory_give_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int pg_i, ret, sg_num;
	struct scatterlist *sg;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;

	pg_i = find_first_page_in_cache(map_obj, start);
	if (pg_i < 0) {
		pr_err("%s: failed to find first page in cache\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		pr_err("%s: kcalloc failed\n", __func__);
		ret = -ENOMEM;
		goto out;
	}

	sg_init_table(sg, num_pages);

	/* cleanup a previous sg allocation */
	/* this may happen if application doesn't signal for e/o DMA */
	kfree(map_obj->dma_info.sg);

	/* Record the new table before building/mapping so the error
	 * paths below can reset it consistently. */
	map_obj->dma_info.sg = sg;
	map_obj->dma_info.dir = dir;
	map_obj->dma_info.num_pages = num_pages;

	ret = build_dma_sg(map_obj, start, len, pg_i);
	if (ret)
		goto kfree_sg;

	sg_num = dma_map_sg(bridge, sg, num_pages, dir);
	if (sg_num < 1) {
		pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
		ret = -EFAULT;
		goto kfree_sg;
	}

	pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
	map_obj->dma_info.sg_num = sg_num;

	return 0;

kfree_sg:
	kfree(sg);
	map_obj->dma_info.sg = NULL;
out:
	return ret;
}
744
745int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
746				enum dma_data_direction dir)
747{
748	/* Keep STATUS here for future additions to this function */
749	int status = 0;
750	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
751	struct dmm_map_object *map_obj;
752
753	DBC_REQUIRE(refs > 0);
754
755	if (!pr_ctxt) {
756		status = -EFAULT;
757		goto err_out;
758	}
759
760	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
761							(u32)pmpu_addr,
762							ul_size, dir);
763
764	/* find requested memory are in cached mapping information */
765	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
766	if (!map_obj) {
767		pr_err("%s: find_containing_mapping failed\n", __func__);
768		status = -EFAULT;
769		goto err_out;
770	}
771
772	if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
773		pr_err("%s: InValid address parameters %p %x\n",
774			       __func__, pmpu_addr, ul_size);
775		status = -EFAULT;
776	}
777
778err_out:
779
780	return status;
781}
782
783int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
784			enum dma_data_direction dir)
785{
786	/* Keep STATUS here for future additions to this function */
787	int status = 0;
788	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
789	struct dmm_map_object *map_obj;
790
791	DBC_REQUIRE(refs > 0);
792
793	if (!pr_ctxt) {
794		status = -EFAULT;
795		goto err_out;
796	}
797
798	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
799							(u32)pmpu_addr,
800							ul_size, dir);
801
802	/* find requested memory are in cached mapping information */
803	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
804	if (!map_obj) {
805		pr_err("%s: find_containing_mapping failed\n", __func__);
806		status = -EFAULT;
807		goto err_out;
808	}
809
810	if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
811		pr_err("%s: InValid address parameters %p %x\n",
812		       __func__, pmpu_addr, ul_size);
813		status = -EFAULT;
814		goto err_out;
815	}
816
817err_out:
818	return status;
819}
820
821/*
822 *  ======== proc_flush_memory ========
823 *  Purpose:
824 *     Flush cache
825 */
826int proc_flush_memory(void *hprocessor, void *pmpu_addr,
827			     u32 ul_size, u32 ul_flags)
828{
829	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
830
831	return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
832}
833
834/*
835 *  ======== proc_invalidate_memory ========
836 *  Purpose:
837 *     Invalidates the memory specified
838 */
839int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
840{
841	enum dma_data_direction dir = DMA_FROM_DEVICE;
842
843	return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
844}
845
846/*
847 *  ======== proc_get_resource_info ========
848 *  Purpose:
849 *      Enumerate the resources currently available on a processor.
850 */
int proc_get_resource_info(void *hprocessor, u32 resource_type,
				  struct dsp_resourceinfo *resource_info,
				  u32 resource_info_size)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;
	struct nldr_object *nldr_obj = NULL;
	struct rmm_target_obj *rmm = NULL;
	struct io_mgr *hio_mgr = NULL;	/* IO manager handle */

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(resource_info != NULL);
	DBC_REQUIRE(resource_info_size >= sizeof(struct dsp_resourceinfo));

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	switch (resource_type) {
	/* Memory statistics: walk node mgr -> loader -> RMM manager. */
	case DSP_RESOURCE_DYNDARAM:
	case DSP_RESOURCE_DYNSARAM:
	case DSP_RESOURCE_DYNEXTERNAL:
	case DSP_RESOURCE_DYNSRAM:
		status = dev_get_node_manager(p_proc_object->hdev_obj,
					      &hnode_mgr);
		if (!hnode_mgr) {
			status = -EFAULT;
			goto func_end;
		}

		status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
		if (!status) {
			status = nldr_get_rmm_manager(nldr_obj, &rmm);
			if (rmm) {
				/* NOTE(review): rmm_stat presumably returns
				 * true on success, so a false return maps to
				 * -EINVAL here — confirm against rmm.h. */
				if (!rmm_stat(rmm,
					      (enum dsp_memtype)resource_type,
					      (struct dsp_memstat *)
					      &(resource_info->result.
						mem_stat)))
					status = -EINVAL;
			} else {
				status = -EFAULT;
			}
		}
		break;
	/* Processor load statistics come from the IO manager. */
	case DSP_RESOURCE_PROCLOAD:
		status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
		if (hio_mgr)
			status =
			    p_proc_object->intf_fxns->
			    pfn_io_get_proc_load(hio_mgr,
						 (struct dsp_procloadstat *)
						 &(resource_info->result.
						   proc_load_stat));
		else
			status = -EFAULT;
		break;
	default:
		status = -EPERM;
		break;
	}
func_end:
	return status;
}
916
917/*
918 *  ======== proc_exit ========
919 *  Purpose:
920 *      Decrement reference count, and free resources when reference count is
921 *      0.
922 */
923void proc_exit(void)
924{
925	DBC_REQUIRE(refs > 0);
926
927	refs--;
928
929	DBC_ENSURE(refs >= 0);
930}
931
932/*
933 *  ======== proc_get_dev_object ========
934 *  Purpose:
935 *      Return the Dev Object handle for a given Processor.
936 *
937 */
938int proc_get_dev_object(void *hprocessor,
939			       struct dev_object **device_obj)
940{
941	int status = -EPERM;
942	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
943
944	DBC_REQUIRE(refs > 0);
945	DBC_REQUIRE(device_obj != NULL);
946
947	if (p_proc_object) {
948		*device_obj = p_proc_object->hdev_obj;
949		status = 0;
950	} else {
951		*device_obj = NULL;
952		status = -EFAULT;
953	}
954
955	DBC_ENSURE((!status && *device_obj != NULL) ||
956		   (status && *device_obj == NULL));
957
958	return status;
959}
960
961/*
962 *  ======== proc_get_state ========
963 *  Purpose:
964 *      Report the state of the specified DSP processor.
965 */
966int proc_get_state(void *hprocessor,
967			  struct dsp_processorstate *proc_state_obj,
968			  u32 state_info_size)
969{
970	int status = 0;
971	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
972	int brd_status;
973
974	DBC_REQUIRE(refs > 0);
975	DBC_REQUIRE(proc_state_obj != NULL);
976	DBC_REQUIRE(state_info_size >= sizeof(struct dsp_processorstate));
977
978	if (p_proc_object) {
979		/* First, retrieve BRD state information */
980		status = (*p_proc_object->intf_fxns->pfn_brd_status)
981		    (p_proc_object->hbridge_context, &brd_status);
982		if (!status) {
983			switch (brd_status) {
984			case BRD_STOPPED:
985				proc_state_obj->proc_state = PROC_STOPPED;
986				break;
987			case BRD_SLEEP_TRANSITION:
988			case BRD_DSP_HIBERNATION:
989				/* Fall through */
990			case BRD_RUNNING:
991				proc_state_obj->proc_state = PROC_RUNNING;
992				break;
993			case BRD_LOADED:
994				proc_state_obj->proc_state = PROC_LOADED;
995				break;
996			case BRD_ERROR:
997				proc_state_obj->proc_state = PROC_ERROR;
998				break;
999			default:
1000				proc_state_obj->proc_state = 0xFF;
1001				status = -EPERM;
1002				break;
1003			}
1004		}
1005	} else {
1006		status = -EFAULT;
1007	}
1008	dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
1009		__func__, status, proc_state_obj->proc_state);
1010	return status;
1011}
1012
1013/*
1014 *  ======== proc_get_trace ========
1015 *  Purpose:
1016 *      Retrieve the current contents of the trace buffer, located on the
1017 *      Processor.  Predefined symbols for the trace buffer must have been
1018 *      configured into the DSP executable.
1019 *  Details:
1020 *      We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
1021 *      trace buffer, only.  Treat it as an undocumented feature.
1022 *      This call is destructive, meaning the processor is placed in the monitor
1023 *      state as a result of this function.
1024 */
1025int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
1026{
1027	int status;
1028	status = -ENOSYS;
1029	return status;
1030}
1031
1032/*
1033 *  ======== proc_init ========
1034 *  Purpose:
1035 *      Initialize PROC's private state, keeping a reference count on each call
1036 */
1037bool proc_init(void)
1038{
1039	bool ret = true;
1040
1041	DBC_REQUIRE(refs >= 0);
1042
1043	if (ret)
1044		refs++;
1045
1046	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
1047
1048	return ret;
1049}
1050
1051/*
1052 *  ======== proc_load ========
1053 *  Purpose:
1054 *      Reset a processor and load a new base program image.
1055 *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
1056 *      application developer's API.
1057 */
1058int proc_load(void *hprocessor, const s32 argc_index,
1059		     const char **user_args, const char **user_envp)
1060{
1061	int status = 0;
1062	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1063	struct io_mgr *hio_mgr;	/* IO manager handle */
1064	struct msg_mgr *hmsg_mgr;
1065	struct cod_manager *cod_mgr;	/* Code manager handle */
1066	char *pargv0;		/* temp argv[0] ptr */
1067	char **new_envp;	/* Updated envp[] array. */
1068	char sz_proc_id[MAXPROCIDLEN];	/* Size of "PROC_ID=<n>" */
1069	s32 envp_elems;		/* Num elements in envp[]. */
1070	s32 cnew_envp;		/* "  " in new_envp[] */
1071	s32 nproc_id = 0;	/* Anticipate MP version. */
1072	struct dcd_manager *hdcd_handle;
1073	struct dmm_object *dmm_mgr;
1074	u32 dw_ext_end;
1075	u32 proc_id;
1076	int brd_state;
1077	struct drv_data *drv_datap = dev_get_drvdata(bridge);
1078
1079#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1080	struct timeval tv1;
1081	struct timeval tv2;
1082#endif
1083
1084#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1085	struct dspbridge_platform_data *pdata =
1086	    omap_dspbridge_dev->dev.platform_data;
1087#endif
1088
1089	DBC_REQUIRE(refs > 0);
1090	DBC_REQUIRE(argc_index > 0);
1091	DBC_REQUIRE(user_args != NULL);
1092
1093#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1094	do_gettimeofday(&tv1);
1095#endif
1096	if (!p_proc_object) {
1097		status = -EFAULT;
1098		goto func_end;
1099	}
1100	dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
1101	if (!cod_mgr) {
1102		status = -EPERM;
1103		goto func_end;
1104	}
1105	status = proc_stop(hprocessor);
1106	if (status)
1107		goto func_end;
1108
1109	/* Place the board in the monitor state. */
1110	status = proc_monitor(hprocessor);
1111	if (status)
1112		goto func_end;
1113
1114	/* Save ptr to  original argv[0]. */
1115	pargv0 = (char *)user_args[0];
1116	/*Prepend "PROC_ID=<nproc_id>"to envp array for target. */
1117	envp_elems = get_envp_count((char **)user_envp);
1118	cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
1119	new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
1120	if (new_envp) {
1121		status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
1122				  nproc_id);
1123		if (status == -1) {
1124			dev_dbg(bridge, "%s: Proc ID string overflow\n",
1125				__func__);
1126			status = -EPERM;
1127		} else {
1128			new_envp =
1129			    prepend_envp(new_envp, (char **)user_envp,
1130					 envp_elems, cnew_envp, sz_proc_id);
1131			/* Get the DCD Handle */
1132			status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
1133						    (u32 *) &hdcd_handle);
1134			if (!status) {
1135				/*  Before proceeding with new load,
1136				 *  check if a previously registered COFF
1137				 *  exists.
1138				 *  If yes, unregister nodes in previously
1139				 *  registered COFF.  If any error occurred,
1140				 *  set previously registered COFF to NULL. */
1141				if (p_proc_object->psz_last_coff != NULL) {
1142					status =
1143					    dcd_auto_unregister(hdcd_handle,
1144								p_proc_object->
1145								psz_last_coff);
1146					/* Regardless of auto unregister status,
1147					 *  free previously allocated
1148					 *  memory. */
1149					kfree(p_proc_object->psz_last_coff);
1150					p_proc_object->psz_last_coff = NULL;
1151				}
1152			}
1153			/* On success, do cod_open_base() */
1154			status = cod_open_base(cod_mgr, (char *)user_args[0],
1155					       COD_SYMB);
1156		}
1157	} else {
1158		status = -ENOMEM;
1159	}
1160	if (!status) {
1161		/* Auto-register data base */
1162		/* Get the DCD Handle */
1163		status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
1164					    (u32 *) &hdcd_handle);
1165		if (!status) {
1166			/*  Auto register nodes in specified COFF
1167			 *  file.  If registration did not fail,
1168			 *  (status = 0 or -EACCES)
1169			 *  save the name of the COFF file for
1170			 *  de-registration in the future. */
1171			status =
1172			    dcd_auto_register(hdcd_handle,
1173					      (char *)user_args[0]);
1174			if (status == -EACCES)
1175				status = 0;
1176
1177			if (status) {
1178				status = -EPERM;
1179			} else {
1180				DBC_ASSERT(p_proc_object->psz_last_coff ==
1181					   NULL);
1182				/* Allocate memory for pszLastCoff */
1183				p_proc_object->psz_last_coff =
1184						kzalloc((strlen(user_args[0]) +
1185						1), GFP_KERNEL);
1186				/* If memory allocated, save COFF file name */
1187				if (p_proc_object->psz_last_coff) {
1188					strncpy(p_proc_object->psz_last_coff,
1189						(char *)user_args[0],
1190						(strlen((char *)user_args[0]) +
1191						 1));
1192				}
1193			}
1194		}
1195	}
1196	/* Update shared memory address and size */
1197	if (!status) {
1198		/*  Create the message manager. This must be done
1199		 *  before calling the IOOnLoaded function. */
1200		dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
1201		if (!hmsg_mgr) {
1202			status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
1203					    (msg_onexit) node_on_exit);
1204			DBC_ASSERT(!status);
1205			dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
1206		}
1207	}
1208	if (!status) {
1209		/* Set the Device object's message manager */
1210		status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
1211		if (hio_mgr)
1212			status = (*p_proc_object->intf_fxns->pfn_io_on_loaded)
1213								(hio_mgr);
1214		else
1215			status = -EFAULT;
1216	}
1217	if (!status) {
1218		/* Now, attempt to load an exec: */
1219
1220		/* Boost the OPP level to Maximum level supported by baseport */
1221#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1222		if (pdata->cpu_set_freq)
1223			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
1224#endif
1225		status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
1226				       dev_brd_write_fxn,
1227				       p_proc_object->hdev_obj, NULL);
1228		if (status) {
1229			if (status == -EBADF) {
1230				dev_dbg(bridge, "%s: Failure to Load the EXE\n",
1231					__func__);
1232			}
1233			if (status == -ESPIPE) {
1234				pr_err("%s: Couldn't parse the file\n",
1235				       __func__);
1236			}
1237		}
1238		/* Requesting the lowest opp supported */
1239#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1240		if (pdata->cpu_set_freq)
1241			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
1242#endif
1243
1244	}
1245	if (!status) {
1246		/* Update the Processor status to loaded */
1247		status = (*p_proc_object->intf_fxns->pfn_brd_set_state)
1248		    (p_proc_object->hbridge_context, BRD_LOADED);
1249		if (!status) {
1250			p_proc_object->proc_state = PROC_LOADED;
1251			if (p_proc_object->ntfy_obj)
1252				proc_notify_clients(p_proc_object,
1253						    DSP_PROCESSORSTATECHANGE);
1254		}
1255	}
1256	if (!status) {
1257		status = proc_get_processor_id(hprocessor, &proc_id);
1258		if (proc_id == DSP_UNIT) {
1259			/* Use all available DSP address space after EXTMEM
1260			 * for DMM */
1261			if (!status)
1262				status = cod_get_sym_value(cod_mgr, EXTEND,
1263							   &dw_ext_end);
1264
1265			/* Reset DMM structs and add an initial free chunk */
1266			if (!status) {
1267				status =
1268				    dev_get_dmm_mgr(p_proc_object->hdev_obj,
1269						    &dmm_mgr);
1270				if (dmm_mgr) {
1271					/* Set dw_ext_end to DMM START u8
1272					 * address */
1273					dw_ext_end =
1274					    (dw_ext_end + 1) * DSPWORDSIZE;
1275					/* DMM memory is from EXT_END */
1276					status = dmm_create_tables(dmm_mgr,
1277								   dw_ext_end,
1278								   DMMPOOLSIZE);
1279				} else {
1280					status = -EFAULT;
1281				}
1282			}
1283		}
1284	}
1285	/* Restore the original argv[0] */
1286	kfree(new_envp);
1287	user_args[0] = pargv0;
1288	if (!status) {
1289		if (!((*p_proc_object->intf_fxns->pfn_brd_status)
1290				(p_proc_object->hbridge_context, &brd_state))) {
1291			pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
1292			kfree(drv_datap->base_img);
1293			drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
1294								GFP_KERNEL);
1295			if (drv_datap->base_img)
1296				strncpy(drv_datap->base_img, pargv0,
1297							strlen(pargv0) + 1);
1298			else
1299				status = -ENOMEM;
1300			DBC_ASSERT(brd_state == BRD_LOADED);
1301		}
1302	}
1303
1304func_end:
1305	if (status) {
1306		pr_err("%s: Processor failed to load\n", __func__);
1307		proc_stop(p_proc_object);
1308	}
1309	DBC_ENSURE((!status
1310		    && p_proc_object->proc_state == PROC_LOADED)
1311		   || status);
1312#ifdef OPT_LOAD_TIME_INSTRUMENTATION
1313	do_gettimeofday(&tv2);
1314	if (tv2.tv_usec < tv1.tv_usec) {
1315		tv2.tv_usec += 1000000;
1316		tv2.tv_sec--;
1317	}
1318	dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
1319		tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
1320#endif
1321	return status;
1322}
1323
1324/*
1325 *  ======== proc_map ========
1326 *  Purpose:
1327 *      Maps a MPU buffer to DSP address space.
1328 */
1329int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1330		    void *req_addr, void **pp_map_addr, u32 ul_map_attr,
1331		    struct process_context *pr_ctxt)
1332{
1333	u32 va_align;
1334	u32 pa_align;
1335	struct dmm_object *dmm_mgr;
1336	u32 size_align;
1337	int status = 0;
1338	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1339	struct dmm_map_object *map_obj;
1340	u32 tmp_addr = 0;
1341
1342#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1343	if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
1344		if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) ||
1345		    !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) {
1346			pr_err("%s: not aligned: 0x%x (%d)\n", __func__,
1347						(u32)pmpu_addr, ul_size);
1348			return -EFAULT;
1349		}
1350	}
1351#endif
1352
1353	/* Calculate the page-aligned PA, VA and size */
1354	va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K);
1355	pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K);
1356	size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align,
1357				   PG_SIZE4K);
1358
1359	if (!p_proc_object) {
1360		status = -EFAULT;
1361		goto func_end;
1362	}
1363	/* Critical section */
1364	mutex_lock(&proc_lock);
1365	dmm_get_handle(p_proc_object, &dmm_mgr);
1366	if (dmm_mgr)
1367		status = dmm_map_memory(dmm_mgr, va_align, size_align);
1368	else
1369		status = -EFAULT;
1370
1371	/* Add mapping to the page tables. */
1372	if (!status) {
1373
1374		/* Mapped address = MSB of VA | LSB of PA */
1375		tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
1376		/* mapped memory resource tracking */
1377		map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
1378						size_align);
1379		if (!map_obj)
1380			status = -ENOMEM;
1381		else
1382			status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
1383			    (p_proc_object->hbridge_context, pa_align, va_align,
1384			     size_align, ul_map_attr, map_obj->pages);
1385	}
1386	if (!status) {
1387		/* Mapped address = MSB of VA | LSB of PA */
1388		*pp_map_addr = (void *) tmp_addr;
1389	} else {
1390		remove_mapping_information(pr_ctxt, tmp_addr, size_align);
1391		dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1392	}
1393	mutex_unlock(&proc_lock);
1394
1395	if (status)
1396		goto func_end;
1397
1398func_end:
1399	dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, "
1400		"req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, "
1401		"pa_align %x, size_align %x status 0x%x\n", __func__,
1402		hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr,
1403		pp_map_addr, va_align, pa_align, size_align, status);
1404
1405	return status;
1406}
1407
1408/*
1409 *  ======== proc_register_notify ========
1410 *  Purpose:
1411 *      Register to be notified of specific processor events.
1412 */
1413int proc_register_notify(void *hprocessor, u32 event_mask,
1414				u32 notify_type, struct dsp_notification
1415				* hnotification)
1416{
1417	int status = 0;
1418	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1419	struct deh_mgr *hdeh_mgr;
1420
1421	DBC_REQUIRE(hnotification != NULL);
1422	DBC_REQUIRE(refs > 0);
1423
1424	/* Check processor handle */
1425	if (!p_proc_object) {
1426		status = -EFAULT;
1427		goto func_end;
1428	}
1429	/* Check if event mask is a valid processor related event */
1430	if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH |
1431			DSP_PROCESSORDETACH | DSP_PROCESSORRESTART |
1432			DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR |
1433			DSP_WDTOVERFLOW))
1434		status = -EINVAL;
1435
1436	/* Check if notify type is valid */
1437	if (notify_type != DSP_SIGNALEVENT)
1438		status = -EINVAL;
1439
1440	if (!status) {
1441		/* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
1442		 * or DSP_PWRERROR then register event immediately. */
1443		if (event_mask &
1444		    ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR |
1445				DSP_WDTOVERFLOW)) {
1446			status = ntfy_register(p_proc_object->ntfy_obj,
1447					       hnotification, event_mask,
1448					       notify_type);
1449			/* Special case alert, special case alert!
1450			 * If we're trying to *deregister* (i.e. event_mask
1451			 * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification,
1452			 * we have to deregister with the DEH manager.
1453			 * There's no way to know, based on event_mask which
1454			 * manager the notification event was registered with,
1455			 * so if we're trying to deregister and ntfy_register
1456			 * failed, we'll give the deh manager a shot.
1457			 */
1458			if ((event_mask == 0) && status) {
1459				status =
1460				    dev_get_deh_mgr(p_proc_object->hdev_obj,
1461						    &hdeh_mgr);
1462				status =
1463					bridge_deh_register_notify(hdeh_mgr,
1464							event_mask,
1465							notify_type,
1466							hnotification);
1467			}
1468		} else {
1469			status = dev_get_deh_mgr(p_proc_object->hdev_obj,
1470						 &hdeh_mgr);
1471			status =
1472			    bridge_deh_register_notify(hdeh_mgr,
1473					    event_mask,
1474					    notify_type,
1475					    hnotification);
1476
1477		}
1478	}
1479func_end:
1480	return status;
1481}
1482
1483/*
1484 *  ======== proc_reserve_memory ========
1485 *  Purpose:
1486 *      Reserve a virtually contiguous region of DSP address space.
1487 */
1488int proc_reserve_memory(void *hprocessor, u32 ul_size,
1489			       void **pp_rsv_addr,
1490			       struct process_context *pr_ctxt)
1491{
1492	struct dmm_object *dmm_mgr;
1493	int status = 0;
1494	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1495	struct dmm_rsv_object *rsv_obj;
1496
1497	if (!p_proc_object) {
1498		status = -EFAULT;
1499		goto func_end;
1500	}
1501
1502	status = dmm_get_handle(p_proc_object, &dmm_mgr);
1503	if (!dmm_mgr) {
1504		status = -EFAULT;
1505		goto func_end;
1506	}
1507
1508	status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1509	if (status != 0)
1510		goto func_end;
1511
1512	/*
1513	 * A successful reserve should be followed by insertion of rsv_obj
1514	 * into dmm_rsv_list, so that reserved memory resource tracking
1515	 * remains uptodate
1516	 */
1517	rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1518	if (rsv_obj) {
1519		rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1520		spin_lock(&pr_ctxt->dmm_rsv_lock);
1521		list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1522		spin_unlock(&pr_ctxt->dmm_rsv_lock);
1523	}
1524
1525func_end:
1526	dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1527		"status 0x%x\n", __func__, hprocessor,
1528		ul_size, pp_rsv_addr, status);
1529	return status;
1530}
1531
1532/*
1533 *  ======== proc_start ========
1534 *  Purpose:
1535 *      Start a processor running.
1536 */
int proc_start(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	u32 dw_dsp_addr;	/* Loaded code's entry point. */
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Only a previously loaded image may be started. */
	if (p_proc_object->proc_state != PROC_LOADED) {
		status = -EBADR;
		goto func_end;
	}
	status = dev_get_cod_mgr(p_proc_object->hdev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EFAULT;
		goto func_cont;
	}

	/* Look up the loaded image's entry point before starting the board. */
	status = cod_get_entry(cod_mgr, &dw_dsp_addr);
	if (status)
		goto func_cont;

	/* Start execution at the entry point via the bridge driver. */
	status = (*p_proc_object->intf_fxns->pfn_brd_start)
	    (p_proc_object->hbridge_context, dw_dsp_addr);
	if (status)
		goto func_cont;

	/* Call dev_create2: brings up the node manager and DISP object. */
	status = dev_create2(p_proc_object->hdev_obj);
	if (!status) {
		p_proc_object->proc_state = PROC_RUNNING;
		/* Deep sleep switches off the peripheral clocks.
		 * we just put the DSP CPU in idle in the idle loop.
		 * so there is no need to send a command to DSP */

		if (p_proc_object->ntfy_obj) {
			proc_notify_clients(p_proc_object,
					    DSP_PROCESSORSTATECHANGE);
		}
	} else {
		/* Failed to Create Node Manager and DISP Object
		 * Stop the Processor from running. Put it in STOPPED State */
		(void)(*p_proc_object->intf_fxns->
		       pfn_brd_stop) (p_proc_object->hbridge_context);
		p_proc_object->proc_state = PROC_STOPPED;
	}
func_cont:
	if (!status) {
		/* Sanity-check the board state via the bridge driver;
		 * the check is best-effort (its own failure is ignored). */
		if (!((*p_proc_object->intf_fxns->pfn_brd_status)
				(p_proc_object->hbridge_context, &brd_state))) {
			pr_info("%s: dsp in running state\n", __func__);
			DBC_ASSERT(brd_state != BRD_HIBERNATION);
		}
	} else {
		pr_err("%s: Failed to start the dsp\n", __func__);
		proc_stop(p_proc_object);
	}

func_end:
	DBC_ENSURE((!status && p_proc_object->proc_state ==
		    PROC_RUNNING) || status);
	return status;
}
1606
1607/*
1608 *  ======== proc_stop ========
1609 *  Purpose:
1610 *      Stop a processor running.
1611 */
int proc_stop(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct msg_mgr *hmsg_mgr;
	struct node_mgr *hnode_mgr;
	void *hnode;
	u32 node_tab_size = 1;	/* we only need to know if >= 1 node exists */
	u32 num_nodes = 0;
	u32 nodes_allocated = 0;
	int brd_state;

	DBC_REQUIRE(refs > 0);
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* check if there are any running nodes; refuse to stop if so */
	status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
	if (!status && hnode_mgr) {
		status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
					 &num_nodes, &nodes_allocated);
		if ((status == -EINVAL) || (nodes_allocated > 0)) {
			pr_err("%s: Can't stop device, active nodes = %d \n",
			       __func__, nodes_allocated);
			return -EBADR;
		}
	}
	/* Call the bridge_brd_stop */
	/* It is OK to stop a device that does n't have nodes OR not started */
	status =
	    (*p_proc_object->intf_fxns->
	     pfn_brd_stop) (p_proc_object->hbridge_context);
	if (!status) {
		dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
		p_proc_object->proc_state = PROC_STOPPED;
		/* Destroy the Node Manager, msg_ctrl Manager */
		if (!(dev_destroy2(p_proc_object->hdev_obj))) {
			/* Destroy the msg_ctrl by calling msg_delete */
			dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
			if (hmsg_mgr) {
				msg_delete(hmsg_mgr);
				dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
			}
			/* Best-effort debug check of the board state */
			if (!((*p_proc_object->
			      intf_fxns->pfn_brd_status) (p_proc_object->
							  hbridge_context,
							  &brd_state)))
				DBC_ASSERT(brd_state == BRD_STOPPED);
		}
	} else {
		pr_err("%s: Failed to stop the processor\n", __func__);
	}
func_end:

	return status;
}
1669
1670/*
1671 *  ======== proc_un_map ========
1672 *  Purpose:
1673 *      Removes a MPU buffer mapping from the DSP address space.
1674 */
1675int proc_un_map(void *hprocessor, void *map_addr,
1676		       struct process_context *pr_ctxt)
1677{
1678	int status = 0;
1679	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1680	struct dmm_object *dmm_mgr;
1681	u32 va_align;
1682	u32 size_align;
1683
1684	va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1685	if (!p_proc_object) {
1686		status = -EFAULT;
1687		goto func_end;
1688	}
1689
1690	status = dmm_get_handle(hprocessor, &dmm_mgr);
1691	if (!dmm_mgr) {
1692		status = -EFAULT;
1693		goto func_end;
1694	}
1695
1696	/* Critical section */
1697	mutex_lock(&proc_lock);
1698	/*
1699	 * Update DMM structures. Get the size to unmap.
1700	 * This function returns error if the VA is not mapped
1701	 */
1702	status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
1703	/* Remove mapping from the page tables. */
1704	if (!status) {
1705		status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
1706		    (p_proc_object->hbridge_context, va_align, size_align);
1707	}
1708
1709	mutex_unlock(&proc_lock);
1710	if (status)
1711		goto func_end;
1712
1713	/*
1714	 * A successful unmap should be followed by removal of map_obj
1715	 * from dmm_map_list, so that mapped memory resource tracking
1716	 * remains uptodate
1717	 */
1718	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
1719
1720func_end:
1721	dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
1722		__func__, hprocessor, map_addr, status);
1723	return status;
1724}
1725
1726/*
1727 *  ======== proc_un_reserve_memory ========
1728 *  Purpose:
1729 *      Frees a previously reserved region of DSP address space.
1730 */
1731int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1732				  struct process_context *pr_ctxt)
1733{
1734	struct dmm_object *dmm_mgr;
1735	int status = 0;
1736	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1737	struct dmm_rsv_object *rsv_obj;
1738
1739	if (!p_proc_object) {
1740		status = -EFAULT;
1741		goto func_end;
1742	}
1743
1744	status = dmm_get_handle(p_proc_object, &dmm_mgr);
1745	if (!dmm_mgr) {
1746		status = -EFAULT;
1747		goto func_end;
1748	}
1749
1750	status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1751	if (status != 0)
1752		goto func_end;
1753
1754	/*
1755	 * A successful unreserve should be followed by removal of rsv_obj
1756	 * from dmm_rsv_list, so that reserved memory resource tracking
1757	 * remains uptodate
1758	 */
1759	spin_lock(&pr_ctxt->dmm_rsv_lock);
1760	list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1761		if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1762			list_del(&rsv_obj->link);
1763			kfree(rsv_obj);
1764			break;
1765		}
1766	}
1767	spin_unlock(&pr_ctxt->dmm_rsv_lock);
1768
1769func_end:
1770	dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1771		__func__, hprocessor, prsv_addr, status);
1772	return status;
1773}
1774
1775/*
1776 *  ======== = proc_monitor ======== ==
1777 *  Purpose:
1778 *      Place the Processor in Monitor State. This is an internal
1779 *      function and a requirement before Processor is loaded.
1780 *      This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor.
1781 *      In dev_destroy2 we delete the node manager.
1782 *  Parameters:
1783 *      p_proc_object:    Pointer to Processor Object
1784 *  Returns:
1785 *      0:	Processor placed in monitor mode.
1786 *      !0:       Failed to place processor in monitor mode.
1787 *  Requires:
1788 *      Valid Processor Handle
1789 *  Ensures:
1790 *      Success:	ProcObject state is PROC_IDLE
1791 */
1792static int proc_monitor(struct proc_object *proc_obj)
1793{
1794	int status = -EPERM;
1795	struct msg_mgr *hmsg_mgr;
1796	int brd_state;
1797
1798	DBC_REQUIRE(refs > 0);
1799	DBC_REQUIRE(proc_obj);
1800
1801	/* This is needed only when Device is loaded when it is
1802	 * already 'ACTIVE' */
1803	/* Destory the Node Manager, msg_ctrl Manager */
1804	if (!dev_destroy2(proc_obj->hdev_obj)) {
1805		/* Destroy the msg_ctrl by calling msg_delete */
1806		dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr);
1807		if (hmsg_mgr) {
1808			msg_delete(hmsg_mgr);
1809			dev_set_msg_mgr(proc_obj->hdev_obj, NULL);
1810		}
1811	}
1812	/* Place the Board in the Monitor State */
1813	if (!((*proc_obj->intf_fxns->pfn_brd_monitor)
1814			  (proc_obj->hbridge_context))) {
1815		status = 0;
1816		if (!((*proc_obj->intf_fxns->pfn_brd_status)
1817				  (proc_obj->hbridge_context, &brd_state)))
1818			DBC_ASSERT(brd_state == BRD_IDLE);
1819	}
1820
1821	DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
1822		   status);
1823	return status;
1824}
1825
1826/*
1827 *  ======== get_envp_count ========
1828 *  Purpose:
1829 *      Return the number of elements in the envp array, including the
1830 *      terminating NULL element.
1831 */
1832static s32 get_envp_count(char **envp)
1833{
1834	s32 ret = 0;
1835	if (envp) {
1836		while (*envp++)
1837			ret++;
1838
1839		ret += 1;	/* Include the terminating NULL in the count. */
1840	}
1841
1842	return ret;
1843}
1844
1845/*
1846 *  ======== prepend_envp ========
1847 *  Purpose:
1848 *      Prepend an environment variable=value pair to the new envp array, and
1849 *      copy in the existing var=value pairs in the old envp array.
1850 */
1851static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems,
1852			   s32 cnew_envp, char *sz_var)
1853{
1854	char **pp_envp = new_envp;
1855
1856	DBC_REQUIRE(new_envp);
1857
1858	/* Prepend new environ var=value string */
1859	*new_envp++ = sz_var;
1860
1861	/* Copy user's environment into our own. */
1862	while (envp_elems--)
1863		*new_envp++ = *envp++;
1864
1865	/* Ensure NULL terminates the new environment strings array. */
1866	if (envp_elems == 0)
1867		*new_envp = NULL;
1868
1869	return pp_envp;
1870}
1871
1872/*
1873 *  ======== proc_notify_clients ========
1874 *  Purpose:
1875 *      Notify the processor the events.
1876 */
1877int proc_notify_clients(void *proc, u32 events)
1878{
1879	int status = 0;
1880	struct proc_object *p_proc_object = (struct proc_object *)proc;
1881
1882	DBC_REQUIRE(p_proc_object);
1883	DBC_REQUIRE(is_valid_proc_event(events));
1884	DBC_REQUIRE(refs > 0);
1885	if (!p_proc_object) {
1886		status = -EFAULT;
1887		goto func_end;
1888	}
1889
1890	ntfy_notify(p_proc_object->ntfy_obj, events);
1891func_end:
1892	return status;
1893}
1894
1895/*
1896 *  ======== proc_notify_all_clients ========
1897 *  Purpose:
1898 *      Notify the processor the events. This includes notifying all clients
1899 *      attached to a particulat DSP.
1900 */
1901int proc_notify_all_clients(void *proc, u32 events)
1902{
1903	int status = 0;
1904	struct proc_object *p_proc_object = (struct proc_object *)proc;
1905
1906	DBC_REQUIRE(is_valid_proc_event(events));
1907	DBC_REQUIRE(refs > 0);
1908
1909	if (!p_proc_object) {
1910		status = -EFAULT;
1911		goto func_end;
1912	}
1913
1914	dev_notify_clients(p_proc_object->hdev_obj, events);
1915
1916func_end:
1917	return status;
1918}
1919
1920/*
1921 *  ======== proc_get_processor_id ========
1922 *  Purpose:
1923 *      Retrieves the processor ID.
1924 */
1925int proc_get_processor_id(void *proc, u32 * proc_id)
1926{
1927	int status = 0;
1928	struct proc_object *p_proc_object = (struct proc_object *)proc;
1929
1930	if (p_proc_object)
1931		*proc_id = p_proc_object->processor_id;
1932	else
1933		status = -EFAULT;
1934
1935	return status;
1936}
1937