1/*
2 * tiomap.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Processor Manager Driver for TI OMAP3430 EVM.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/types.h>
20/*  ----------------------------------- Host OS */
21#include <dspbridge/host_os.h>
22#include <linux/mm.h>
23#include <linux/mmzone.h>
24#include <plat/control.h>
25
26/*  ----------------------------------- DSP/BIOS Bridge */
27#include <dspbridge/dbdefs.h>
28
29/*  ----------------------------------- Trace & Debug */
30#include <dspbridge/dbc.h>
31
32/*  ----------------------------------- OS Adaptation Layer */
33#include <dspbridge/cfg.h>
34#include <dspbridge/drv.h>
35#include <dspbridge/sync.h>
36
37/* ------------------------------------ Hardware Abstraction Layer */
38#include <hw_defs.h>
39#include <hw_mmu.h>
40
41/*  ----------------------------------- Link Driver */
42#include <dspbridge/dspdefs.h>
43#include <dspbridge/dspchnl.h>
44#include <dspbridge/dspdeh.h>
45#include <dspbridge/dspio.h>
46#include <dspbridge/dspmsg.h>
47#include <dspbridge/pwr.h>
48#include <dspbridge/io_sm.h>
49
50/*  ----------------------------------- Platform Manager */
51#include <dspbridge/dev.h>
52#include <dspbridge/dspapi.h>
53#include <dspbridge/dmm.h>
54#include <dspbridge/wdt.h>
55
56/*  ----------------------------------- Local */
57#include "_tiomap.h"
58#include "_tiomap_pwr.h"
59#include "tiomap_io.h"
60
61/* Offset in shared mem to write to in order to synchronize start with DSP */
62#define SHMSYNCOFFSET 4		/* GPP byte offset */
63
64#define BUFFERSIZE 1024
65
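/* Number of polls in wait_for_start(); at 10 us per poll this gives
 * roughly a 100 ms timeout */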
66#define TIHELEN_ACKTIMEOUT  10000
67
68#define MMU_SECTION_ADDR_MASK    0xFFF00000
69#define MMU_SSECTION_ADDR_MASK   0xFF000000
70#define MMU_LARGE_PAGE_MASK      0xFFFF0000
71#define MMU_SMALL_PAGE_MASK      0xFFFFF000
72#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
73#define PAGES_II_LVL_TABLE   512
74#define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
75
76/* Forward Declarations: */
77static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
78static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
79				  u8 *host_buff,
80				  u32 dsp_addr, u32 ul_num_bytes,
81				  u32 mem_type);
82static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
83				   u32 dsp_addr);
84static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
85				    int *board_state);
86static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
87static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
88				   u8 *host_buff,
89				   u32 dsp_addr, u32 ul_num_bytes,
90				   u32 mem_type);
91static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
92				    u32 brd_state);
93static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
94				   u32 dsp_dest_addr, u32 dsp_src_addr,
95				   u32 ul_num_bytes, u32 mem_type);
96static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
97				    u8 *host_buff, u32 dsp_addr,
98				    u32 ul_num_bytes, u32 mem_type);
99static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
100				  u32 ul_mpu_addr, u32 virt_addr,
101				  u32 ul_num_bytes, u32 ul_map_attr,
102				  struct page **mapped_pages);
103static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
104				     u32 virt_addr, u32 ul_num_bytes);
105static int bridge_dev_create(struct bridge_dev_context
106					**dev_cntxt,
107					struct dev_object *hdev_obj,
108					struct cfg_hostres *config_param);
109static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
110				  u32 dw_cmd, void *pargs);
111static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
112static u32 user_va2_pa(struct mm_struct *mm, u32 address);
113static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
114			     u32 va, u32 size,
115			     struct hw_mmu_map_attrs_t *map_attrs);
116static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
117			  u32 size, struct hw_mmu_map_attrs_t *attrs);
118static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
119				  u32 ul_mpu_addr, u32 virt_addr,
120				  u32 ul_num_bytes,
121				  struct hw_mmu_map_attrs_t *hw_attrs);
122
123bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
124
125/*  ----------------------------------- Globals */
126
127/* Attributes of L2 page tables for DSP MMU */
128struct page_info {
129	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
130};
131
132/* Attributes used to manage the DSP MMU page tables */
133struct pg_table_attrs {
134	spinlock_t pg_lock;	/* Critical section object handle */
135
136	u32 l1_base_pa;		/* Physical address of the L1 PT */
137	u32 l1_base_va;		/* Virtual  address of the L1 PT */
138	u32 l1_size;		/* Size of the L1 PT */
139	u32 l1_tbl_alloc_pa;
140	/* Physical address of Allocated mem for L1 table. May not be aligned */
141	u32 l1_tbl_alloc_va;
142	/* Virtual address of Allocated mem for L1 table. May not be aligned */
143	u32 l1_tbl_alloc_sz;
144	/* Size of consistent memory allocated for L1 table.
145	 * May not be aligned */
146
147	u32 l2_base_pa;		/* Physical address of the L2 PT */
148	u32 l2_base_va;		/* Virtual  address of the L2 PT */
149	u32 l2_size;		/* Size of the L2 PT */
150	u32 l2_tbl_alloc_pa;
151	/* Physical address of Allocated mem for L2 table. May not be aligned */
152	u32 l2_tbl_alloc_va;
153	/* Virtual address of Allocated mem for L2 table. May not be aligned */
154	u32 l2_tbl_alloc_sz;
155	/* Size of consistent memory allocated for L2 table.
156	 * May not be aligned */
157
158	u32 l2_num_pages;	/* Number of allocated L2 PT */
159	/* Array [l2_num_pages] of L2 PT info structs */
160	struct page_info *pg_info;
161};
162
163/*
164 *  This Bridge driver's function interface table.
165 */
166static struct bridge_drv_interface drv_interface_fxns = {
167	/* Bridge API ver. for which this bridge driver is built. */
168	BRD_API_MAJOR_VERSION,
169	BRD_API_MINOR_VERSION,
170	bridge_dev_create,
171	bridge_dev_destroy,
172	bridge_dev_ctrl,
173	bridge_brd_monitor,
174	bridge_brd_start,
175	bridge_brd_stop,
176	bridge_brd_status,
177	bridge_brd_read,
178	bridge_brd_write,
179	bridge_brd_set_state,
180	bridge_brd_mem_copy,
181	bridge_brd_mem_write,
182	bridge_brd_mem_map,
183	bridge_brd_mem_un_map,
184	/* The following CHNL functions are provided by chnl_io.lib: */
185	bridge_chnl_create,
186	bridge_chnl_destroy,
187	bridge_chnl_open,
188	bridge_chnl_close,
189	bridge_chnl_add_io_req,
190	bridge_chnl_get_ioc,
191	bridge_chnl_cancel_io,
192	bridge_chnl_flush_io,
193	bridge_chnl_get_info,
194	bridge_chnl_get_mgr_info,
195	bridge_chnl_idle,
196	bridge_chnl_register_notify,
197	/* The following IO functions are provided by chnl_io.lib: */
198	bridge_io_create,
199	bridge_io_destroy,
200	bridge_io_on_loaded,
201	bridge_io_get_proc_load,
202	/* The following msg_ctrl functions are provided by chnl_io.lib: */
203	bridge_msg_create,
204	bridge_msg_create_queue,
205	bridge_msg_delete,
206	bridge_msg_delete_queue,
207	bridge_msg_get,
208	bridge_msg_put,
209	bridge_msg_register_notify,
210	bridge_msg_set_queue_id,
211};
212
213static inline void flush_all(struct bridge_dev_context *dev_context)
214{
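	/* Wake the DSP first, presumably because the MMU registers below are
	 * not accessible while the core is in hibernation */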
215	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
216	    dev_context->dw_brd_state == BRD_HIBERNATION)
217		wake_dsp(dev_context, NULL);
218
219	hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
220}
221
222static void bad_page_dump(u32 pa, struct page *pg)
223{
224	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
225	pr_emerg("Bad page state in process '%s'\n"
226		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
227		 "Backtrace:\n",
228		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
229		 (unsigned long)pg->flags, pg->mapping,
230		 page_mapcount(pg), page_count(pg));
231	dump_stack();
232}
233
234/*
235 *  ======== bridge_drv_entry ========
236 *  purpose:
237 *      Bridge Driver entry point.
238 */
239void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
240		   const char *driver_file_name)
241{
242
243	DBC_REQUIRE(driver_file_name != NULL);
244
245	io_sm_init();		/* Initialization of io_sm module */
246
247	if (strcmp(driver_file_name, "UMA") == 0)
248		*drv_intf = &drv_interface_fxns;
249	else
250		dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
251
252}
253
254/*
255 *  ======== bridge_brd_monitor ========
256 *  purpose:
 *      This bridge_brd_monitor puts the DSP into a loadable state,
 *      i.e. the application can load and start the device.
259 *
260 *  Preconditions:
261 *      Device in 'OFF' state.
262 */
263static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
264{
265	struct bridge_dev_context *dev_context = dev_ctxt;
266	u32 temp;
267	struct dspbridge_platform_data *pdata =
268				    omap_dspbridge_dev->dev.platform_data;
269
270	temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
271					OMAP_POWERSTATEST_MASK;
272	if (!(temp & 0x02)) {
273		/* IVA2 is not in ON state */
274		/* Read and set PM_PWSTCTRL_IVA2  to ON */
275		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
276			PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
277		/* Set the SW supervised state transition */
278		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
279					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
280
281		/* Wait until the state has moved to ON */
282		while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
283						OMAP_INTRANSITION_MASK)
284			;
285		/* Disable Automatic transition */
286		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
287					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
288	}
289	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
290					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
291	dsp_clk_enable(DSP_CLK_IVA2);
292
293	/* set the device state to IDLE */
294	dev_context->dw_brd_state = BRD_IDLE;
295
296	return 0;
297}
298
299/*
300 *  ======== bridge_brd_read ========
301 *  purpose:
302 *      Reads buffers for DSP memory.
303 */
304static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
305				  u8 *host_buff, u32 dsp_addr,
306				  u32 ul_num_bytes, u32 mem_type)
307{
308	int status = 0;
309	struct bridge_dev_context *dev_context = dev_ctxt;
310	u32 offset;
311	u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;
312
313	if (dsp_addr < dev_context->dw_dsp_start_add) {
314		status = -EPERM;
315		return status;
316	}
317	/* change here to account for the 3 bands of the DSP internal memory */
318	if ((dsp_addr - dev_context->dw_dsp_start_add) <
319	    dev_context->dw_internal_size) {
320		offset = dsp_addr - dev_context->dw_dsp_start_add;
321	} else {
322		status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
323					   ul_num_bytes, mem_type);
324		return status;
325	}
	/* copy the data from DSP memory */
327	memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
328	return status;
329}
330
331/*
332 *  ======== bridge_brd_set_state ========
333 *  purpose:
334 *      This routine updates the Board status.
335 */
336static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
337				    u32 brd_state)
338{
339	int status = 0;
340	struct bridge_dev_context *dev_context = dev_ctxt;
341
342	dev_context->dw_brd_state = brd_state;
343	return status;
344}
345
346/*
347 *  ======== bridge_brd_start ========
348 *  purpose:
349 *      Initializes DSP MMU and Starts DSP.
350 *
351 *  Preconditions:
352 *  a) DSP domain is 'ACTIVE'.
353 *  b) DSP_RST1 is asserted.
 *  c) DSP_RST2 is released.
355 */
356static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
357				   u32 dsp_addr)
358{
359	int status = 0;
360	struct bridge_dev_context *dev_context = dev_ctxt;
361	u32 dw_sync_addr = 0;
362	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
363	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
364	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
365	/* Offset of shm_base_virt from tlb_base_virt */
366	u32 ul_shm_offset_virt;
367	s32 entry_ndx;
368	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
369	struct cfg_hostres *resources = NULL;
370	u32 temp;
371	u32 ul_dsp_clk_rate;
372	u32 ul_dsp_clk_addr;
373	u32 ul_bios_gp_timer;
374	u32 clk_cmd;
375	struct io_mgr *hio_mgr;
376	u32 ul_load_monitor_timer;
377	struct dspbridge_platform_data *pdata =
378				omap_dspbridge_dev->dev.platform_data;
379
380	/* The device context contains all the mmu setup info from when the
381	 * last dsp base image was loaded. The first entry is always
382	 * SHMMEM base. */
383	/* Get SHM_BEG - convert to byte address */
384	(void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
385			     &ul_shm_base_virt);
386	ul_shm_base_virt *= DSPWORDSIZE;
387	DBC_ASSERT(ul_shm_base_virt != 0);
388	/* DSP Virtual address */
389	ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
390	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
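	/* Both terms below are byte addresses: ul_shm_base_virt was scaled by
	 * DSPWORDSIZE above, and ul_dsp_va appears to be kept in DSP words */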
391	ul_shm_offset_virt =
392	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
393	/* Kernel logical address */
394	ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
395
396	DBC_ASSERT(ul_shm_base != 0);
	/* The 2nd word is used as the sync field */
398	dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
399	/* Write a signature into the shm base + offset; this will
400	 * get cleared when the DSP program starts. */
401	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
402		pr_err("%s: Illegal SM base\n", __func__);
403		status = -EPERM;
404	} else
405		__raw_writel(0xffffffff, dw_sync_addr);
406
407	if (!status) {
408		resources = dev_context->resources;
409		if (!resources)
410			status = -EPERM;
411
		/* Assert RST1, i.e. reset only the DSP megacell */
413		if (!status) {
414			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
415					OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
416					OMAP2_RM_RSTCTRL);
417			/* Mask address with 1K for compatibility */
418			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
419					OMAP343X_CTRL_REGADDR(
420					OMAP343X_CONTROL_IVA2_BOOTADDR));
421			/*
422			 * Set bootmode to self loop if dsp_debug flag is true
423			 */
424			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
425					OMAP343X_CTRL_REGADDR(
426					OMAP343X_CONTROL_IVA2_BOOTMOD));
427		}
428	}
429	if (!status) {
430		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
431		 * IVA2 SYSC register */
432		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
433			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
434		udelay(100);
435		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
436					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
437		udelay(100);
438
		/* Disable the DSP MMU */
440		hw_mmu_disable(resources->dw_dmmu_base);
441		/* Disable TWL */
442		hw_mmu_twl_disable(resources->dw_dmmu_base);
443
444		/* Only make TLB entry if both addresses are non-zero */
445		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
446		     entry_ndx++) {
447			struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
448			struct hw_mmu_map_attrs_t map_attrs = {
449				.endianism = e->endianism,
450				.element_size = e->elem_size,
451				.mixed_size = e->mixed_mode,
452			};
453
454			if (!e->ul_gpp_pa || !e->ul_dsp_va)
455				continue;
456
457			dev_dbg(bridge,
458					"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
459					itmp_entry_ndx,
460					e->ul_gpp_pa,
461					e->ul_dsp_va,
462					e->ul_size);
463
464			hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
465					e->ul_gpp_pa,
466					e->ul_dsp_va,
467					e->ul_size,
468					itmp_entry_ndx,
469					&map_attrs, 1, 1);
470
471			itmp_entry_ndx++;
472		}
473	}
474
475	/* Lock the above TLB entries and get the BIOS and load monitor timer
476	 * information */
477	if (!status) {
478		hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
479		hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
480		hw_mmu_ttb_set(resources->dw_dmmu_base,
481			       dev_context->pt_attrs->l1_base_pa);
482		hw_mmu_twl_enable(resources->dw_dmmu_base);
483		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
484
485		temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
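		/* 0x11 presumably sets the SIDLEMODE smart-idle bit (bit 4) and
		 * the AUTOIDLE bit (bit 0) of MMU_SYSCONFIG, per the comment above */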
486		temp = (temp & 0xFFFFFFEF) | 0x11;
487		__raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
488
489		/* Let the DSP MMU run */
490		hw_mmu_enable(resources->dw_dmmu_base);
491
492		/* Enable the BIOS clock */
493		(void)dev_get_symbol(dev_context->hdev_obj,
494				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
495		(void)dev_get_symbol(dev_context->hdev_obj,
496				     BRIDGEINIT_LOADMON_GPTIMER,
497				     &ul_load_monitor_timer);
498	}
499
500	if (!status) {
501		if (ul_load_monitor_timer != 0xFFFF) {
502			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
503			    ul_load_monitor_timer;
504			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
505		} else {
506			dev_dbg(bridge, "Not able to get the symbol for Load "
507				"Monitor Timer\n");
508		}
509	}
510
511	if (!status) {
512		if (ul_bios_gp_timer != 0xFFFF) {
513			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
514			    ul_bios_gp_timer;
515			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
516		} else {
517			dev_dbg(bridge,
518				"Not able to get the symbol for BIOS Timer\n");
519		}
520	}
521
522	if (!status) {
523		/* Set the DSP clock rate */
524		(void)dev_get_symbol(dev_context->hdev_obj,
525				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
526		/*Set Autoidle Mode for IVA2 PLL */
527		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
528				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
529
530		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
531			/* Get the clock rate */
532			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
533			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
534				__func__, ul_dsp_clk_rate);
535			(void)bridge_brd_write(dev_context,
536					       (u8 *) &ul_dsp_clk_rate,
537					       ul_dsp_clk_addr, sizeof(u32), 0);
538		}
539		/*
540		 * Enable Mailbox events and also drain any pending
541		 * stale messages.
542		 */
543		dev_context->mbox = omap_mbox_get("dsp");
544		if (IS_ERR(dev_context->mbox)) {
545			dev_context->mbox = NULL;
546			pr_err("%s: Failed to get dsp mailbox handle\n",
547								__func__);
548			status = -EPERM;
549		}
550
551	}
552	if (!status) {
553		dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
554
555/*PM_IVA2GRPSEL_PER = 0xC0;*/
556		temp = readl(resources->dw_per_pm_base + 0xA8);
557		temp = (temp & 0xFFFFFF30) | 0xC0;
558		writel(temp, resources->dw_per_pm_base + 0xA8);
559
560/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
561		temp = readl(resources->dw_per_pm_base + 0xA4);
562		temp = (temp & 0xFFFFFF3F);
563		writel(temp, resources->dw_per_pm_base + 0xA4);
564/*CM_SLEEPDEP_PER |= 0x04; */
565		temp = readl(resources->dw_per_base + 0x44);
566		temp = (temp & 0xFFFFFFFB) | 0x04;
567		writel(temp, resources->dw_per_base + 0x44);
568
569/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
570		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
571					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
572
573		/* Let DSP go */
574		dev_dbg(bridge, "%s Unreset\n", __func__);
575		/* Enable DSP MMU Interrupts */
576		hw_mmu_event_enable(resources->dw_dmmu_base,
577				    HW_MMU_ALL_INTERRUPTS);
578		/* release the RST1, DSP starts executing now .. */
579		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
580					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
581
582		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
583		dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
584		if (dsp_debug)
585			while (__raw_readw(dw_sync_addr))
				;
587
588		/* Wait for DSP to clear word in shared memory */
589		/* Read the Location */
590		if (!wait_for_start(dev_context, dw_sync_addr))
591			status = -ETIMEDOUT;
592
593		/* Start wdt */
594		dsp_wdt_sm_set((void *)ul_shm_base);
595		dsp_wdt_enable(true);
596
597		status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
598		if (hio_mgr) {
599			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
600			/* Write the synchronization bit to indicate the
601			 * completion of OPP table update to DSP
602			 */
603			__raw_writel(0XCAFECAFE, dw_sync_addr);
604
605			/* update board state */
606			dev_context->dw_brd_state = BRD_RUNNING;
607			/* (void)chnlsm_enable_interrupt(dev_context); */
608		} else {
609			dev_context->dw_brd_state = BRD_UNKNOWN;
610		}
611	}
612	return status;
613}
614
615/*
616 *  ======== bridge_brd_stop ========
617 *  purpose:
618 *      Puts DSP in self loop.
619 *
620 *  Preconditions :
621 *  a) None
622 */
623static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
624{
625	int status = 0;
626	struct bridge_dev_context *dev_context = dev_ctxt;
627	struct pg_table_attrs *pt_attrs;
628	u32 dsp_pwr_state;
629	int clk_status;
630	struct dspbridge_platform_data *pdata =
631				omap_dspbridge_dev->dev.platform_data;
632
633	if (dev_context->dw_brd_state == BRD_STOPPED)
634		return status;
635
	/* As per the TRM, it is advised to first drive the IVA2 to 'Standby' mode
	 * before turning off the clocks. This ensures that there are no
	 * pending L3 or other transactions from the IVA2 */
639	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
640					OMAP_POWERSTATEST_MASK;
641	if (dsp_pwr_state != PWRDM_POWER_OFF) {
642		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
643					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
644		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
645		mdelay(10);
646
647		/* IVA2 is not in OFF state */
648		/* Set PM_PWSTCTRL_IVA2  to OFF */
649		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
650			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
651		/* Set the SW supervised state transition for Sleep */
652		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
653					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
654	}
655	udelay(10);
656	/* Release the Ext Base virtual Address as the next DSP Program
657	 * may have a different load address */
658	if (dev_context->dw_dsp_ext_base_addr)
659		dev_context->dw_dsp_ext_base_addr = 0;
660
661	dev_context->dw_brd_state = BRD_STOPPED;	/* update board state */
662
663	dsp_wdt_enable(false);
664
665	/* This is a good place to clear the MMU page tables as well */
666	if (dev_context->pt_attrs) {
667		pt_attrs = dev_context->pt_attrs;
668		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
669		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
670		memset((u8 *) pt_attrs->pg_info, 0x00,
671		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
672	}
673	/* Disable the mailbox interrupts */
674	if (dev_context->mbox) {
675		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
676		omap_mbox_put(dev_context->mbox);
677		dev_context->mbox = NULL;
678	}
679	/* Reset IVA2 clocks*/
680	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
681			OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
682
683	clk_status = dsp_clk_disable(DSP_CLK_IVA2);
684
685	return status;
686}
687
688/*
689 *  ======== bridge_brd_status ========
690 *      Returns the board status.
691 */
692static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
693				    int *board_state)
694{
695	struct bridge_dev_context *dev_context = dev_ctxt;
696	*board_state = dev_context->dw_brd_state;
697	return 0;
698}
699
700/*
701 *  ======== bridge_brd_write ========
702 *      Copies the buffers to DSP internal or external memory.
703 */
704static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
705				   u8 *host_buff, u32 dsp_addr,
706				   u32 ul_num_bytes, u32 mem_type)
707{
708	int status = 0;
709	struct bridge_dev_context *dev_context = dev_ctxt;
710
711	if (dsp_addr < dev_context->dw_dsp_start_add) {
712		status = -EPERM;
713		return status;
714	}
715	if ((dsp_addr - dev_context->dw_dsp_start_add) <
716	    dev_context->dw_internal_size) {
717		status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
718					ul_num_bytes, mem_type);
719	} else {
720		status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
721					    ul_num_bytes, mem_type, false);
722	}
723
724	return status;
725}
726
727/*
728 *  ======== bridge_dev_create ========
729 *      Creates a driver object. Puts DSP in self loop.
730 */
731static int bridge_dev_create(struct bridge_dev_context
732					**dev_cntxt,
733					struct dev_object *hdev_obj,
734					struct cfg_hostres *config_param)
735{
736	int status = 0;
737	struct bridge_dev_context *dev_context = NULL;
738	s32 entry_ndx;
739	struct cfg_hostres *resources = config_param;
740	struct pg_table_attrs *pt_attrs;
741	u32 pg_tbl_pa;
742	u32 pg_tbl_va;
743	u32 align_size;
744	struct drv_data *drv_datap = dev_get_drvdata(bridge);
745
746	/* Allocate and initialize a data structure to contain the bridge driver
747	 *  state, which becomes the context for later calls into this driver */
748	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
749	if (!dev_context) {
750		status = -ENOMEM;
751		goto func_end;
752	}
753
754	dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
755	dev_context->dw_self_loop = (u32) NULL;
756	dev_context->dsp_per_clks = 0;
757	dev_context->dw_internal_size = OMAP_DSP_SIZE;
758	/*  Clear dev context MMU table entries.
759	 *  These get set on bridge_io_on_loaded() call after program loaded. */
760	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
761		dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
762		    dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
763	}
764	dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
765								 (config_param->
766								  dw_mem_base
767								  [3]),
768								 config_param->
769								 dw_mem_length
770								 [3]);
771	if (!dev_context->dw_dsp_base_addr)
772		status = -EPERM;
773
774	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
775	if (pt_attrs != NULL) {
		/* Assuming that we use only the DSP's memory map
		 * up to 0x4000:0000, we need only 1024
		 * L1 entries, i.e. L1 size = 4K */
779		pt_attrs->l1_size = 0x1000;
780		align_size = pt_attrs->l1_size;
781		/* Align sizes are expected to be power of 2 */
782		/* we like to get aligned on L1 table size */
783		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
784						     align_size, &pg_tbl_pa);
785
786		/* Check if the PA is aligned for us */
787		if ((pg_tbl_pa) & (align_size - 1)) {
			/* PA not aligned to the page table size;
			 * allocate more memory and align */
790			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
791					  pt_attrs->l1_size);
792			/* we like to get aligned on L1 table size */
793			pg_tbl_va =
794			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
795						     align_size, &pg_tbl_pa);
796			/* We should be able to get aligned table now */
797			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
798			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
799			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
800			/* Align the PA to the next 'align'  boundary */
801			pt_attrs->l1_base_pa =
802			    ((pg_tbl_pa) +
803			     (align_size - 1)) & (~(align_size - 1));
804			pt_attrs->l1_base_va =
805			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
806		} else {
807			/* We got aligned PA, cool */
808			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
809			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
810			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
811			pt_attrs->l1_base_pa = pg_tbl_pa;
812			pt_attrs->l1_base_va = pg_tbl_va;
813		}
814		if (pt_attrs->l1_base_va)
815			memset((u8 *) pt_attrs->l1_base_va, 0x00,
816			       pt_attrs->l1_size);
817
		/* Number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
		 * L4 pages */
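		/* Each L2 (coarse) table maps 1 MB, so DMMPOOLSIZE >> 20 tables
		 * cover the DMM pool; the extra 6 cover the regions listed above */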
820		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
821		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
822		    pt_attrs->l2_num_pages;
823		align_size = 4;	/* Make it u32 aligned */
		/* Allocate the block of L2 page tables */
825		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
826						     align_size, &pg_tbl_pa);
827		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
828		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
829		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
830		pt_attrs->l2_base_pa = pg_tbl_pa;
831		pt_attrs->l2_base_va = pg_tbl_va;
832
833		if (pt_attrs->l2_base_va)
834			memset((u8 *) pt_attrs->l2_base_va, 0x00,
835			       pt_attrs->l2_size);
836
837		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
838					sizeof(struct page_info), GFP_KERNEL);
839		dev_dbg(bridge,
840			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
841			"%x, size %x\n", pt_attrs->l1_base_pa,
842			pt_attrs->l1_base_va, pt_attrs->l1_size,
843			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
844			pt_attrs->l2_size);
845		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
846			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
847	}
848	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
849	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
850		dev_context->pt_attrs = pt_attrs;
851	else
852		status = -ENOMEM;
853
854	if (!status) {
855		spin_lock_init(&pt_attrs->pg_lock);
856		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
857
858		/* Set the Clock Divisor for the DSP module */
859		udelay(5);
860		/* MMU address is obtained from the host
861		 * resources struct */
862		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
863	}
864	if (!status) {
865		dev_context->hdev_obj = hdev_obj;
866		/* Store current board state. */
867		dev_context->dw_brd_state = BRD_UNKNOWN;
868		dev_context->resources = resources;
869		dsp_clk_enable(DSP_CLK_IVA2);
870		bridge_brd_stop(dev_context);
871		/* Return ptr to our device state to the DSP API for storage */
872		*dev_cntxt = dev_context;
873	} else {
874		if (pt_attrs != NULL) {
875			kfree(pt_attrs->pg_info);
876
877			if (pt_attrs->l2_tbl_alloc_va) {
878				mem_free_phys_mem((void *)
879						  pt_attrs->l2_tbl_alloc_va,
880						  pt_attrs->l2_tbl_alloc_pa,
881						  pt_attrs->l2_tbl_alloc_sz);
882			}
883			if (pt_attrs->l1_tbl_alloc_va) {
884				mem_free_phys_mem((void *)
885						  pt_attrs->l1_tbl_alloc_va,
886						  pt_attrs->l1_tbl_alloc_pa,
887						  pt_attrs->l1_tbl_alloc_sz);
888			}
889		}
890		kfree(pt_attrs);
891		kfree(dev_context);
892	}
893func_end:
894	return status;
895}
896
897/*
898 *  ======== bridge_dev_ctrl ========
899 *      Receives device specific commands.
900 */
901static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
902				  u32 dw_cmd, void *pargs)
903{
904	int status = 0;
905	struct bridge_ioctl_extproc *pa_ext_proc =
906					(struct bridge_ioctl_extproc *)pargs;
907	s32 ndx;
908
909	switch (dw_cmd) {
910	case BRDIOCTL_CHNLREAD:
911		break;
912	case BRDIOCTL_CHNLWRITE:
913		break;
914	case BRDIOCTL_SETMMUCONFIG:
915		/* store away dsp-mmu setup values for later use */
916		for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
917			dev_context->atlb_entry[ndx] = *pa_ext_proc;
918		break;
919	case BRDIOCTL_DEEPSLEEP:
920	case BRDIOCTL_EMERGENCYSLEEP:
921		/* Currently only DSP Idle is supported Need to update for
922		 * later releases */
923		status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
924		break;
925	case BRDIOCTL_WAKEUP:
926		status = wake_dsp(dev_context, pargs);
927		break;
928	case BRDIOCTL_CLK_CTRL:
929		status = 0;
930		/* Looking For Baseport Fix for Clocks */
931		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
932		break;
933	case BRDIOCTL_PWR_HIBERNATE:
934		status = handle_hibernation_from_dsp(dev_context);
935		break;
936	case BRDIOCTL_PRESCALE_NOTIFY:
937		status = pre_scale_dsp(dev_context, pargs);
938		break;
939	case BRDIOCTL_POSTSCALE_NOTIFY:
940		status = post_scale_dsp(dev_context, pargs);
941		break;
942	case BRDIOCTL_CONSTRAINT_REQUEST:
943		status = handle_constraints_set(dev_context, pargs);
944		break;
945	default:
946		status = -EPERM;
947		break;
948	}
949	return status;
950}
951
952/*
953 *  ======== bridge_dev_destroy ========
954 *      Destroys the driver object.
955 */
956static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
957{
958	struct pg_table_attrs *pt_attrs;
959	int status = 0;
960	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
961	    dev_ctxt;
962	struct cfg_hostres *host_res;
963	u32 shm_size;
964	struct drv_data *drv_datap = dev_get_drvdata(bridge);
965
966	/* It should never happen */
967	if (!dev_ctxt)
968		return -EFAULT;
969
970	/* first put the device to stop state */
971	bridge_brd_stop(dev_context);
972	if (dev_context->pt_attrs) {
973		pt_attrs = dev_context->pt_attrs;
974		kfree(pt_attrs->pg_info);
975
976		if (pt_attrs->l2_tbl_alloc_va) {
977			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
978					  pt_attrs->l2_tbl_alloc_pa,
979					  pt_attrs->l2_tbl_alloc_sz);
980		}
981		if (pt_attrs->l1_tbl_alloc_va) {
982			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
983					  pt_attrs->l1_tbl_alloc_pa,
984					  pt_attrs->l1_tbl_alloc_sz);
985		}
986		kfree(pt_attrs);
987
988	}
989
990	if (dev_context->resources) {
991		host_res = dev_context->resources;
992		shm_size = drv_datap->shm_size;
993		if (shm_size >= 0x10000) {
994			if ((host_res->dw_mem_base[1]) &&
995			    (host_res->dw_mem_phys[1])) {
996				mem_free_phys_mem((void *)
997						  host_res->dw_mem_base
998						  [1],
999						  host_res->dw_mem_phys
1000						  [1], shm_size);
1001			}
1002		} else {
1003			dev_dbg(bridge, "%s: Error getting shm size "
1004				"from registry: %x. Not calling "
1005				"mem_free_phys_mem\n", __func__,
1006				status);
1007		}
1008		host_res->dw_mem_base[1] = 0;
1009		host_res->dw_mem_phys[1] = 0;
1010
1011		if (host_res->dw_mem_base[0])
1012			iounmap((void *)host_res->dw_mem_base[0]);
1013		if (host_res->dw_mem_base[2])
1014			iounmap((void *)host_res->dw_mem_base[2]);
1015		if (host_res->dw_mem_base[3])
1016			iounmap((void *)host_res->dw_mem_base[3]);
1017		if (host_res->dw_mem_base[4])
1018			iounmap((void *)host_res->dw_mem_base[4]);
1019		if (host_res->dw_dmmu_base)
1020			iounmap(host_res->dw_dmmu_base);
1021		if (host_res->dw_per_base)
1022			iounmap(host_res->dw_per_base);
1023		if (host_res->dw_per_pm_base)
1024			iounmap((void *)host_res->dw_per_pm_base);
1025		if (host_res->dw_core_pm_base)
1026			iounmap((void *)host_res->dw_core_pm_base);
1027		if (host_res->dw_sys_ctrl_base)
1028			iounmap(host_res->dw_sys_ctrl_base);
1029
1030		host_res->dw_mem_base[0] = (u32) NULL;
1031		host_res->dw_mem_base[2] = (u32) NULL;
1032		host_res->dw_mem_base[3] = (u32) NULL;
1033		host_res->dw_mem_base[4] = (u32) NULL;
1034		host_res->dw_dmmu_base = NULL;
1035		host_res->dw_sys_ctrl_base = NULL;
1036
1037		kfree(host_res);
1038	}
1039
1040	/* Free the driver's device context: */
1041	kfree(drv_datap->base_img);
1042	kfree(drv_datap);
1043	dev_set_drvdata(bridge, NULL);
1044	kfree((void *)dev_ctxt);
1045	return status;
1046}
1047
1048static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
1049				   u32 dsp_dest_addr, u32 dsp_src_addr,
1050				   u32 ul_num_bytes, u32 mem_type)
1051{
1052	int status = 0;
1053	u32 src_addr = dsp_src_addr;
1054	u32 dest_addr = dsp_dest_addr;
1055	u32 copy_bytes = 0;
1056	u32 total_bytes = ul_num_bytes;
1057	u8 host_buf[BUFFERSIZE];
1058	struct bridge_dev_context *dev_context = dev_ctxt;
1059	while (total_bytes > 0 && !status) {
1060		copy_bytes =
1061		    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
1062		/* Read from External memory */
1063		status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
1064					   copy_bytes, mem_type);
1065		if (!status) {
1066			if (dest_addr < (dev_context->dw_dsp_start_add +
1067					 dev_context->dw_internal_size)) {
1068				/* Write to Internal memory */
1069				status = write_dsp_data(dev_ctxt, host_buf,
1070							dest_addr, copy_bytes,
1071							mem_type);
1072			} else {
1073				/* Write to External memory */
1074				status =
1075				    write_ext_dsp_data(dev_ctxt, host_buf,
1076						       dest_addr, copy_bytes,
1077						       mem_type, false);
1078			}
1079		}
1080		total_bytes -= copy_bytes;
1081		src_addr += copy_bytes;
1082		dest_addr += copy_bytes;
1083	}
1084	return status;
1085}
1086
/* Unlike bridge_brd_write, this mem write does not halt the DSP before writing */
1088static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
1089				    u8 *host_buff, u32 dsp_addr,
1090				    u32 ul_num_bytes, u32 mem_type)
1091{
1092	int status = 0;
1093	struct bridge_dev_context *dev_context = dev_ctxt;
1094	u32 ul_remain_bytes = 0;
1095	u32 ul_bytes = 0;
1096	ul_remain_bytes = ul_num_bytes;
1097	while (ul_remain_bytes > 0 && !status) {
1098		ul_bytes =
1099		    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
1100		if (dsp_addr < (dev_context->dw_dsp_start_add +
1101				 dev_context->dw_internal_size)) {
1102			status =
1103			    write_dsp_data(dev_ctxt, host_buff, dsp_addr,
1104					   ul_bytes, mem_type);
1105		} else {
1106			status = write_ext_dsp_data(dev_ctxt, host_buff,
1107						    dsp_addr, ul_bytes,
1108						    mem_type, true);
1109		}
1110		ul_remain_bytes -= ul_bytes;
1111		dsp_addr += ul_bytes;
1112		host_buff = host_buff + ul_bytes;
1113	}
1114	return status;
1115}
1116
1117/*
1118 *  ======== bridge_brd_mem_map ========
 *      This function maps an MPU buffer into the DSP address space. It performs
 *  linear to physical address translation if required. It translates each
 *  page, since linear addresses can be physically non-contiguous.
 *  All address & size arguments are assumed to be page aligned (in proc.c).
1123 *
1124 *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1125 */
1126static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1127				  u32 ul_mpu_addr, u32 virt_addr,
1128				  u32 ul_num_bytes, u32 ul_map_attr,
1129				  struct page **mapped_pages)
1130{
1131	u32 attrs;
1132	int status = 0;
1133	struct bridge_dev_context *dev_context = dev_ctxt;
1134	struct hw_mmu_map_attrs_t hw_attrs;
1135	struct vm_area_struct *vma;
1136	struct mm_struct *mm = current->mm;
1137	u32 write = 0;
1138	u32 num_usr_pgs = 0;
1139	struct page *mapped_page, *pg;
1140	s32 pg_num;
1141	u32 va = virt_addr;
1142	struct task_struct *curr_task = current;
1143	u32 pg_i = 0;
1144	u32 mpu_addr, pa;
1145
1146	dev_dbg(bridge,
1147		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1148		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1149		ul_map_attr);
1150	if (ul_num_bytes == 0)
1151		return -EINVAL;
1152
1153	if (ul_map_attr & DSP_MAP_DIR_MASK) {
1154		attrs = ul_map_attr;
1155	} else {
1156		/* Assign default attributes */
1157		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1158	}
1159	/* Take mapping properties */
1160	if (attrs & DSP_MAPBIGENDIAN)
1161		hw_attrs.endianism = HW_BIG_ENDIAN;
1162	else
1163		hw_attrs.endianism = HW_LITTLE_ENDIAN;
1164
1165	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1166	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1167	/* Ignore element_size if mixed_size is enabled */
1168	if (hw_attrs.mixed_size == 0) {
1169		if (attrs & DSP_MAPELEMSIZE8) {
1170			/* Size is 8 bit */
1171			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1172		} else if (attrs & DSP_MAPELEMSIZE16) {
1173			/* Size is 16 bit */
1174			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1175		} else if (attrs & DSP_MAPELEMSIZE32) {
1176			/* Size is 32 bit */
1177			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1178		} else if (attrs & DSP_MAPELEMSIZE64) {
1179			/* Size is 64 bit */
1180			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1181		} else {
1182			/*
1183			 * Mixedsize isn't enabled, so size can't be
1184			 * zero here
1185			 */
1186			return -EINVAL;
1187		}
1188	}
1189	if (attrs & DSP_MAPDONOTLOCK)
1190		hw_attrs.donotlockmpupage = 1;
1191	else
1192		hw_attrs.donotlockmpupage = 0;
1193
1194	if (attrs & DSP_MAPVMALLOCADDR) {
1195		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1196				       ul_num_bytes, &hw_attrs);
1197	}
1198	/*
1199	 * Do OS-specific user-va to pa translation.
1200	 * Combine physically contiguous regions to reduce TLBs.
1201	 * Pass the translated pa to pte_update.
1202	 */
1203	if ((attrs & DSP_MAPPHYSICALADDR)) {
1204		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1205				    ul_num_bytes, &hw_attrs);
1206		goto func_cont;
1207	}
1208
1209	/*
1210	 * Important Note: ul_mpu_addr is mapped from user application process
1211	 * to current process - it must lie completely within the current
1212	 * virtual memory address space in order to be of use to us here!
1213	 */
1214	down_read(&mm->mmap_sem);
1215	vma = find_vma(mm, ul_mpu_addr);
1216	if (vma)
1217		dev_dbg(bridge,
			"VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1219			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1220			ul_num_bytes, vma->vm_start, vma->vm_end,
1221			vma->vm_flags);
1222
1223	/*
1224	 * It is observed that under some circumstances, the user buffer is
1225	 * spread across several VMAs. So loop through and check if the entire
1226	 * user buffer is covered
1227	 */
1228	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1229		/* jump to the next VMA region */
1230		vma = find_vma(mm, vma->vm_end + 1);
1231		dev_dbg(bridge,
1232			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1233			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1234			ul_num_bytes, vma->vm_start, vma->vm_end,
1235			vma->vm_flags);
1236	}
1237	if (!vma) {
1238		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1239		       __func__, ul_mpu_addr, ul_num_bytes);
1240		status = -EINVAL;
1241		up_read(&mm->mmap_sem);
1242		goto func_cont;
1243	}
1244
1245	if (vma->vm_flags & VM_IO) {
1246		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1247		mpu_addr = ul_mpu_addr;
1248
1249		/* Get the physical addresses for user buffer */
1250		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1251			pa = user_va2_pa(mm, mpu_addr);
1252			if (!pa) {
1253				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical "
				       "address is invalid\n");
1256				break;
1257			}
1258			if (pfn_valid(__phys_to_pfn(pa))) {
1259				pg = PHYS_TO_PAGE(pa);
1260				get_page(pg);
1261				if (page_count(pg) < 1) {
1262					pr_err("Bad page in VM_IO buffer\n");
1263					bad_page_dump(pa, pg);
1264				}
1265			}
1266			status = pte_set(dev_context->pt_attrs, pa,
1267					 va, HW_PAGE_SIZE4KB, &hw_attrs);
1268			if (status)
1269				break;
1270
1271			va += HW_PAGE_SIZE4KB;
1272			mpu_addr += HW_PAGE_SIZE4KB;
1273			pa += HW_PAGE_SIZE4KB;
1274		}
1275	} else {
1276		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1277		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1278			write = 1;
1279
1280		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1281			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1282						write, 1, &mapped_page, NULL);
1283			if (pg_num > 0) {
1284				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing "
					       "get_user_pages on "
					       "user buffer\n");
1288					bad_page_dump(page_to_phys(mapped_page),
1289						      mapped_page);
1290				}
1291				status = pte_set(dev_context->pt_attrs,
1292						 page_to_phys(mapped_page), va,
1293						 HW_PAGE_SIZE4KB, &hw_attrs);
1294				if (status)
1295					break;
1296
1297				if (mapped_pages)
1298					mapped_pages[pg_i] = mapped_page;
1299
1300				va += HW_PAGE_SIZE4KB;
1301				ul_mpu_addr += HW_PAGE_SIZE4KB;
1302			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED, "
				       "MPU addr = 0x%x, "
				       "vma->vm_flags = 0x%lx, "
				       "get_user_pages Err "
				       "Value = %d, Buffer "
				       "size = 0x%x\n", ul_mpu_addr,
1309				       vma->vm_flags, pg_num, ul_num_bytes);
1310				status = -EPERM;
1311				break;
1312			}
1313		}
1314	}
1315	up_read(&mm->mmap_sem);
1316func_cont:
1317	if (status) {
1318		/*
		 * Roll back the mapped pages in case the mapping failed
		 * midway
1321		 */
1322		if (pg_i) {
1323			bridge_brd_mem_un_map(dev_context, virt_addr,
1324					   (pg_i * PG_SIZE4K));
1325		}
1326		status = -EPERM;
1327	}
1328	/*
1329	 * In any case, flush the TLB
	 * This is done here instead of in pte_update to avoid unnecessary
1331	 * repetition while mapping non-contiguous physical regions of a virtual
1332	 * region
1333	 */
1334	flush_all(dev_context);
1335	dev_dbg(bridge, "%s status %x\n", __func__, status);
1336	return status;
1337}
1338
1339/*
1340 *  ======== bridge_brd_mem_un_map ========
1341 *      Invalidate the PTEs for the DSP VA block to be unmapped.
1342 *
 *      PTEs of a mapped memory block are contiguous in any page table,
 *      so instead of looking up the PTE address for every 4K block,
 *      we clear consecutive PTEs until we have unmapped all the bytes.
1346 */
1347static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1348				     u32 virt_addr, u32 ul_num_bytes)
1349{
1350	u32 l1_base_va;
1351	u32 l2_base_va;
1352	u32 l2_base_pa;
1353	u32 l2_page_num;
1354	u32 pte_val;
1355	u32 pte_size;
1356	u32 pte_count;
1357	u32 pte_addr_l1;
1358	u32 pte_addr_l2 = 0;
1359	u32 rem_bytes;
1360	u32 rem_bytes_l2;
1361	u32 va_curr;
1362	struct page *pg = NULL;
1363	int status = 0;
1364	struct bridge_dev_context *dev_context = dev_ctxt;
1365	struct pg_table_attrs *pt = dev_context->pt_attrs;
1366	u32 temp;
1367	u32 paddr;
1368	u32 numof4k_pages = 0;
1369
1370	va_curr = virt_addr;
1371	rem_bytes = ul_num_bytes;
1372	rem_bytes_l2 = 0;
1373	l1_base_va = pt->l1_base_va;
1374	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1375	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1376		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1377		ul_num_bytes, l1_base_va, pte_addr_l1);
1378
1379	while (rem_bytes && !status) {
1380		u32 va_curr_orig = va_curr;
1381		/* Find whether the L1 PTE points to a valid L2 PT */
1382		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1383		pte_val = *(u32 *) pte_addr_l1;
1384		pte_size = hw_mmu_pte_size_l1(pte_val);
1385
1386		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1387			goto skip_coarse_page;
1388
1389		/*
1390		 * Get the L2 PA from the L1 PTE, and find
1391		 * corresponding L2 VA
1392		 */
1393		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1394		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1395		l2_page_num =
1396		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1397		/*
1398		 * Find the L2 PTE address from which we will start
1399		 * clearing, the number of PTEs to be cleared on this
1400		 * page, and the size of VA space that needs to be
1401		 * cleared on this L2 page
1402		 */
1403		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
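		/* Byte offset of this PTE within the L2 table, then the number
		 * of PTE slots remaining from there to the end of that table */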
1404		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1405		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1406		if (rem_bytes < (pte_count * PG_SIZE4K))
1407			pte_count = rem_bytes / PG_SIZE4K;
1408		rem_bytes_l2 = pte_count * PG_SIZE4K;
1409
1410		/*
1411		 * Unmap the VA space on this L2 PT. A quicker way
1412		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, the code below checks that we don't
		 * clear invalid entries, or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs further
		 * below
1417		 */
1418		while (rem_bytes_l2 && !status) {
1419			pte_val = *(u32 *) pte_addr_l2;
1420			pte_size = hw_mmu_pte_size_l2(pte_val);
1421			/* va_curr aligned to pte_size? */
1422			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1423			    va_curr & (pte_size - 1)) {
1424				status = -EPERM;
1425				break;
1426			}
1427
1428			/* Collect Physical addresses from VA */
1429			paddr = (pte_val & ~(pte_size - 1));
1430			if (pte_size == HW_PAGE_SIZE64KB)
1431				numof4k_pages = 16;
1432			else
1433				numof4k_pages = 1;
1434			temp = 0;
1435			while (temp++ < numof4k_pages) {
1436				if (!pfn_valid(__phys_to_pfn(paddr))) {
1437					paddr += HW_PAGE_SIZE4KB;
1438					continue;
1439				}
1440				pg = PHYS_TO_PAGE(paddr);
1441				if (page_count(pg) < 1) {
1442					pr_info("DSPBRIDGE: UNMAP function: "
1443						"COUNT 0 FOR PA 0x%x, size = "
1444						"0x%x\n", paddr, ul_num_bytes);
1445					bad_page_dump(paddr, pg);
1446				} else {
1447					set_page_dirty(pg);
1448					page_cache_release(pg);
1449				}
1450				paddr += HW_PAGE_SIZE4KB;
1451			}
1452			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
1453				status = -EPERM;
1454				goto EXIT_LOOP;
1455			}
1456
1457			status = 0;
1458			rem_bytes_l2 -= pte_size;
1459			va_curr += pte_size;
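			/* Advance by the number of 4 KB PTE slots this entry
			 * spans: 16 for a 64 KB page, 1 for a 4 KB page */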
1460			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1461		}
1462		spin_lock(&pt->pg_lock);
1463		if (rem_bytes_l2 == 0) {
1464			pt->pg_info[l2_page_num].num_entries -= pte_count;
1465			if (pt->pg_info[l2_page_num].num_entries == 0) {
1466				/*
1467				 * Clear the L1 PTE pointing to the L2 PT
1468				 */
1469				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1470						     HW_MMU_COARSE_PAGE_SIZE))
1471					status = 0;
1472				else {
1473					status = -EPERM;
1474					spin_unlock(&pt->pg_lock);
1475					goto EXIT_LOOP;
1476				}
1477			}
1478			rem_bytes -= pte_count * PG_SIZE4K;
1479		} else
1480			status = -EPERM;
1481
1482		spin_unlock(&pt->pg_lock);
1483		continue;
1484skip_coarse_page:
1485		/* va_curr aligned to pte_size? */
1486		/* pte_size = 1 MB or 16 MB */
1487		if (pte_size == 0 || rem_bytes < pte_size ||
1488		    va_curr & (pte_size - 1)) {
1489			status = -EPERM;
1490			break;
1491		}
1492
1493		if (pte_size == HW_PAGE_SIZE1MB)
1494			numof4k_pages = 256;
1495		else
1496			numof4k_pages = 4096;
1497		temp = 0;
1498		/* Collect Physical addresses from VA */
1499		paddr = (pte_val & ~(pte_size - 1));
1500		while (temp++ < numof4k_pages) {
1501			if (pfn_valid(__phys_to_pfn(paddr))) {
1502				pg = PHYS_TO_PAGE(paddr);
1503				if (page_count(pg) < 1) {
1504					pr_info("DSPBRIDGE: UNMAP function: "
1505						"COUNT 0 FOR PA 0x%x, size = "
1506						"0x%x\n", paddr, ul_num_bytes);
1507					bad_page_dump(paddr, pg);
1508				} else {
1509					set_page_dirty(pg);
1510					page_cache_release(pg);
1511				}
1512			}
1513			paddr += HW_PAGE_SIZE4KB;
1514		}
1515		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
1516			status = 0;
1517			rem_bytes -= pte_size;
1518			va_curr += pte_size;
1519		} else {
1520			status = -EPERM;
1521			goto EXIT_LOOP;
1522		}
1523	}
1524	/*
	 * It is better to flush the TLB here, so that any stale entries
1526	 * get flushed
1527	 */
1528EXIT_LOOP:
1529	flush_all(dev_context);
1530	dev_dbg(bridge,
1531		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1532		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1533		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1534	return status;
1535}
1536
1537/*
1538 *  ======== user_va2_pa ========
1539 *  Purpose:
1540 *      This function walks through the page tables to convert a userland
 *      virtual address to a physical address
1542 */
1543static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1544{
1545	pgd_t *pgd;
1546	pmd_t *pmd;
1547	pte_t *ptep, pte;
1548
1549	pgd = pgd_offset(mm, address);
1550	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
1551		pmd = pmd_offset(pgd, address);
1552		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
1553			ptep = pte_offset_map(pmd, address);
1554			if (ptep) {
1555				pte = *ptep;
1556				if (pte_present(pte))
1557					return pte & PAGE_MASK;
1558			}
1559		}
1560	}
1561
1562	return 0;
1563}
1564
1565/*
1566 *  ======== pte_update ========
 *      This function calculates the optimum page-aligned addresses and sizes.
 *      The caller must pass page-aligned values.
1569 */
1570static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1571			     u32 va, u32 size,
1572			     struct hw_mmu_map_attrs_t *map_attrs)
1573{
1574	u32 i;
1575	u32 all_bits;
1576	u32 pa_curr = pa;
1577	u32 va_curr = va;
1578	u32 num_bytes = size;
1579	struct bridge_dev_context *dev_context = dev_ctxt;
1580	int status = 0;
1581	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1582		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1583	};
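	/* page_size[] is ordered largest to smallest, so the loop below always
	 * picks the largest page size that alignment and remaining size allow */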
1584
1585	while (num_bytes && !status) {
1586		/* To find the max. page size with which both PA & VA are
1587		 * aligned */
1588		all_bits = pa_curr | va_curr;
1589
1590		for (i = 0; i < 4; i++) {
1591			if ((num_bytes >= page_size[i]) && ((all_bits &
1592							     (page_size[i] -
1593							      1)) == 0)) {
1594				status =
1595				    pte_set(dev_context->pt_attrs, pa_curr,
1596					    va_curr, page_size[i], map_attrs);
1597				pa_curr += page_size[i];
1598				va_curr += page_size[i];
1599				num_bytes -= page_size[i];
1600				/* Don't try smaller sizes. Hopefully we have
1601				 * reached an address aligned to a bigger page
1602				 * size */
1603				break;
1604			}
1605		}
1606	}
1607
1608	return status;
1609}
1610
1611/*
1612 *  ======== pte_set ========
 *      This function calculates the PTE address (MPU virtual) to be updated.
 *      It also manages the L2 page tables.
1615 */
1616static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1617			  u32 size, struct hw_mmu_map_attrs_t *attrs)
1618{
1619	u32 i;
1620	u32 pte_val;
1621	u32 pte_addr_l1;
1622	u32 pte_size;
1623	/* Base address of the PT that will be updated */
1624	u32 pg_tbl_va;
1625	u32 l1_base_va;
	/* The compiler warns that the next three variables might be used
	 * uninitialized in this function. That does not seem to be the case,
	 * but initialize them anyway. */
1629	u32 l2_base_va = 0;
1630	u32 l2_base_pa = 0;
1631	u32 l2_page_num = 0;
1632	int status = 0;
1633
1634	l1_base_va = pt->l1_base_va;
1635	pg_tbl_va = l1_base_va;
1636	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1637		/* Find whether the L1 PTE points to a valid L2 PT */
1638		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1639		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1640			pte_val = *(u32 *) pte_addr_l1;
1641			pte_size = hw_mmu_pte_size_l1(pte_val);
1642		} else {
1643			return -EPERM;
1644		}
1645		spin_lock(&pt->pg_lock);
1646		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1647			/* Get the L2 PA from the L1 PTE, and find
1648			 * corresponding L2 VA */
1649			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1650			l2_base_va =
1651			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1652			l2_page_num =
1653			    (l2_base_pa -
1654			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1655		} else if (pte_size == 0) {
1656			/* L1 PTE is invalid. Allocate a L2 PT and
1657			 * point the L1 PTE to it */
1658			/* Find a free L2 PT. */
1659			for (i = 0; (i < pt->l2_num_pages) &&
1660			     (pt->pg_info[i].num_entries != 0); i++)
				;
1662			if (i < pt->l2_num_pages) {
1663				l2_page_num = i;
1664				l2_base_pa = pt->l2_base_pa + (l2_page_num *
1665						HW_MMU_COARSE_PAGE_SIZE);
1666				l2_base_va = pt->l2_base_va + (l2_page_num *
1667						HW_MMU_COARSE_PAGE_SIZE);
1668				/* Endianness attributes are ignored for
1669				 * HW_MMU_COARSE_PAGE_SIZE */
1670				status =
1671				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1672						   HW_MMU_COARSE_PAGE_SIZE,
1673						   attrs);
1674			} else {
1675				status = -ENOMEM;
1676			}
1677		} else {
1678			/* Found valid L1 PTE of another size.
1679			 * Should not overwrite it. */
1680			status = -EPERM;
1681		}
1682		if (!status) {
1683			pg_tbl_va = l2_base_va;
1684			if (size == HW_PAGE_SIZE64KB)
1685				pt->pg_info[l2_page_num].num_entries += 16;
1686			else
1687				pt->pg_info[l2_page_num].num_entries++;
1688			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1689				"%x, num_entries %x\n", l2_base_va,
1690				l2_base_pa, l2_page_num,
1691				pt->pg_info[l2_page_num].num_entries);
1692		}
1693		spin_unlock(&pt->pg_lock);
1694	}
1695	if (!status) {
1696		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1697			pg_tbl_va, pa, va, size);
1698		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1699			"mixed_size %x\n", attrs->endianism,
1700			attrs->element_size, attrs->mixed_size);
1701		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1702	}
1703
1704	return status;
1705}
1706
1707/* Memory map kernel VA -- memory allocated with vmalloc */
1708static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1709				  u32 ul_mpu_addr, u32 virt_addr,
1710				  u32 ul_num_bytes,
1711				  struct hw_mmu_map_attrs_t *hw_attrs)
1712{
1713	int status = 0;
1714	struct page *page[1];
1715	u32 i;
1716	u32 pa_curr;
1717	u32 pa_next;
1718	u32 va_curr;
1719	u32 size_curr;
1720	u32 num_pages;
1721	u32 pa;
1722	u32 num_of4k_pages;
1723	u32 temp = 0;
1724
1725	/*
1726	 * Do Kernel va to pa translation.
1727	 * Combine physically contiguous regions to reduce TLBs.
1728	 * Pass the translated pa to pte_update.
1729	 */
1730	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
1731	i = 0;
1732	va_curr = ul_mpu_addr;
1733	page[0] = vmalloc_to_page((void *)va_curr);
1734	pa_next = page_to_phys(page[0]);
1735	while (!status && (i < num_pages)) {
1736		/*
		 * Reuse pa_next from the previous iteration to avoid
1738		 * an extra va2pa call
1739		 */
1740		pa_curr = pa_next;
1741		size_curr = PAGE_SIZE;
1742		/*
1743		 * If the next page is physically contiguous,
1744		 * map it with the current one by increasing
1745		 * the size of the region to be mapped
1746		 */
1747		while (++i < num_pages) {
1748			page[0] =
1749			    vmalloc_to_page((void *)(va_curr + size_curr));
1750			pa_next = page_to_phys(page[0]);
1751
1752			if (pa_next == (pa_curr + size_curr))
1753				size_curr += PAGE_SIZE;
1754			else
1755				break;
1756
1757		}
1758		if (pa_next == 0) {
1759			status = -ENOMEM;
1760			break;
1761		}
1762		pa = pa_curr;
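		/* Pin each 4 KB page of this run so the page_cache_release()
		 * done in bridge_brd_mem_un_map() stays balanced */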
1763		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
		temp = 0;	/* reset for each physically contiguous run */
		while (temp++ < num_of4k_pages) {
1765			get_page(PHYS_TO_PAGE(pa));
1766			pa += HW_PAGE_SIZE4KB;
1767		}
1768		status = pte_update(dev_context, pa_curr, virt_addr +
1769				    (va_curr - ul_mpu_addr), size_curr,
1770				    hw_attrs);
1771		va_curr += size_curr;
1772	}
1773	/*
1774	 * In any case, flush the TLB
	 * This is done here instead of in pte_update to avoid unnecessary
1776	 * repetition while mapping non-contiguous physical regions of a virtual
1777	 * region
1778	 */
1779	flush_all(dev_context);
1780	dev_dbg(bridge, "%s status %x\n", __func__, status);
1781	return status;
1782}
1783
1784/*
1785 *  ======== wait_for_start ========
 *      Wait for the signal from the DSP that it has started, or time out.
1787 */
1788bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
1789{
1790	u16 timeout = TIHELEN_ACKTIMEOUT;
1791
1792	/*  Wait for response from board */
1793	while (__raw_readw(dw_sync_addr) && --timeout)
1794		udelay(10);
1795
1796	/*  If timed out: return false */
1797	if (!timeout) {
		pr_err("%s: Timed out waiting for DSP to start\n", __func__);
1799		return false;
1800	}
1801	return true;
1802}
1803