/*
 * dmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Dynamic Memory Manager (DMM) module manages the DSP virtual address
 * space that can be directly mapped to any MPU buffer or memory region.
 *
 * Notes:
 *   Region: Generic memory entity having a start address and a size
 *   Chunk:  Reserved region
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/*  ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/dmm.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
#define DMM_ADDR_VIRTUAL(a) \
	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
	dyn_mem_map_beg)
#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
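
/* Worked example (illustrative, with a hypothetical base address): if
 * dyn_mem_map_beg = 0x20000000, the DSP virtual address 0x20003000 lies in
 * page index DMM_ADDR_TO_INDEX(0x20003000) = 0x3000 / PG_SIZE4K = 3, and
 * DMM_ADDR_VIRTUAL(&virtual_mapping_table[3]) maps back to 0x20003000. */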

/* DMM Mgr */
struct dmm_object {
	/* dmm_lock is used to serialize access to the memory manager
	 * from multiple threads. */
	spinlock_t dmm_lock;	/* Lock to access dmm mgr */
};

/*  ----------------------------------- Globals */
static u32 refs;		/* module reference count */
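
/* Each map_page entry describes one 4 KB page of the managed DSP virtual
 * address range. The bit-fields pack into a single 32-bit word, so
 * region_size and mapped_size are counted in 4 KB pages and are limited to
 * 2^15 - 1 pages (roughly 128 MB) per region. A "region" is any run of
 * pages; a "chunk" is a region that has been reserved. */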
struct map_page {
	u32 region_size:15;
	u32 mapped_size:15;
	u32 reserved:1;
	u32 mapped:1;
};

/*  Create the free list */
static struct map_page *virtual_mapping_table;
static u32 free_region;		/* The index of free region */
static u32 free_size;
static u32 dyn_mem_map_beg;	/* The beginning of dynamic memory mapping */
static u32 table_size;		/* The size of virt and phys pages tables */

/*  ----------------------------------- Function Prototypes */
static struct map_page *get_region(u32 addr);
static struct map_page *get_free_region(u32 len);
static struct map_page *get_mapped_region(u32 addrs);

/*  ======== dmm_create_tables ========
 *  Purpose:
 *      Create the table that holds information about the physical pages
 *      of the buffers passed in by the user, and the table that holds
 *      information about the virtual memory reserved for the DSP.
 */
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	status = dmm_delete_tables(dmm_obj);
	if (!status) {
		dyn_mem_map_beg = addr;
		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
		/*  Create the free list */
		virtual_mapping_table = __vmalloc(table_size *
				sizeof(struct map_page), GFP_KERNEL |
				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
		if (virtual_mapping_table == NULL)
			status = -ENOMEM;
		else {
			/* On successful allocation,
			 * all entries are zero ('free') */
			free_region = 0;
			free_size = table_size * PG_SIZE4K;
			virtual_mapping_table[0].region_size = table_size;
		}
	}

	if (status)
		pr_err("%s: failure, status 0x%x\n", __func__, status);

	return status;
}
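
/* Sizing example (illustrative): a 16 MB DSP virtual pool (size = 0x1000000)
 * yields table_size = PG_ALIGN_HIGH(0x1000000, PG_SIZE4K) / PG_SIZE4K = 4096
 * entries, i.e. one map_page per 4 KB page, and free_size starts out as the
 * full 16 MB. */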

/*
 *  ======== dmm_create ========
 *  Purpose:
 *      Create a dynamic memory manager object.
 */
int dmm_create(struct dmm_object **dmm_manager,
		      struct dev_object *hdev_obj,
		      const struct dmm_mgrattrs *mgr_attrts)
{
	struct dmm_object *dmm_obj = NULL;
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dmm_manager != NULL);

	*dmm_manager = NULL;
	/* create, zero, and tag a dmm mgr object */
	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
	if (dmm_obj != NULL) {
		spin_lock_init(&dmm_obj->dmm_lock);
		*dmm_manager = dmm_obj;
	} else {
		status = -ENOMEM;
	}

	return status;
}

/*
 *  ======== dmm_destroy ========
 *  Purpose:
 *      Release the dynamic memory manager resources.
 */
int dmm_destroy(struct dmm_object *dmm_mgr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	if (dmm_mgr) {
		status = dmm_delete_tables(dmm_obj);
		if (!status)
			kfree(dmm_obj);
	} else
		status = -EFAULT;

	return status;
}

/*
 *  ======== dmm_delete_tables ========
 *  Purpose:
 *      Delete DMM Tables.
 */
int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
	int status = 0;

	DBC_REQUIRE(refs > 0);
	/* Delete all DMM tables */
	if (dmm_mgr)
		vfree(virtual_mapping_table);
	else
		status = -EFAULT;
	return status;
}

/*
 *  ======== dmm_exit ========
 *  Purpose:
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void dmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}

/*
 *  ======== dmm_get_handle ========
 *  Purpose:
 *      Return the dynamic memory manager object for this device.
 *      This is typically called from the client process.
 */
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dmm_manager != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);

	return status;
}

/*
 *  ======== dmm_init ========
 *  Purpose:
 *      Initializes private state of DMM module.
 */
bool dmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	virtual_mapping_table = NULL;
	table_size = 0;

	return ret;
}

/*
 *  ======== dmm_map_memory ========
 *  Purpose:
 *      Add a mapping block to the reserved chunk. DMM assumes that this block
 *      will be mapped in the DSP/IVA's address space. DMM returns an error if
 *      a mapping overlaps another one. This function stores the info that
 *      will be required later while unmapping the block.
 */
int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	/* Find the reserved memory chunk containing the DSP block to
	 * be mapped */
	chunk = get_region(addr);
	if (chunk != NULL) {
		/* Mark the region 'mapped', leave the 'reserved' info as-is */
		chunk->mapped = true;
		chunk->mapped_size = (size / PG_SIZE4K);
	} else
		status = -ENOENT;
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
		"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);

	return status;
}

/*
 *  ======== dmm_reserve_memory ========
 *  Purpose:
 *      Reserve a chunk of virtually contiguous DSP/IVA address space.
 */
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
			      u32 *prsv_addr)
{
	int status = 0;
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *node;
	u32 rsv_addr = 0;
	u32 rsv_size = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Try to get a DSP chunk from the free list */
	node = get_free_region(size);
	if (node != NULL) {
		/*  DSP chunk of given size is available. */
		rsv_addr = DMM_ADDR_VIRTUAL(node);
		/* Calculate the number of entries to use */
		rsv_size = size / PG_SIZE4K;
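		/* If the free region returned is larger than needed, the
		 * remainder is split off below as a new free region. For
		 * example (illustrative): reserving 3 pages out of an 8-page
		 * free region leaves node[3] free with region_size = 5,
		 * while node[0] becomes a 3-page reserved chunk. */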
		if (rsv_size < node->region_size) {
			/* Mark remainder of free region */
			node[rsv_size].mapped = false;
			node[rsv_size].reserved = false;
			node[rsv_size].region_size =
			    node->region_size - rsv_size;
			node[rsv_size].mapped_size = 0;
		}
		/*  get_free_region() returns a first-fit chunk, but only
		   the requested amount is used. */
		node->mapped = false;
		node->reserved = true;
		node->region_size = rsv_size;
		node->mapped_size = 0;
		/* Return the chunk's starting address */
		*prsv_addr = rsv_addr;
	} else
		/* DSP chunk of the given size is not available */
		status = -ENOMEM;

	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
		prsv_addr, status, rsv_addr, rsv_size);

	return status;
}

/*
 *  ======== dmm_un_map_memory ========
 *  Purpose:
 *      Remove the mapped block from the reserved chunk.
 */
int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	chunk = get_mapped_region(addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Unmap the region */
		*psize = chunk->mapped_size * PG_SIZE4K;
		chunk->mapped = false;
		chunk->mapped_size = 0;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);

	return status;
}

/*
 *  ======== dmm_un_reserve_memory ========
 *  Purpose:
 *      Free a chunk of reserved DSP/IVA address space.
 */
int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	u32 i;
	int status = 0;
	u32 chunk_size;

	spin_lock(&dmm_obj->dmm_lock);

	/* Find the chunk containing the reserved address */
	chunk = get_mapped_region(rsv_addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Free all the mapped pages for this reserved region */
		i = 0;
		while (i < chunk->region_size) {
			if (chunk[i].mapped) {
				/* Remove mapping from the page tables. */
				chunk_size = chunk[i].mapped_size;
				/* Clear the mapping flags */
				chunk[i].mapped = false;
				chunk[i].mapped_size = 0;
				i += chunk_size;
			} else
				i++;
		}
		/* Clear the flags (mark the region 'free') */
		chunk->reserved = false;
		/* NOTE: We do NOT coalesce free regions here.
		 * Free regions are coalesced in get_free_region(), as it
		 * traverses the whole mapping table.
		 */
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
		__func__, dmm_mgr, rsv_addr, status, chunk);

	return status;
}
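
/*
 * Typical client sequence (illustrative sketch only, not part of this file;
 * the variable names and sizes below are hypothetical):
 *
 *	struct dmm_object *dmm_mgr;
 *	u32 rsv_addr, unmap_size;
 *
 *	dmm_get_handle(hprocessor, &dmm_mgr);
 *	dmm_reserve_memory(dmm_mgr, 0x10000, &rsv_addr);   // reserve 64 KB
 *	dmm_map_memory(dmm_mgr, rsv_addr, 0x10000);        // mark it mapped
 *	...
 *	dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);
 *	dmm_un_reserve_memory(dmm_mgr, rsv_addr);
 *
 * Error checking is omitted for brevity; each call returns 0 on success.
 */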

/*
 *  ======== get_region ========
 *  Purpose:
 *      Returns the region (mapping table entry) containing the specified
 *      address.
 */
static struct map_page *get_region(u32 addr)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;

	if (virtual_mapping_table != NULL) {
		/* find page mapped by this address */
		i = DMM_ADDR_TO_INDEX(addr);
		if (i < table_size)
			curr_region = virtual_mapping_table + i;
	}

	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
		__func__, curr_region, free_region, free_size);
	return curr_region;
}

/*
 *  ======== get_free_region ========
 *  Purpose:
 *      Returns the requested free region
 */
static struct map_page *get_free_region(u32 len)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;
	u32 region_size = 0;
	u32 next_i = 0;

	if (virtual_mapping_table == NULL)
		return curr_region;
	if (len > free_size) {
		/* Find the largest free region
		 * (coalesce during the traversal) */
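		/* Illustrative walk (hypothetical table): with table_size = 8,
		 * free runs of 2 pages at indices 0 and 2, and a reserved run
		 * of 4 pages at index 4, the first iteration coalesces index 0
		 * into one 4-page (16 KB) free region, the next records it as
		 * the largest free region, and the reserved run is skipped. */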
		while (i < table_size) {
			region_size = virtual_mapping_table[i].region_size;
			next_i = i + region_size;
			if (virtual_mapping_table[i].reserved == false) {
				/* Coalesce, if possible */
				if (next_i < table_size &&
				    virtual_mapping_table[next_i].reserved
				    == false) {
					virtual_mapping_table[i].region_size +=
					    virtual_mapping_table
					    [next_i].region_size;
					continue;
				}
				region_size *= PG_SIZE4K;
				if (region_size > free_size) {
					free_region = i;
					free_size = region_size;
				}
			}
			i = next_i;
		}
	}
	if (len <= free_size) {
		curr_region = virtual_mapping_table + free_region;
		free_region += (len / PG_SIZE4K);
		free_size -= len;
	}
	return curr_region;
}

/*
 *  ======== get_mapped_region ========
 *  Purpose:
 *      Returns the requested mapped region
 */
static struct map_page *get_mapped_region(u32 addrs)
{
	u32 i = 0;
	struct map_page *curr_region = NULL;

	if (virtual_mapping_table == NULL)
		return curr_region;

	i = DMM_ADDR_TO_INDEX(addrs);
	if (i < table_size && (virtual_mapping_table[i].mapped ||
			       virtual_mapping_table[i].reserved))
		curr_region = virtual_mapping_table + i;
	return curr_region;
}

#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
{
	struct map_page *curr_node = NULL;
	u32 i;
	u32 freemem = 0;
	u32 bigsize = 0;

	spin_lock(&dmm_mgr->dmm_lock);

	if (virtual_mapping_table != NULL) {
		for (i = 0; i < table_size; i +=
		     virtual_mapping_table[i].region_size) {
			curr_node = virtual_mapping_table + i;
			if (curr_node->reserved) {
				/*printk("RESERVED size = 0x%x, "
				   "Map size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K),
				   (curr_node->mapped == false) ? 0 :
				   (curr_node->mapped_size * PG_SIZE4K));
				 */
			} else {
/*				printk("UNRESERVED size = 0x%x\n",
					(curr_node->region_size * PG_SIZE4K));
 */
				freemem += (curr_node->region_size * PG_SIZE4K);
				if (curr_node->region_size > bigsize)
					bigsize = curr_node->region_size;
			}
		}
	}
	spin_unlock(&dmm_mgr->dmm_lock);
	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
	       freemem / (1024 * 1024));
	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
	       (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
	       (bigsize * PG_SIZE4K / (1024 * 1024)));

	return 0;
}
#endif