/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by an SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/list.h>
#include <dspbridge/sync.h>
#include <dspbridge/utildefs.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/cmm.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)   (pnode->dw_pa + pnode->ul_size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)  ((x)+(y))
#define GPPPA2DSPPA(base, x, y)  ((x)-(y))
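
/*
 * Illustrative note (the numbers below are assumptions, not platform data):
 * with a DSP-PA-to-GPP-PA offset of 0x76F00000 and a conversion factor of
 * +1, DSPPA2GPPPA maps DSP PA 0x10001000 to GPP PA 0x86F01000 and
 * GPPPA2DSPPA maps it back. The 'base' argument is unused by both macros.
 */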

/*
 *  Allocators define a block of contiguous memory used for future allocations.
 *
 *      sma - shared memory allocator.
 *      vma - virtual memory allocator (not used).
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 ul_sm_size;		/* Size of SM block in bytes */
	unsigned int dw_vm_base;	/* Start of VM block. (Dev driver
					 * context for 'sma') */
	u32 dw_dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;		/* DSPPa to GPPPa Conversion Factor */
	unsigned int dw_dsp_base;	/* DSP virt base byte address */
	u32 ul_dsp_size;	/* DSP seg size in bytes */
	struct cmm_object *hcmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct lst_list *free_list_head;
	/* node list of memory in use */
	struct lst_list *in_use_list_head;
};

struct cmm_xlator {		/* Pa<->Va translator object */
	/* CMM object this translator is associated with */
	struct cmm_object *hcmm_mgr;
	/*
	 *  Client process virtual base address that corresponds to phys SM
	 *  base address for translator's ul_seg_id.
	 *  Only 1 segment ID currently supported.
	 */
	unsigned int dw_virt_base;	/* virtual base address */
	u32 ul_virt_size;	/* size of virt space in bytes */
	u32 ul_seg_id;		/* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
	/*
	 * cmm_lock is used to serialize access to the memory manager
	 * from multiple threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct lst_list *node_free_list_head;	/* Free list of memory nodes */
	u32 ul_min_block_size;	/* Min SM block; default 16 bytes */
	u32 dw_page_size;	/* Memory Page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* ul_min_block_size, min block size(bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* ul_seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* ul_seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,			/* dw_dsp_bufs */
	0,			/* dw_dsp_buf_size */
	NULL,			/* vm_base */
	0,			/* dw_vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 dw_pa;		/* Phys addr */
	u32 dw_va;		/* Virtual address in device process context */
	u32 ul_size;		/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};

/*  ----------------------------------- Globals */
static u32 refs;		/* module reference count */

/*  ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

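/*
 * Illustrative only (not part of the driver, excluded from the build): a
 * minimal sketch of how a bridge-side caller might drive this module. The
 * device object, addresses, sizes and the CMM_ADDTODSPPA conversion factor
 * passed below are placeholders, not real platform values.
 */
#if 0
static int cmm_usage_sketch(struct dev_object *hdev_obj)
{
	struct cmm_object *cmm_mgr;
	struct cmm_attrs attrs = { .ul_seg_id = 1 };
	u32 seg_id;
	void *buf_va;	/* receives the driver-context VA of the buffer */
	void *buf_pa;
	int status;

	/* Create a manager with default attributes (16-byte min block) */
	status = cmm_create(&cmm_mgr, hdev_obj, NULL);
	if (status)
		return status;

	/* Register one GPP SM segment (all numbers are made up) */
	status = cmm_register_gppsm_seg(cmm_mgr, 0x87000000, 0x100000,
					0x76F00000, CMM_ADDTODSPPA,
					0x11000000, 0x1000, &seg_id,
					0x40000000);
	if (status)
		goto out;

	/* Allocate a zeroed 256-byte SM buffer from that segment */
	buf_pa = cmm_calloc_buf(cmm_mgr, 256, &attrs, &buf_va);
	if (buf_pa)
		cmm_free_buf(cmm_mgr, buf_pa, seg_id);

	cmm_un_register_gppsm_seg(cmm_mgr, seg_id);
out:
	cmm_destroy(cmm_mgr, true);
	return status;
}
#endif
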
/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate a SM buffer, zero its contents, and return the physical
 *      address and an optional driver context virtual address (pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and, if the block is large enough
 *      to split, return the remainder to the freelist. The kept block is
 *      placed on the inUseList.
 */
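/*
 * Worked example (numbers chosen for illustration only): with the default
 * ul_min_block_size of 16, a request for 100 bytes is rounded up to 112.
 * If the first free block that fits is 256 bytes, the caller keeps a
 * 112-byte block and a new 144-byte node is returned to the freelist.
 */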
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	if (cmm_mgr_obj && (usize != 0)) {
		/* Take the lock here so it is always balanced by the single
		 * mutex_unlock below, even if ul_seg_id is invalid */
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		if (pattrs->ul_seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->ul_seg_id);
			/* keep block size a multiple of ul_min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->ul_min_block_size -
					     1))
			    + cmm_mgr_obj->ul_min_block_size;
			pnode = get_free_block(allocator, usize);
		}
		if (pnode) {
			delta_size = (pnode->ul_size - usize);
			if (delta_size >= cmm_mgr_obj->ul_min_block_size) {
				/* create a new block with the leftovers and
				 * add to freelist */
				new_node =
				    get_node(cmm_mgr_obj, pnode->dw_pa + usize,
					     pnode->dw_va + usize,
					     (u32) delta_size);
				/* leftovers go free */
				add_to_free_list(allocator, new_node);
				/* adjust our node's size */
				pnode->ul_size = usize;
			}
			/* Tag node with client process requesting allocation
			 * We'll need to free up a process's alloc'd SM if the
			 * client process goes away.
			 */
			/* Return TGID instead of process handle */
			pnode->client_proc = current->tgid;

			/* put our node on InUse list */
			lst_put_tail(allocator->in_use_list_head,
				     (struct list_head *)pnode);
			buf_pa = (void *)pnode->dw_pa;	/* physical address */
			/* clear mem */
			pbyte = (u8 *) pnode->dw_va;
			for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
				*pbyte = 0;

			if (pp_buf_va != NULL) {
				/* Virtual address */
				*pp_buf_va = (void *)pnode->dw_va;
			}
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return buf_pa;
}

/*
 *  ======== cmm_create ========
 *  Purpose:
 *      Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
		      struct dev_object *hdev_obj,
		      const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;
	struct util_sysinfo sys_info;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (cmm_obj != NULL) {
		if (mgr_attrts == NULL)
			mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

		/* 4 bytes minimum */
		DBC_ASSERT(mgr_attrts->ul_min_block_size >= 4);
		/* save away smallest block allocation for this cmm mgr */
		cmm_obj->ul_min_block_size = mgr_attrts->ul_min_block_size;
		/* save away the systems memory page size */
		sys_info.dw_page_size = PAGE_SIZE;
		sys_info.dw_allocation_granularity = PAGE_SIZE;
		sys_info.dw_number_of_processors = 1;

		cmm_obj->dw_page_size = sys_info.dw_page_size;

		/* Note: the GPP SM seg table (pa_gppsm_seg_tab[]) is zeroed
		 * by the kzalloc above */

		/* Initialize the lock before the node free list so the
		 * error path below can safely call cmm_destroy() */
		mutex_init(&cmm_obj->cmm_lock);

		/* create node free list */
		cmm_obj->node_free_list_head =
				kzalloc(sizeof(struct lst_list), GFP_KERNEL);
		if (cmm_obj->node_free_list_head == NULL) {
			status = -ENOMEM;
			cmm_destroy(cmm_obj, true);
		} else {
			INIT_LIST_HEAD(&cmm_obj->node_free_list_head->head);
			*ph_cmm_mgr = cmm_obj;
		}
	} else {
		status = -ENOMEM;
	}
	return status;
}

/*
 *  ======== cmm_destroy ========
 *  Purpose:
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *pnode;

	DBC_REQUIRE(refs > 0);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/* If not force then fail if outstanding allocations exist */
	if (!force) {
		/* Check for outstanding memory allocations. cmm_get_info()
		 * takes cmm_lock itself, so call it before acquiring the
		 * lock below to avoid self-deadlock. */
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status) {
			if (temp_info.ul_total_in_use_cnt > 0) {
				/* outstanding allocations */
				status = -EPERM;
			}
		}
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* UnRegister SM allocator */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* Set slot to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
	}
	if (cmm_mgr_obj->node_free_list_head != NULL) {
		/* Free the free nodes */
		while (!LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
			pnode = (struct cmm_mnode *)
			    lst_get_head(cmm_mgr_obj->node_free_list_head);
			kfree(pnode);
		}
		/* delete NodeFreeList list */
		kfree(cmm_mgr_obj->node_free_list_head);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* delete CS & cmm mgr object */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}

/*
 *  ======== cmm_exit ========
 *  Purpose:
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void cmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}

/*
 *  ======== cmm_free_buf ========
 *  Purpose:
 *      Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa,
			u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *mnode_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	struct cmm_attrs *pattrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_pa != NULL);

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->ul_seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0)) {
		status = -EFAULT;
		return status;
	}
	/* get the allocator for this segment id */
	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (allocator != NULL) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		mnode_obj =
		    (struct cmm_mnode *)lst_first(allocator->in_use_list_head);
		while (mnode_obj) {
			if ((u32) buf_pa == mnode_obj->dw_pa) {
				/* Found it */
				lst_remove_elem(allocator->in_use_list_head,
						(struct list_head *)mnode_obj);
				/* back to freelist */
				add_to_free_list(allocator, mnode_obj);
				status = 0;	/* all right! */
				break;
			}
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->in_use_list_head,
				     (struct list_head *)mnode_obj);
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return status;
}

/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}

/*
 *  ======== cmm_get_info ========
 *  Purpose:
 *      Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
			struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *mnode_obj = NULL;

	DBC_REQUIRE(cmm_info_obj != NULL);

	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->ul_num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->ul_total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->ul_min_block_size = cmm_mgr_obj->ul_min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (altr != NULL) {
			cmm_info_obj->ul_num_gppsm_segs++;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_pa =
			    altr->shm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_total_seg_size =
			    altr->ul_dsp_size + altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_gpp_base_pa =
			    altr->shm_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_gpp_size =
			    altr->ul_sm_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_dsp_base_va =
			    altr->dw_dsp_base;
			cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
			    altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
			    altr->dw_vm_base - altr->ul_dsp_size;
			cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
			mnode_obj = (struct cmm_mnode *)
			    lst_first(altr->in_use_list_head);
			/* Count inUse blocks */
			while (mnode_obj) {
				cmm_info_obj->ul_total_in_use_cnt++;
				cmm_info_obj->seg_info[ul_seg -
						       1].ul_in_use_cnt++;
				/* next node. */
				mnode_obj = (struct cmm_mnode *)
				    lst_next(altr->in_use_list_head,
					     (struct list_head *)mnode_obj);
			}
		}
	}			/* end for */
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

/*
 *  ======== cmm_init ========
 *  Purpose:
 *      Initializes private state of CMM module.
 */
bool cmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);
	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}

/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
				  u32 dw_gpp_base_pa, u32 ul_size,
				  u32 dsp_addr_offset, s8 c_factor,
				  u32 dw_dsp_base, u32 ul_dsp_size,
				  u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(sgmt_id != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
		    (c_factor >= CMM_SUBFROMDSPPA));
	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
		"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n", __func__,
		dw_gpp_base_pa, ul_size, dsp_addr_offset, dw_dsp_base,
		ul_dsp_size, gpp_base_va);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	/* get a slot number for the new allocator */
	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}
	/* Check if input ul_size is big enough to alloc at least one block */
	if (ul_size < cmm_mgr_obj->ul_min_block_size) {
		status = -EINVAL;
		goto func_end;
	}

	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (psma != NULL) {
		psma->hcmm_mgr = hcmm_mgr;	/* ref to parent */
		psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
		psma->ul_sm_size = ul_size;	/* SM segment size in bytes */
		psma->dw_vm_base = gpp_base_va;
		psma->dw_dsp_phys_addr_offset = dsp_addr_offset;
		psma->c_factor = c_factor;
		psma->dw_dsp_base = dw_dsp_base;
		psma->ul_dsp_size = ul_dsp_size;
		if (psma->dw_vm_base == 0) {
			status = -EPERM;
			goto func_end;
		}
		/* return the actual segment identifier */
		*sgmt_id = (u32) slot_seg + 1;
		/* create memory free list */
		psma->free_list_head = kzalloc(sizeof(struct lst_list),
							GFP_KERNEL);
		if (psma->free_list_head == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		INIT_LIST_HEAD(&psma->free_list_head->head);

		/* create memory in-use list */
		psma->in_use_list_head = kzalloc(sizeof(struct lst_list),
							GFP_KERNEL);
		if (psma->in_use_list_head == NULL) {
			status = -ENOMEM;
			goto func_end;
		}
		INIT_LIST_HEAD(&psma->in_use_list_head->head);

		/* Get a mem node for this hunk-o-memory */
		new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
				    psma->dw_vm_base, ul_size);
		/* Place node on the SM allocator's free list */
		if (new_node) {
			lst_put_tail(psma->free_list_head,
				     (struct list_head *)new_node);
		} else {
			status = -ENOMEM;
			goto func_end;
		}
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	if (status && psma) {
		/* Cleanup allocator */
		un_register_gppsm_seg(psma);
	}

	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
				     u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	DBC_REQUIRE(ul_seg_id > 0);
	if (hcmm_mgr) {
		if (ul_seg_id == CMM_ALLSEGMENTS)
			ul_id = 1;

		if ((ul_id > 0) && (ul_id <= CMM_MAXGPPSEGS)) {
			while (ul_id <= CMM_MAXGPPSEGS) {
				mutex_lock(&cmm_mgr_obj->cmm_lock);
				/* slot = seg_id-1 */
				psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
				if (psma != NULL) {
					un_register_gppsm_seg(psma);
					/* Set alctr ptr to NULL for future
					 * reuse */
					cmm_mgr_obj->pa_gppsm_seg_tab[ul_id -
								      1] = NULL;
				} else if (ul_seg_id != CMM_ALLSEGMENTS) {
					status = -EPERM;
				}
				mutex_unlock(&cmm_mgr_obj->cmm_lock);
				if (ul_seg_id != CMM_ALLSEGMENTS)
					break;

				ul_id++;
			}	/* end while */
		} else {
			status = -EINVAL;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 *  ======== un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister the SM allocator by freeing all its resources and
 *      nulling cmm mgr table entry.
 *  Note:
 *      This routine is always called within cmm lock crit sect.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *mnode_obj = NULL;
	struct cmm_mnode *next_node = NULL;

	DBC_REQUIRE(psma != NULL);
	if (psma->free_list_head != NULL) {
		/* free nodes on free list */
		mnode_obj = (struct cmm_mnode *)lst_first(psma->free_list_head);
		while (mnode_obj) {
			next_node =
			    (struct cmm_mnode *)lst_next(psma->free_list_head,
							 (struct list_head *)
							 mnode_obj);
			lst_remove_elem(psma->free_list_head,
					(struct list_head *)mnode_obj);
			kfree((void *)mnode_obj);
			/* next node. */
			mnode_obj = next_node;
		}
		kfree(psma->free_list_head);	/* delete freelist */
		/* free nodes on InUse list */
		mnode_obj =
		    (struct cmm_mnode *)lst_first(psma->in_use_list_head);
		while (mnode_obj) {
			next_node =
			    (struct cmm_mnode *)lst_next(psma->in_use_list_head,
							 (struct list_head *)
							 mnode_obj);
			lst_remove_elem(psma->in_use_list_head,
					(struct list_head *)mnode_obj);
			kfree((void *)mnode_obj);
			/* next node. */
			mnode_obj = next_node;
		}
		kfree(psma->in_use_list_head);	/* delete InUse list */
	}
	if ((void *)psma->dw_vm_base != NULL)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);

	/* Free allocator itself */
	kfree(psma);
}

/*
 *  ======== get_slot ========
 *  Purpose:
 *      An available slot # is returned. Returns negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg = -1;	/* neg on failure */

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	/* get first available slot in cmm mgr SMSegTab[] */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* failed */

	return slot_seg;
}

/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode = NULL;

	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE(dw_pa != 0);
	DBC_REQUIRE(dw_va != 0);
	DBC_REQUIRE(ul_size != 0);
	/* Check cmm mgr's node freelist */
	if (LST_IS_EMPTY(cmm_mgr_obj->node_free_list_head)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
	} else {
		/* surely a valid element */
		pnode = (struct cmm_mnode *)
		    lst_get_head(cmm_mgr_obj->node_free_list_head);
	}
	if (pnode) {
		lst_init_elem((struct list_head *)pnode);	/* set self */
		pnode->dw_pa = dw_pa;	/* Physical addr of start of block */
		pnode->dw_va = dw_va;	/* Virtual   "            " */
		pnode->ul_size = ul_size;	/* Size of block */
	}
	return pnode;
}

/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node on the cmm nodelist for later use.
 *      Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	lst_init_elem((struct list_head *)pnode);	/* init .self ptr */
	lst_put_tail(cmm_mgr_obj->node_free_list_head,
		     (struct list_head *)pnode);
}

/*
 *  ======== get_free_block ========
 *  Purpose:
 *      Scan the free block list and return the first block that satisfies
 *      the size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	if (allocator) {
		struct cmm_mnode *mnode_obj = (struct cmm_mnode *)
		    lst_first(allocator->free_list_head);
		while (mnode_obj) {
			if (usize <= (u32) mnode_obj->ul_size) {
				lst_remove_elem(allocator->free_list_head,
						(struct list_head *)mnode_obj);
				return mnode_obj;
			}
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		}
	}
	return NULL;
}

/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce the node with adjacent free blocks and insert it into the
 *      freelist in ascending size order.
 */
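/*
 * Worked example (addresses are illustrative): freeing a block at PA 0x1000
 * of size 0x100 when the freelist already holds [0x0F00, 0x1000) and
 * [0x1100, 0x1200) merges all three into a single free node covering
 * [0x0F00, 0x1200), which is then re-inserted by size.
 */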
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode)
{
	struct cmm_mnode *node_prev = NULL;
	struct cmm_mnode *node_next = NULL;
	struct cmm_mnode *mnode_obj;
	u32 dw_this_pa;
	u32 dw_next_pa;

	DBC_REQUIRE(pnode != NULL);
	DBC_REQUIRE(allocator != NULL);
	dw_this_pa = pnode->dw_pa;
	dw_next_pa = NEXT_PA(pnode);
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (dw_this_pa == NEXT_PA(mnode_obj)) {
			/* found the block ahead of this one */
			node_prev = mnode_obj;
		} else if (dw_next_pa == mnode_obj->dw_pa) {
			node_next = mnode_obj;
		}
		if ((node_prev == NULL) || (node_next == NULL)) {
			/* next node. */
			mnode_obj = (struct cmm_mnode *)
			    lst_next(allocator->free_list_head,
				     (struct list_head *)mnode_obj);
		} else {
			/* got 'em */
			break;
		}
	}			/* while */
	if (node_prev != NULL) {
		/* combine with previous block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_prev);
		/* grow node to hold both */
		pnode->ul_size += node_prev->ul_size;
		pnode->dw_pa = node_prev->dw_pa;
		pnode->dw_va = node_prev->dw_va;
		/* place node on mgr nodeFreeList */
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_prev);
	}
	if (node_next != NULL) {
		/* combine with next block */
		lst_remove_elem(allocator->free_list_head,
				(struct list_head *)node_next);
		/* grow the node */
		pnode->ul_size += node_next->ul_size;
		/* place node on mgr nodeFreeList */
		delete_node((struct cmm_object *)allocator->hcmm_mgr,
			    node_next);
	}
	/* Now, let's add to freelist in increasing size order */
	mnode_obj = (struct cmm_mnode *)lst_first(allocator->free_list_head);
	while (mnode_obj) {
		if (pnode->ul_size <= mnode_obj->ul_size)
			break;

		/* next node. */
		mnode_obj =
		    (struct cmm_mnode *)lst_next(allocator->free_list_head,
						 (struct list_head *)mnode_obj);
	}
	/* if mnode_obj is NULL then add our pnode to the end of the freelist */
	if (mnode_obj == NULL) {
		lst_put_tail(allocator->free_list_head,
			     (struct list_head *)pnode);
	} else {
		/* insert our node before the current traversed node */
		lst_insert_before(allocator->free_list_head,
				  (struct list_head *)pnode,
				  (struct list_head *)mnode_obj);
	}
}

/*
 *  ======== get_allocator ========
 *  Purpose:
 *      Return the allocator for the given SM Segid.
 *      SegIds:  1,2,3..max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));

	/* A NULL entry means no allocator is registered for this segment */
	return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}

/*
 *  ======== cmm_xlator_create ========
 *  Purpose:
 *      Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
			     struct cmm_object *hcmm_mgr,
			     struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(hcmm_mgr != NULL);

	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->hcmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM seg_id */
		xlator_object->ul_seg_id = xlator_attrs->ul_seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}

/*
 *  ======== cmm_xlator_delete ========
 *  Purpose:
 *      Free the Xlator resources.
 *      VM gets freed later.
 */
int cmm_xlator_delete(struct cmm_xlatorobject *xlator, bool force)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;

	DBC_REQUIRE(refs > 0);

	kfree(xlator_obj);

	return 0;
}
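
/*
 * Illustrative only (not part of the driver, excluded from the build): a
 * minimal translator usage sketch. It assumes a manager that already has
 * GPP SM segment 1 registered; the client VM base and sizes below are
 * placeholders, not real mappings.
 */
#if 0
static int cmm_xlator_usage_sketch(struct cmm_object *hcmm_mgr)
{
	struct cmm_xlatorobject *xlator;
	u8 *vm_base = (u8 *)0x40000000;	/* hypothetical client VM base */
	u32 buf_va = 0;			/* receives the translated VA */
	void *buf_pa;
	int status;

	status = cmm_xlator_create(&xlator, hcmm_mgr, NULL);
	if (status)
		return status;

	/* Record the client-side virtual window for segment 1 */
	cmm_xlator_info(xlator, &vm_base, 0x100000, 1, true);

	/* Allocate SM; the translated VA is written through the 2nd arg */
	buf_pa = cmm_xlator_alloc_buf(xlator, &buf_va, 256);
	if (buf_pa)
		cmm_xlator_free_buf(xlator, (void *)buf_va);

	return cmm_xlator_delete(xlator, true);
}
#endif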

/*
 *  ======== cmm_xlator_alloc_buf ========
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(xlator_obj->hcmm_mgr != NULL);
	DBC_REQUIRE(va_buf != NULL);
	DBC_REQUIRE(pa_size > 0);
	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

	if (xlator_obj) {
		attrs.ul_seg_id = xlator_obj->ul_seg_id;
		__raw_writel(0, va_buf);
		/* Alloc SM */
		pbuf =
		    cmm_calloc_buf(xlator_obj->hcmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to translator(node/strm) process Virtual
			 * address */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}

/*
 *  ======== cmm_xlator_free_buf ========
 *  Purpose:
 *      Free the given SM buffer and descriptor.
 *      Does not free virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_va != NULL);
	DBC_REQUIRE(xlator_obj->ul_seg_id > 0);

	if (xlator_obj) {
		/* convert Va to Pa so we can free it. */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->hcmm_mgr, buf_pa,
					      xlator_obj->ul_seg_id);
			if (status) {
				/* Uh oh, this shouldn't happen. Descriptor
				 * gone! */
				DBC_ASSERT(false);	/* CMM is leaking mem */
			}
		}
	}
	return status;
}

/*
 *  ======== cmm_xlator_info ========
 *  Purpose:
 *      Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
			   u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));

	if (xlator_obj) {
		if (set_info) {
			/* set translators virtual address range */
			xlator_obj->dw_virt_base = (u32) *paddr;
			xlator_obj->ul_virt_size = ul_size;
		} else {	/* return virt base address */
			*paddr = (u8 *) xlator_obj->dw_virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 *  ======== cmm_xlator_translate ========
 */
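/*
 * Worked example (all numbers are assumptions for illustration): if the
 * allocator has shm_base = 0x87000000 and ul_dsp_size = 0x100000, and the
 * translator has dw_virt_base = 0x40000000, then the segment base PA is
 * 0x86F00000, so CMM_PA2VA maps PA 0x86F01000 to VA 0x40001000 and
 * CMM_VA2PA maps it back. With dw_dsp_phys_addr_offset = 0x76F00000 and
 * c_factor = +1, CMM_VA2DSPPA would then yield DSP PA 0x10001000.
 */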
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));

	if (!xlator_obj)
		goto loop_cont;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->hcmm_mgr;
	/* get this translator's default SM allocator */
	DBC_ASSERT(xlator_obj->ul_seg_id > 0);
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->ul_seg_id - 1];
	if (!allocator)
		goto loop_cont;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va Base + offset */
			dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
							   allocator->
							   ul_dsp_size);
			dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
			/* Check if translated Va base is in range */
			if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->dw_virt_base +
			      xlator_obj->ul_virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp PA =  Gpp Base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->ul_dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* Now convert address to proper target physical address if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* Got Gpp Pa now, convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
				dw_addr_xlate,
				allocator->dw_dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* Got DSP Pa, convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,
				dw_addr_xlate,
				allocator->dw_dsp_phys_addr_offset *
				allocator->c_factor);
	}
loop_cont:
	return (void *)dw_addr_xlate;
}