/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/			  mapping		iommu_		page
 *    | da	pa	va	(d)-(p)-(v)		function	type
 *  ---------------------------------------------------------------------------
 *  1 | c	c	c	 1 - 1 - 1	  _kmap() / _kunmap()	s
 *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
 *  3 | c	d	c	 1 - n - 1	  _vmap() / _vunmap()	s
 *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
 *
 *
 *	'iova':	device iommu virtual address
 *	'da':	alias of 'iova'
 *	'pa':	physical address
 *	'va':	mpu virtual address
 *
 *	'c':	contiguous memory area
 *	'd':	discontiguous memory area
 *	'a':	anonymous memory allocation
 *	'()':	optional feature
 *
 *	'n':	a normal page(4KB) size is used.
 *	's':	multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
 *
 *	'*':	not yet, but feasible.
 */
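
/*
 * A minimal usage sketch of the patterns above (illustrative only; it
 * assumes an iommu handle obtained elsewhere, e.g. via iommu_get(), and
 * the device name "isp" below is just an example):
 *
 *	struct iommu *obj = iommu_get("isp");
 *	u32 da;
 *
 *	da = iommu_vmalloc(obj, 0, SZ_1M, 0);	(pattern 4)
 *	...
 *	iommu_vfree(obj, da);
 *
 *	da = iommu_kmalloc(obj, 0, SZ_64K, 0);	(pattern 2)
 *	...
 *	iommu_kfree(obj, da);
 *	iommu_put(obj);
 */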

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
	int i;
	unsigned int nr_entries;
	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	nr_entries = 0;
	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (bytes >= pagesize[i]) {
			nr_entries += (bytes / pagesize[i]);
			bytes %= pagesize[i];
		}
	}
	BUG_ON(bytes);

	return nr_entries;
}
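
/*
 * Worked example for the greedy decomposition above (illustrative only):
 * a request of 16MB + 1MB + 64KB + 8KB (0x1112000 bytes) splits into
 * 1 x 16MB + 1 x 1MB + 1 x 64KB + 2 x 4KB, so sgtable_nents() returns 5
 * sg entries for it.
 */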

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
		nr_entries = sgtable_nents(bytes);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1);
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma which includes @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignement;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignement = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignement = iopgsz_max(bytes);
		start = roundup(start, alignement);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end >= start)
			break;

		if (start + bytes < tmp->da_start)
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end + 1, alignement);

		prev_end = tmp->da_end;
	}

	if ((start > prev_end) && (ULONG_MAX - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
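
/*
 * Worked example for the search above (illustrative only; anonymous and
 * non-linear, so the alignment stays PAGE_SIZE): with existing iovmas
 * [0x1000..0x6000) and [0x10000..0x30000), a request for 0x4000 bytes
 * starts at 0x1000, is pushed past the first area to
 * roundup(0x6000 + 1, 0x1000) = 0x7000, and then fits in front of the
 * second area, so the new iovma becomes [0x7000..0xb000).
 */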

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it exists only for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = iopgsz_max(len);

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		len -= bytes;
	}
	BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it exists only for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
		 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates a 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
		 u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);

/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated by the caller before 'iommu_vmap()' is called.
	 * Just return 'sgt' to the caller to free.
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);
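
/*
 * Minimal usage sketch for iommu_vmap()/iommu_vunmap() (illustrative only;
 * 'obj' and 'sgt' are placeholders for an iommu handle and a caller-built
 * sg_table whose entries are iommu page sized):
 *
 *	u32 da;
 *
 *	da = iommu_vmap(obj, 0, sgt, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	sgt = iommu_vunmap(obj, da);
 *	(the returned table is the one passed in; the caller frees it)
 */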

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);
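
/*
 * Minimal usage sketch for iommu_vmalloc()/iommu_vfree() (illustrative
 * only; 'obj' is a placeholder iommu handle): allocate 1MB of anonymous,
 * possibly discontiguous memory, let the allocator pick the device
 * address, and release it again:
 *
 *	u32 da;
 *
 *	da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	iommu_vfree(obj, da);
 */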

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			  size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @pa:		contiguous physical memory
 * @bytes:	bytes for mapping
 * @flags:	iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
		 u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);

/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);
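
/*
 * Minimal usage sketch for iommu_kmap()/iommu_kunmap() (illustrative only;
 * 'obj' and 'pa' are placeholders for an iommu handle and a suitably
 * aligned, physically contiguous region): passing da == 0 lets the
 * allocator pick the device address:
 *
 *	u32 da;
 *
 *	da = iommu_kmap(obj, 0, pa, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	iommu_kunmap(obj, da);
 */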

/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);
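
/*
 * Minimal usage sketch for iommu_kmalloc()/iommu_kfree() (illustrative
 * only; 'obj' is a placeholder iommu handle): allocate 64KB of physically
 * contiguous, DMA-able memory at an allocator-chosen device address:
 *
 *	u32 da;
 *
 *	da = iommu_kmalloc(obj, 0, SZ_64K, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	iommu_kfree(obj, da);
 */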

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");