/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>

#include "dma.h"

/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}

int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
			  struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	prog->n_pages = n_bytes >> PAGE_SHIFT;

	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR
		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}

void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
				    prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
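
/*
 * Illustrative usage sketch for dma_prog_region: a caller allocates one
 * coherent program buffer at setup time and frees it on teardown.  The
 * identifiers "pdev" and "PROG_BYTES" below are hypothetical, not taken
 * from this file:
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, PROG_BYTES, pdev))
 *		return -ENOMEM;
 *	...
 *	dma_prog_region_free(&prog);
 */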

/* dma_region */

/**
 * dma_region_init - clear out all fields but do not allocate anything
 */
void dma_region_init(struct dma_region *dma)
{
	dma->kvirt = NULL;
	dma->dev = NULL;
	dma->n_pages = 0;
	dma->n_dma_pages = 0;
	dma->sglist = NULL;
}

/**
 * dma_region_alloc - allocate the buffer and map it to the IOMMU
 */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
		     struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	/* Clear the ram out, no junk to the user */
	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	sg_init_table(dma->sglist, dma->n_pages);

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
				PAGE_SIZE, 0);
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

      err:
	dma_region_free(dma);
	return -ENOMEM;
}
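
/*
 * A minimal usage sketch, assuming "pdev" is the caller's struct pci_dev
 * and a 64 KiB device-to-host buffer is wanted (both the name and the size
 * are illustrative):
 *
 *	struct dma_region recv;
 *
 *	dma_region_init(&recv);
 *	if (dma_region_alloc(&recv, 64 * 1024, pdev, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *	...
 *	dma_region_free(&recv);
 */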

/**
 * dma_region_free - unmap and free the buffer
 */
void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
			     dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	vfree(dma->sglist);
	dma->sglist = NULL;

	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}

/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
				  unsigned int start, unsigned long *rem)
{
	int i;
	unsigned long off = offset;

	for (i = start; i < dma->n_dma_pages; i++) {
		if (off < sg_dma_len(&dma->sglist[i])) {
			*rem = off;
			break;
		}

		off -= sg_dma_len(&dma->sglist[i]);
	}

	BUG_ON(i >= dma->n_dma_pages);

	return i;
}

/**
 * dma_region_offset_to_bus - get bus address of an offset within a DMA region
 *
 * Returns the DMA bus address of the byte with the given @offset relative to
 * the beginning of the @dma.
 */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
				    unsigned long offset)
{
	unsigned long rem = 0;

	struct scatterlist *sg =
	    &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
	return sg_dma_address(sg) + rem;
}
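
/*
 * For example (illustrative only), a driver filling a hardware descriptor
 * might resolve the bus address of a packet that starts "pkt_off" bytes
 * into the region; "desc" and "pkt_off" are hypothetical names:
 *
 *	dma_addr_t bus = dma_region_offset_to_bus(&recv, pkt_off);
 *
 *	desc->data_address = cpu_to_le32(bus);
 */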

/**
 * dma_region_sync_for_cpu - sync the CPU's view of the buffer
 */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
			     unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
				dma->direction);
}

/**
 * dma_region_sync_for_device - sync the IO bus' view of the buffer
 */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
				unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
				   last - first + 1, dma->direction);
}
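
/*
 * Illustrative ordering sketch: sync for the CPU before reading data the
 * device just wrote, and sync for the device after the CPU writes data the
 * device will read.  "recv", "pkt_off", "pkt_len" and "process_packet" are
 * hypothetical:
 *
 *	dma_region_sync_for_cpu(&recv, pkt_off, pkt_len);
 *	process_packet(recv.kvirt + pkt_off, pkt_len);
 *
 *	dma_region_sync_for_device(&recv, pkt_off, pkt_len);
 */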

#ifdef CONFIG_MMU

static int dma_region_pagefault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct dma_region *dma = (struct dma_region *)vma->vm_private_data;

	if (!dma->kvirt)
		return VM_FAULT_SIGBUS;

	if (vmf->pgoff >= dma->n_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct dma_region_vm_ops = {
	.fault = dma_region_pagefault,
};

/**
 * dma_region_mmap - map the buffer into a user space process
 */
int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	unsigned long size;

	if (!dma->kvirt)
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* check the length */
	size = vma->vm_end - vma->vm_start;
	if (size > (dma->n_pages << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_ops = &dma_region_vm_ops;
	vma->vm_private_data = dma;
	vma->vm_file = file;
	vma->vm_flags |= VM_RESERVED | VM_ALWAYSDUMP;

	return 0;
}
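
/*
 * A character driver would typically call this from its own mmap handler,
 * roughly as sketched below; "my_mmap" and "my_dma" are hypothetical names:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_region_mmap(&my_dma, file, vma);
 *	}
 */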

#else				/* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	return -EINVAL;
}

#endif				/* CONFIG_MMU */