/*
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: busdma_machdep.c,v 1.3 1998/02/06 12:13:04 eivind Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* tag we inherit restrictions from */
	bus_size_t	  boundary;	/* boundary no segment may cross */
	bus_addr_t	  lowaddr;	/* exclusion window is (lowaddr, highaddr] */
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;	/* optional refinement of the window */
	void		 *filterarg;	/* argument passed to the filter */
	bus_size_t	  maxsize;	/* largest mapping this tag supports */
	int		  nsegments;	/* max number of S/G segments */
	bus_size_t	  maxsegsz;	/* max size of a single segment */
	int		  flags;
	int		  ref_count;	/* references, including ourself */
	int		  map_count;	/* maps created with this tag */
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;	/* shared map for loads that need no bouncing */

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return non-zero if the given physical address lies within the
 * exclusion window of this tag or any of its ancestors and is not
 * exempted by that tag's filter, i.e. the page must be bounced.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
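
/*
 * Filter usage sketch (illustrative only; the "foo" names are invented
 * and not part of this file).  A filter lets a tag refine its
 * (lowaddr, highaddr] window: a non-zero return means the page is
 * unreachable by the device and must be bounced, a zero return exempts
 * it even though it lies within the window.
 *
 *	static int
 *	foo_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *		return ((paddr & 0x00800000) != 0);
 *	}
 *
 * Such a function would be passed as the filter/filterarg arguments
 * to bus_dma_tag_create() below.
 */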

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t boundary,
		   bus_addr_t lowaddr, bus_addr_t highaddr,
		   bus_dma_filter_t *filter, void *filterarg,
		   bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
		   int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MIN(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
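
/*
 * Usage sketch (hypothetical driver; the constants and the tag name
 * are invented for illustration).  A device that can only DMA below
 * 16MB, in a single segment of at most 64KB that may not cross a 64KB
 * boundary, might create its tag as follows:
 *
 *	bus_dma_tag_t isa_tag;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL, 0x10000, 0xffffff,
 *				   BUS_SPACE_MAXADDR, NULL, NULL,
 *				   0x10000, 1, 0x10000, 0, &isa_tag);
 *
 * The arguments are, in order: parent, boundary, lowaddr, highaddr,
 * filter, filterarg, maxsize, nsegments, maxsegsz, flags, and the
 * tag pointer to fill in.
 */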

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
			}
			dmat = parent;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT);
		if (*mapp == NULL) {
			error = ENOMEM;
		} else {
			/* Initialize the new map */
			bzero(*mapp, sizeof(**mapp));
			STAILQ_INIT(&((*mapp)->bpages));
		}
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if (dmat->map_count > 0
		 && total_bpages < maxpages) {
			int pages;

			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			alloc_bounce_pages(dmat, pages);
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}
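
/*
 * Note on usage (sketch; error handling elided, "isa_tag" continues
 * the example above): for a tag whose restrictions can always be met,
 * this intentionally hands back a NULL map, which bus_dmamap_load()
 * and bus_dmamap_destroy() both accept:
 *
 *	bus_dmamap_t dmamap;
 *
 *	error = bus_dmamap_create(isa_tag, 0, &dmamap);
 *	...
 *	error = bus_dmamap_destroy(isa_tag, dmamap);
 */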

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
	vm_offset_t		nextpaddr;	/* must survive loop iterations */
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;

	error = 0;
	nextpaddr = 0;

	/*
	 * A NULL map is a legitimate handle for a tag that never
	 * requires bouncing; substitute the static no-bounce map
	 * before anything dereferences it.
	 */
	if (map == NULL)
		map = &nobounce_dmamap;

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve the necessary bounce pages. */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {
			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0
		 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			/* First segment */
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/* Physically contiguous; coalesce. */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;
	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs!\n");
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
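
/*
 * Usage sketch (hypothetical client; the "foo" names are invented).
 * The callback runs either immediately or, when EINPROGRESS is
 * returned, later from busdma_swi(), so all post-mapping work belongs
 * in the callback:
 *
 *	static void
 *	foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *			 int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		foo_start_transfer(sc, segs, nseg);
 *	}
 *
 *	error = bus_dmamap_load(sc->dma_tag, sc->dmamap, sc->buf,
 *				sc->buflen, foo_dma_callback, sc, 0);
 */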

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			/* Copy the client's data into the bounce pages. */
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			/* Copy the bounced data back to the client. */
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
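
/*
 * Usage sketch: clients bracket each transfer with the matching sync
 * operations so that bounced data is copied at the right times, e.g.
 *
 *	bus_dmamap_sync(tag, dmamap, BUS_DMASYNC_PREWRITE);
 *	... start the device reading from memory ...
 *
 *	... the device has finished writing to memory ...
 *	bus_dmamap_sync(tag, dmamap, BUS_DMASYNC_POSTREAD);
 *
 * (bus_dmamap_sync() is the public wrapper in <machine/bus.h>, which
 * checks for a NULL map before resolving to _bus_dmamap_sync() here.)
 */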

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		/* First call: set up the page and map lists. */
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT);

		if (bpage == NULL)
			break;
		bzero(bpage, sizeof(*bpage));
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE, 0x10000);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

/*
 * Reserve up to the number of bounce pages the map still needs.
 * Returns the count of pages that could not yet be reserved; a zero
 * return means the reservation is complete.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			/*
			 * The first waiter now has all the pages it
			 * needs; hand it to the software interrupt
			 * for callback processing.
			 */
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}

/*
 * Software interrupt handler for deferred loads: maps queued by
 * bus_dmamap_load() (which returned EINPROGRESS) are re-loaded here
 * once free_bounce_page() has satisfied their reservations, which in
 * turn invokes the client's callback.
 */
void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}