bus_machdep.c revision 166096
1156283Srwatson/*-
2156283Srwatson * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
3156283Srwatson * All rights reserved.
4156283Srwatson *
5156283Srwatson * This code is derived from software contributed to The NetBSD Foundation
6156283Srwatson * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
7156283Srwatson * NASA Ames Research Center.
8156283Srwatson *
9156283Srwatson * Redistribution and use in source and binary forms, with or without
10156283Srwatson * modification, are permitted provided that the following conditions
11156283Srwatson * are met:
12156283Srwatson * 1. Redistributions of source code must retain the above copyright
13156283Srwatson *    notice, this list of conditions and the following disclaimer.
14156283Srwatson * 2. Redistributions in binary form must reproduce the above copyright
15161630Srwatson *    notice, this list of conditions and the following disclaimer in the
16161630Srwatson *    documentation and/or other materials provided with the distribution.
17161630Srwatson * 3. All advertising materials mentioning features or use of this software
18156283Srwatson *    must display the following acknowledgement:
19156283Srwatson *	This product includes software developed by the NetBSD
20156283Srwatson *	Foundation, Inc. and its contributors.
21156283Srwatson * 4. Neither the name of The NetBSD Foundation nor the names of its
22156283Srwatson *    contributors may be used to endorse or promote products derived
23156283Srwatson *    from this software without specific prior written permission.
24156283Srwatson *
25156283Srwatson * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26156283Srwatson * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27156283Srwatson * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28156283Srwatson * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29156283Srwatson * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30156283Srwatson * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31156283Srwatson * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32156283Srwatson * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33156283Srwatson * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34156283Srwatson * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35156283Srwatson * POSSIBILITY OF SUCH DAMAGE.
36156283Srwatson */
37156283Srwatson/*-
38156283Srwatson * Copyright (c) 1992, 1993
39156283Srwatson *	The Regents of the University of California.  All rights reserved.
40156283Srwatson *
41156283Srwatson * This software was developed by the Computer Systems Engineering group
42156283Srwatson * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43156283Srwatson * contributed to Berkeley.
44156283Srwatson *
45156283Srwatson * Redistribution and use in source and binary forms, with or without
46156283Srwatson * modification, are permitted provided that the following conditions
47156283Srwatson * are met:
48156283Srwatson * 1. Redistributions of source code must retain the above copyright
49156283Srwatson *    notice, this list of conditions and the following disclaimer.
50156283Srwatson * 2. Redistributions in binary form must reproduce the above copyright
51156283Srwatson *    notice, this list of conditions and the following disclaimer in the
52156283Srwatson *    documentation and/or other materials provided with the distribution.
53156283Srwatson * 4. Neither the name of the University nor the names of its contributors
54156283Srwatson *    may be used to endorse or promote products derived from this software
55156283Srwatson *    without specific prior written permission.
56156283Srwatson *
57156283Srwatson * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
58156283Srwatson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59156283Srwatson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60156283Srwatson * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
61156283Srwatson * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62156283Srwatson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63156283Srwatson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64156283Srwatson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65156283Srwatson * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66156283Srwatson * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67156283Srwatson * SUCH DAMAGE.
68156283Srwatson */
69156283Srwatson/*-
70156283Srwatson * Copyright (c) 1997, 1998 Justin T. Gibbs.
71156283Srwatson * All rights reserved.
72156283Srwatson * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>.  All rights reserved.
73156283Srwatson *
74156283Srwatson * Redistribution and use in source and binary forms, with or without
75156283Srwatson * modification, are permitted provided that the following conditions
76156283Srwatson * are met:
77156283Srwatson * 1. Redistributions of source code must retain the above copyright
78156283Srwatson *    notice, this list of conditions, and the following disclaimer,
79156283Srwatson *    without modification, immediately at the beginning of the file.
80156283Srwatson * 2. The name of the author may not be used to endorse or promote products
81156283Srwatson *    derived from this software without specific prior written permission.
82156283Srwatson *
83156283Srwatson * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
84156283Srwatson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
85156283Srwatson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
86156283Srwatson * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
87156283Srwatson * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
88156283Srwatson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
89162503Srwatson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
90162503Srwatson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
91162503Srwatson * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
92156283Srwatson * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
93156283Srwatson * SUCH DAMAGE.
94156283Srwatson *
95156283Srwatson *	from: @(#)machdep.c	8.6 (Berkeley) 1/14/94
96156283Srwatson *	from: NetBSD: machdep.c,v 1.111 2001/09/15 07:13:40 eeh Exp
97156283Srwatson *	and
98156283Srwatson * 	from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
99156283Srwatson */
100156283Srwatson
101156283Srwatson#include <sys/cdefs.h>
102156283Srwatson__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 166096 2007-01-18 18:32:26Z marius $");
103156283Srwatson
104156283Srwatson#include <sys/param.h>
105156283Srwatson#include <sys/bus.h>
106156283Srwatson#include <sys/lock.h>
107156283Srwatson#include <sys/malloc.h>
108156283Srwatson#include <sys/mbuf.h>
109156283Srwatson#include <sys/mutex.h>
110156283Srwatson#include <sys/proc.h>
111156283Srwatson#include <sys/smp.h>
112156283Srwatson#include <sys/systm.h>
113156283Srwatson#include <sys/uio.h>
114156283Srwatson
115156283Srwatson#include <vm/vm.h>
116156283Srwatson#include <vm/vm_extern.h>
117156283Srwatson#include <vm/vm_kern.h>
118156283Srwatson#include <vm/vm_page.h>
119156283Srwatson#include <vm/vm_param.h>
120156283Srwatson#include <vm/vm_map.h>
121156283Srwatson
122156283Srwatson#include <machine/asi.h>
123156283Srwatson#include <machine/atomic.h>
124156283Srwatson#include <machine/bus.h>
125156283Srwatson#include <machine/bus_private.h>
126156283Srwatson#include <machine/cache.h>
127156283Srwatson#include <machine/smp.h>
128156283Srwatson#include <machine/tlb.h>
129156283Srwatson
130156283Srwatsonstatic void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
131156283Srwatson    bus_size_t, bus_size_t, int);
132156283Srwatson
/*
 * ASIs for bus access, indexed by bus space type (nexus, SBus, PCI
 * config/memory/I/O).  The _L variants are the little-endian bypass
 * ASIs -- presumably to match PCI byte order; see machine/asi.h.
 */
int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* nexus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};
142156283Srwatson
/*
 * ASIs for "stream" (non-byte-swapped) bus access; all bus space types
 * use the plain big-endian bypass ASI here.
 */
int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* nexus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};
151156283Srwatson
152156283Srwatson/*
153156283Srwatson * Convenience function for manipulating driver locks from busdma (during
154156283Srwatson * busdma_swi, for example).  Drivers that don't provide their own locks
155156283Srwatson * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
156156283Srwatson * non-mutex locking scheme don't have to use this at all.
157156283Srwatson */
158156283Srwatsonvoid
159156283Srwatsonbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
160156283Srwatson{
161156283Srwatson	struct mtx *dmtx;
162156283Srwatson
163156283Srwatson	dmtx = (struct mtx *)arg;
164156283Srwatson	switch (op) {
165156283Srwatson	case BUS_DMA_LOCK:
166156283Srwatson		mtx_lock(dmtx);
167156283Srwatson		break;
168156283Srwatson	case BUS_DMA_UNLOCK:
169156283Srwatson		mtx_unlock(dmtx);
170156283Srwatson		break;
171156283Srwatson	default:
172156283Srwatson		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
173156283Srwatson	}
174156283Srwatson}
175156283Srwatson
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	/* With INVARIANTS, treat a deferred load without a lockfunc as fatal. */
	panic("driver error: busdma dflt_lock called");
#else
	/* Otherwise just warn; the driver's data is left unprotected. */
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
191156283Srwatson
192156283Srwatson/*
193156283Srwatson * Since there is no way for a device to obtain a dma tag from its parent
 * we use this kluge to handle the different supported bus systems.
195 * The sparc64_root_dma_tag is used as parent for tags that have none, so that
196 * the correct methods will be used.
197 */
198bus_dma_tag_t sparc64_root_dma_tag;
199
/*
 * Allocate a device specific dma_tag.
 *
 * Returns 0 on success or ENOMEM; on success *dmat points at the new tag,
 * on failure *dmat is NULL.  The new tag inherits the method table and
 * cookie from its parent (or from sparc64_root_dma_tag when parent is
 * NULL) and takes a reference on the parent.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t impptag;
	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	impptag = parent != NULL ? parent : sparc64_root_dma_tag;
	/*
	 * The method table pointer and the cookie need to be taken over from
	 * the parent or the root tag.
	 */
	newtag->dt_cookie = impptag->dt_cookie;
	newtag->dt_mt = impptag->dt_mt;

	newtag->dt_parent = parent;
	newtag->dt_alignment = alignment;
	newtag->dt_boundary = boundary;
	/* Round the address limits up to the end of their containing page. */
	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->dt_filter = filter;
	newtag->dt_filterarg = filterarg;
	newtag->dt_maxsize = maxsize;
	newtag->dt_nsegments = nsegments;
	newtag->dt_maxsegsz = maxsegsz;
	newtag->dt_flags = flags;
	newtag->dt_ref_count = 1; /* Count ourselves */
	newtag->dt_map_count = 0;

	if (lockfunc != NULL) {
		newtag->dt_lockfunc = lockfunc;
		newtag->dt_lockfuncarg = lockfuncarg;
	} else {
		/* No lockfunc: any deferred load will hit dflt_lock(). */
		newtag->dt_lockfunc = dflt_lock;
		newtag->dt_lockfuncarg = NULL;
	}

	/* Segment array is allocated lazily in sparc64_dma_alloc_map(). */
	newtag->dt_segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->dt_lowaddr = ulmin(parent->dt_lowaddr,
		    newtag->dt_lowaddr);
		newtag->dt_highaddr = ulmax(parent->dt_highaddr,
		    newtag->dt_highaddr);
		if (newtag->dt_boundary == 0)
			newtag->dt_boundary = parent->dt_boundary;
		else if (parent->dt_boundary != 0)
			newtag->dt_boundary = ulmin(parent->dt_boundary,
			    newtag->dt_boundary);
		/* Keep the parent alive while we reference it. */
		atomic_add_int(&parent->dt_ref_count, 1);
	}

	/* A segment may never span a boundary, so cap the segment size. */
	if (newtag->dt_boundary > 0)
		newtag->dt_maxsegsz = ulmin(newtag->dt_maxsegsz,
		    newtag->dt_boundary);

	*dmat = newtag;
	return (0);
}
274
/*
 * Release a reference on a dma tag, freeing it (and walking up the parent
 * chain, dropping the references taken in bus_dma_tag_create()) once the
 * reference count drops to zero.  Returns EBUSY while maps are still
 * allocated from the tag, 0 otherwise.  A NULL tag is a no-op.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t parent;

	if (dmat != NULL) {
		if (dmat->dt_map_count != 0)
			return (EBUSY);
		while (dmat != NULL) {
			/* Save the parent before dmat may be freed below. */
			parent = dmat->dt_parent;
			atomic_subtract_int(&dmat->dt_ref_count, 1);
			if (dmat->dt_ref_count == 0) {
				if (dmat->dt_segments != NULL)
					free(dmat->dt_segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}
302
303/* Allocate/free a tag, and do the necessary management work. */
304int
305sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
306{
307
308	if (dmat->dt_segments == NULL) {
309		dmat->dt_segments = (bus_dma_segment_t *)malloc(
310		    sizeof(bus_dma_segment_t) * dmat->dt_nsegments, M_DEVBUF,
311		    M_NOWAIT);
312		if (dmat->dt_segments == NULL)
313			return (ENOMEM);
314	}
315	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
316	if (*mapp == NULL)
317		return (ENOMEM);
318
319	SLIST_INIT(&(*mapp)->dm_reslist);
320	dmat->dt_map_count++;
321	return (0);
322}
323
324void
325sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
326{
327
328	free(map, M_DEVBUF);
329	dmat->dt_map_count--;
330}
331
/*
 * Nexus map-create method: a thin wrapper around sparc64_dma_alloc_map();
 * the flags are unused here.
 */
static int
nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{

	return (sparc64_dma_alloc_map(dmat, mapp));
}
338
/*
 * Nexus map-destroy method: free the map; never fails.
 */
static int
nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	sparc64_dma_free_map(dmat, map);
	return (0);
}
346
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Both *lastaddrp and *segp are read before being written, so callers
 * must initialize them (typically to 0) before the first invocation.
 * Returns 0 on success or EFBIG if the buffer did not fit into
 * dmat->dt_nsegments segments.
 */
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, void *buf, bus_size_t buflen,
    struct thread *td, int flags, bus_addr_t *lastaddrp,
    bus_dma_segment_t *segs, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	/* A thread implies a user buffer; use its pmap for translation. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	/* NB: bmask is only meaningful when dt_boundary != 0. */
	bmask  = ~(dmat->dt_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * Never cross a page within one chunk.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->dt_boundary > 0) {
			baddr = (curaddr + dmat->dt_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Coalesce only when physically contiguous, within
			 * the maximum segment size and (if a boundary is
			 * set) inside the same boundary window.
			 */
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
			    (dmat->dt_boundary == 0 ||
			    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				/* Out of segments: report EFBIG below. */
				if (++seg >= dmat->dt_nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
432
433/*
434 * Common function for loading a DMA map with a linear buffer.  May
435 * be called by bus-specific DMA map load functions.
436 *
437 * Most SPARCs have IOMMUs in the bus controllers.  In those cases
438 * they only need one segment and will use virtual addresses for DVMA.
439 * Those bus controllers should intercept these vectors and should
440 * *NEVER* call nexus_dmamap_load() which is used only by devices that
441 * bypass DVMA.
442 */
443static int
444nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
445    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
446    int flags)
447{
448	bus_addr_t lastaddr;
449	int error, nsegs;
450
451	error = _nexus_dmamap_load_buffer(dmat, buf, buflen, NULL, flags,
452	    &lastaddr, dmat->dt_segments, &nsegs, 1);
453
454	if (error == 0) {
455		(*callback)(callback_arg, dmat->dt_segments, nsegs + 1, 0);
456		map->dm_flags |= DMF_LOADED;
457	} else
458		(*callback)(callback_arg, NULL, 0, error);
459
460	return (0);
461}
462
/*
 * Like nexus_dmamap_load(), but for mbufs.
 *
 * Walks the mbuf chain, loading each non-empty mbuf's data through
 * _nexus_dmamap_load_buffer(), then reports the result via the callback.
 * Returns the load error (0, EINVAL if the packet exceeds dt_maxsize, or
 * whatever the buffer loader returned).
 */
static int
nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		/* lastaddr/nsegs carry the segment state across mbufs. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _nexus_dmamap_load_buffer(dmat,
				    m->m_data, m->m_len,NULL, flags, &lastaddr,
				    dmat->dt_segments, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
	} else {
		map->dm_flags |= DMF_LOADED;
		/* nsegs is the index of the last segment, hence + 1. */
		(*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}
503
/*
 * Like nexus_dmamap_load_mbuf(), but fills a caller-provided segment
 * array instead of invoking a callback.  On return *nsegs is the number
 * of segments used (last index + 1).  Returns 0, EINVAL if the packet
 * exceeds dt_maxsize, or the buffer loader's error.
 *
 * NB: unlike the callback variants, this does not set DMF_LOADED.
 */
static int
nexus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dma_segment_t *segs, int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		/* lastaddr/*nsegs carry the segment state across mbufs. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _nexus_dmamap_load_buffer(dmat,
				    m->m_data, m->m_len,NULL, flags, &lastaddr,
				    segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* Convert last segment index into a segment count. */
	++*nsegs;
	return (error);
}
534
535/*
536 * Like nexus_dmamap_load(), but for uios.
537 */
538static int
539nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
540    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
541{
542	bus_addr_t lastaddr;
543	int nsegs, error, first, i;
544	bus_size_t resid;
545	struct iovec *iov;
546	struct thread *td = NULL;
547
548	resid = uio->uio_resid;
549	iov = uio->uio_iov;
550
551	if (uio->uio_segflg == UIO_USERSPACE) {
552		td = uio->uio_td;
553		KASSERT(td != NULL,
554			("nexus_dmamap_load_uio: USERSPACE but no proc"));
555	}
556
557	nsegs = 0;
558	error = 0;
559	first = 1;
560	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
561		/*
562		 * Now at the first iovec to load.  Load each iovec
563		 * until we have exhausted the residual count.
564		 */
565		bus_size_t minlen =
566			resid < iov[i].iov_len ? resid : iov[i].iov_len;
567		caddr_t addr = (caddr_t) iov[i].iov_base;
568
569		if (minlen > 0) {
570			error = _nexus_dmamap_load_buffer(dmat, addr, minlen,
571			    td, flags, &lastaddr, dmat->dt_segments, &nsegs,
572			    first);
573			first = 0;
574
575			resid -= minlen;
576		}
577	}
578
579	if (error) {
580		/* force "no valid mappings" in callback */
581		(*callback)(callback_arg, dmat->dt_segments, 0, 0, error);
582	} else {
583		map->dm_flags |= DMF_LOADED;
584		(*callback)(callback_arg, dmat->dt_segments, nsegs + 1,
585		    uio->uio_resid, error);
586	}
587	return (error);
588}
589
/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 *
 * The nexus keeps no per-map translation state, so only the loaded
 * flag needs to be cleared.
 */
static void
nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	map->dm_flags &= ~DMF_LOADED;
}
600
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
static void
nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar(Sync);
	}
#if 0
	/* Should not be needed. */
	if (op & BUS_DMASYNC_POSTREAD) {
		ecache_flush((vm_offset_t)map->buf,
		    (vm_offset_t)map->buf + map->buflen - 1);
	}
#endif
	if (op & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}
632
633/*
634 * Common function for DMA-safe memory allocation.  May be called
635 * by bus-specific DMA memory allocation functions.
636 */
637static int
638nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
639    bus_dmamap_t *mapp)
640{
641	int mflags;
642
643	if (flags & BUS_DMA_NOWAIT)
644		mflags = M_NOWAIT;
645	else
646		mflags = M_WAITOK;
647	if (flags & BUS_DMA_ZERO)
648		mflags |= M_ZERO;
649
650	if ((dmat->dt_maxsize <= PAGE_SIZE)) {
651		*vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
652	} else {
653		/*
654		 * XXX: Use contigmalloc until it is merged into this facility
655		 * and handles multi-seg allocations.  Nobody is doing multi-seg
656		 * allocations yet though.
657		 */
658		*vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
659		    0ul, dmat->dt_lowaddr,
660		    dmat->dt_alignment ? dmat->dt_alignment : 1UL,
661		    dmat->dt_boundary);
662	}
663	if (*vaddr == NULL)
664		return (ENOMEM);
665	return (0);
666}
667
668/*
669 * Common function for freeing DMA-safe memory.  May be called by
670 * bus-specific DMA memory free functions.
671 */
672static void
673nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
674{
675
676	if ((dmat->dt_maxsize <= PAGE_SIZE))
677		free(vaddr, M_DEVBUF);
678	else {
679		contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
680	}
681}
682
/*
 * Busdma method table for the nexus (direct, IOMMU-less) implementation.
 * NOTE(review): positional initializers -- the order must match the field
 * order of struct bus_dma_methods in machine/bus_private.h; confirm there.
 */
struct bus_dma_methods nexus_dma_methods = {
	nexus_dmamap_create,
	nexus_dmamap_destroy,
	nexus_dmamap_load,
	nexus_dmamap_load_mbuf,
	nexus_dmamap_load_mbuf_sg,
	nexus_dmamap_load_uio,
	nexus_dmamap_unload,
	nexus_dmamap_sync,
	nexus_dmamem_alloc,
	nexus_dmamem_free,
};
695
/*
 * Root DMA tag for the nexus.
 * NOTE(review): positional initializers -- the order must match the field
 * order of struct bus_dma_tag in machine/bus_private.h; confirm there.
 * Several limits are marked XXX and look like placeholder defaults.
 */
struct bus_dma_tag nexus_dmatag = {
	NULL,
	NULL,
	8,
	0,
	0,
	0x3ffffffff,
	NULL,		/* XXX */
	NULL,
	0x3ffffffff,	/* XXX */
	0xff,		/* XXX */
	0xffffffff,	/* XXX */
	0,
	0,
	0,
	NULL,
	NULL,
	NULL,
	&nexus_dma_methods,
};
716
/*
 * Helpers to map/unmap bus memory
 *
 * Map `size' bytes of bus space starting at physical address `handle'
 * into the kernel address space; the virtual address (with the page
 * offset of `handle' preserved) is returned through *hp.  If `vaddr'
 * is non-zero it names the virtual address to use, otherwise KVA is
 * allocated.  Returns 0 or EINVAL (zero size); panics on KVA exhaustion.
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size, int flags, vm_offset_t vaddr, void **hp)
{
	vm_offset_t addr;
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	addr = (vm_offset_t)handle;
	size = round_page(size);
	if (size == 0) {
		printf("%s: zero size\n", __func__);
		return (EINVAL);
	}
	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		/* TD_IE: invert endianness for PCI space mappings. */
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	/* TD_E marks the mapping with side effects (non-cacheable). */
	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pm_flags |= TD_E;

	if (vaddr != 0L)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kmem_alloc_nofault(kernel_map, size)) == 0)
			panic("%s: cannot allocate virtual memory", __func__);
	}

	/* Preserve page offset. */
	*hp = (void *)(sva | ((u_long)addr & PAGE_MASK));

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	/* Enter a wired mapping for every page in the range. */
	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	/* Flush any stale TLB entries for the range on all CPUs. */
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	return (0);
}
775
/*
 * Undo a sparc64_bus_mem_map(): remove the page mappings, demap the TLB
 * range and release the KVA.  Always returns 0.
 * NOTE(review): the KVA is freed unconditionally, even for mappings that
 * were established at a caller-provided vaddr -- verify callers.
 */
int
sparc64_bus_mem_unmap(void *bh, bus_size_t size)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t endva;

	sva = trunc_page((vm_offset_t)bh);
	endva = sva + round_page(size);
	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove_flags(va);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	kmem_free(kernel_map, sva, size);
	return (0);
}
791
792/*
793 * Fake up a bus tag, for use by console drivers in early boot when the regular
794 * means to allocate resources are not yet available.
795 * Addr is the physical address of the desired start of the handle.
796 */
797bus_space_handle_t
798sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
799{
800
801	ptag->bst_cookie = NULL;
802	ptag->bst_parent = NULL;
803	ptag->bst_type = space;
804	ptag->bst_bus_barrier = nexus_bus_barrier;
805	return (addr);
806}
807
808/*
809 * Base bus space handlers.
810 */
811
812static void
813nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
814    bus_size_t size, int flags)
815{
816
817	/*
818	 * We have lots of alternatives depending on whether we're
819	 * synchronizing loads with loads, loads with stores, stores
820	 * with loads, or stores with stores.  The only ones that seem
821	 * generic are #Sync and #MemIssue.  I'll use #Sync for safety.
822	 */
823	switch(flags) {
824	case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE:
825	case BUS_SPACE_BARRIER_READ:
826	case BUS_SPACE_BARRIER_WRITE:
827		membar(Sync);
828		break;
829	default:
830		panic("%s: unknown flags", __func__);
831	}
832	return;
833}
834
/* Bus space tag for the nexus (direct physical) bus space. */
struct bus_space_tag nexus_bustag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	NEXUS_BUS_SPACE,		/* type */
	nexus_bus_barrier,		/* bus_space_barrier */
};
841