/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	8.6 (Berkeley) 1/14/94
 *	from: NetBSD: machdep.c,v 1.111 2001/09/15 07:13:40 eeh Exp
 *	and
 * 	from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
 *
 * $FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 118081 2003-07-27 13:52:10Z mux $
 */
111
112#include <sys/param.h>
113#include <sys/bus.h>
114#include <sys/lock.h>
115#include <sys/malloc.h>
116#include <sys/mbuf.h>
117#include <sys/mutex.h>
118#include <sys/proc.h>
119#include <sys/smp.h>
120#include <sys/systm.h>
121#include <sys/uio.h>
122
123#include <vm/vm.h>
124#include <vm/vm_extern.h>
125#include <vm/vm_kern.h>
126#include <vm/vm_page.h>
127#include <vm/vm_param.h>
128#include <vm/vm_map.h>
129
130#include <machine/asi.h>
131#include <machine/atomic.h>
132#include <machine/bus.h>
133#include <machine/bus_private.h>
134#include <machine/cache.h>
135#include <machine/smp.h>
136#include <machine/tlb.h>
137
/*
 * ASI's for bus access.
 * Indexed positionally; entries parallel bus_stream_asi below.  The _L
 * variants are used for the PCI spaces -- presumably the little-endian
 * ASIs so byte order is swapped on access; confirm against
 * <machine/asi.h>.
 */
int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};
147
/*
 * ASI's for "stream" (non-byte-swapped) bus access; all entries use the
 * plain (non-_L) ASI, unlike bus_type_asi above.
 */
int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};
156
157/*
158 * Convenience function for manipulating driver locks from busdma (during
159 * busdma_swi, for example).  Drivers that don't provide their own locks
160 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
161 * non-mutex locking scheme don't have to use this at all.
162 */
163void
164busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
165{
166	struct mtx *dmtx;
167
168	dmtx = (struct mtx *)arg;
169	switch (op) {
170	case BUS_DMA_LOCK:
171		mtx_lock(dmtx);
172		break;
173	case BUS_DMA_UNLOCK:
174		mtx_unlock(dmtx);
175		break;
176	default:
177		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
178	}
179}
180
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be defered.
 * XXX Should have a way to identify which driver is responsible here.
 *
 * With INVARIANTS this panics; otherwise it only logs the driver error
 * and returns, so the (unlocked) operation proceeds anyway.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
196
/*
 * Since there is no way for a device to obtain a dma tag from its parent
 * we use this kluge to handle different the different supported bus systems.
 * The sparc64_root_dma_tag is used as parent for tags that have none, so that
 * the correct methods will be used.
 *
 * Set by bus front-ends elsewhere; bus_dma_tag_create() below reads it
 * whenever a tag is created with a NULL parent.
 */
bus_dma_tag_t sparc64_root_dma_tag;
204
/*
 * Allocate a device specific dma_tag.
 *
 * The new tag inherits its method table and cookie from its parent (or
 * from sparc64_root_dma_tag when parent is NULL) and its address/boundary
 * restrictions are narrowed by the parent's.  Returns 0 on success or
 * ENOMEM, with *dmat set to the new tag or NULL respectively.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t impptag;
	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	/* Tags created without a parent inherit from the root tag. */
	impptag = parent != NULL ? parent : sparc64_root_dma_tag;
	/*
	 * The method table pointer and the cookie need to be taken over from
	 * the parent or the root tag.
	 */
	newtag->dt_cookie = impptag->dt_cookie;
	newtag->dt_mt = impptag->dt_mt;

	newtag->dt_parent = parent;
	newtag->dt_alignment = alignment;
	newtag->dt_boundary = boundary;
	/* Round the limits up to the last byte of their page. */
	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->dt_filter = filter;
	newtag->dt_filterarg = filterarg;
	newtag->dt_maxsize = maxsize;
	newtag->dt_nsegments = nsegments;
	newtag->dt_maxsegsz = maxsegsz;
	newtag->dt_flags = flags;
	newtag->dt_ref_count = 1; /* Count ourselves */
	newtag->dt_map_count = 0;

	/* NULL lockfunc means deferred loads are a driver error. */
	if (lockfunc != NULL) {
		newtag->dt_lockfunc = lockfunc;
		newtag->dt_lockfuncarg = lockfuncarg;
	} else {
		newtag->dt_lockfunc = dflt_lock;
		newtag->dt_lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->dt_lowaddr = ulmin(parent->dt_lowaddr,
		    newtag->dt_lowaddr);
		newtag->dt_highaddr = ulmax(parent->dt_highaddr,
		    newtag->dt_highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritence chain.
		 */
		newtag->dt_boundary = ulmin(parent->dt_boundary,
		    newtag->dt_boundary);
		/* The parent must outlive its children. */
		atomic_add_int(&parent->dt_ref_count, 1);
	}

	*dmat = newtag;
	return (0);
}
274
/*
 * Release a reference on dmat and, transitively, on any ancestors whose
 * reference count drops to zero.  Fails with EBUSY while maps are still
 * allocated from the tag; a NULL dmat is a no-op.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t parent;

	if (dmat != NULL) {
		if (dmat->dt_map_count != 0)
			return (EBUSY);
		while (dmat != NULL) {
			parent = dmat->dt_parent;
			/*
			 * NOTE(review): the decrement and the subsequent
			 * test are two separate operations, so concurrent
			 * destroys of the same tag could both miss (or both
			 * see) zero -- confirm callers serialize this.
			 */
			atomic_subtract_int(&dmat->dt_ref_count, 1);
			if (dmat->dt_ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}
300
301/* Allocate/free a tag, and do the necessary management work. */
302int
303sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
304{
305
306	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
307	if (*mapp == NULL)
308		return (ENOMEM);
309
310	SLIST_INIT(&(*mapp)->dm_reslist);
311	dmat->dt_map_count++;
312	return (0);
313}
314
315void
316sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
317{
318
319	free(map, M_DEVBUF);
320	dmat->dt_map_count--;
321}
322
323static int
324nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
325{
326
327	return (sparc64_dma_alloc_map(dmat, mapp));
328}
329
/* Nexus method: free the map; cannot fail. */
static int
nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	sparc64_dma_free_map(dmat, map);
	return (0);
}
337
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Both *segp and *lastaddrp are read before being written, so callers
 * must initialize them (0 and 0 for a fresh load).  Returns EFBIG when
 * the buffer does not fit into dmat->dt_nsegments segments, 0 otherwise.
 */
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td, int flags,
    bus_addr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	/* A thread implies a user-space buffer; use its pmap. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	/* Mask selecting the boundary-aligned part of an address. */
	bmask  = ~(dmat->dt_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * At most up to the end of the current page.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->dt_boundary > 0) {
			baddr = (curaddr + dmat->dt_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Coalesce only if physically contiguous, within
			 * the maximum segment size and not crossing a
			 * boundary.
			 */
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
			    (dmat->dt_boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->dt_nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
423
424/*
425 * Common function for loading a DMA map with a linear buffer.  May
426 * be called by bus-specific DMA map load functions.
427 *
428 * Most SPARCs have IOMMUs in the bus controllers.  In those cases
429 * they only need one segment and will use virtual addresses for DVMA.
430 * Those bus controllers should intercept these vectors and should
431 * *NEVER* call nexus_dmamap_load() which is used only by devices that
432 * bypass DVMA.
433 */
434static int
435nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
436    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
437    int flags)
438{
439#ifdef __GNUC__
440	bus_dma_segment_t dm_segments[dmat->dt_nsegments];
441#else
442	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
443#endif
444	bus_addr_t lastaddr;
445	int error, nsegs;
446
447	error = _nexus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
448	    NULL, flags, &lastaddr, &nsegs, 1);
449
450	if (error == 0) {
451		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);
452		map->dm_flags |= DMF_LOADED;
453	} else
454		(*callback)(callback_arg, NULL, 0, error);
455
456	return (0);
457}
458
/*
 * Like nexus_dmamap_load(), but for mbufs.
 *
 * Walks the mbuf chain, loading each non-empty mbuf as a continuation of
 * the previous one so physically contiguous mbufs can share a segment.
 * Rejects packets larger than the tag's maxsize with EINVAL.  Returns the
 * error in addition to reporting it through the callback.
 */
static int
nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->dt_nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _nexus_dmamap_load_buffer(dmat,
				    dm_segments, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/*
		 * NOTE(review): for a chain with pkthdr.len == 0 the loop
		 * never loads anything, yet nsegs + 1 == 1 is reported with
		 * dm_segments[0] uninitialized -- confirm callers never pass
		 * empty packets.
		 */
		map->dm_flags |= DMF_LOADED;
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}
504
505/*
506 * Like nexus_dmamap_load(), but for uios.
507 */
508static int
509nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
510    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
511{
512	bus_addr_t lastaddr;
513#ifdef __GNUC__
514	bus_dma_segment_t dm_segments[dmat->dt_nsegments];
515#else
516	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
517#endif
518	int nsegs, error, first, i;
519	bus_size_t resid;
520	struct iovec *iov;
521	struct thread *td = NULL;
522
523	resid = uio->uio_resid;
524	iov = uio->uio_iov;
525
526	if (uio->uio_segflg == UIO_USERSPACE) {
527		td = uio->uio_td;
528		KASSERT(td != NULL,
529			("nexus_dmamap_load_uio: USERSPACE but no proc"));
530	}
531
532	nsegs = 0;
533	error = 0;
534	first = 1;
535	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
536		/*
537		 * Now at the first iovec to load.  Load each iovec
538		 * until we have exhausted the residual count.
539		 */
540		bus_size_t minlen =
541			resid < iov[i].iov_len ? resid : iov[i].iov_len;
542		caddr_t addr = (caddr_t) iov[i].iov_base;
543
544		if (minlen > 0) {
545			error = _nexus_dmamap_load_buffer(dmat, dm_segments,
546			    addr, minlen, td, flags, &lastaddr, &nsegs, first);
547			first = 0;
548
549			resid -= minlen;
550		}
551	}
552
553	if (error) {
554		/* force "no valid mappings" in callback */
555		(*callback)(callback_arg, dm_segments, 0, 0, error);
556	} else {
557		map->dm_flags |= DMF_LOADED;
558		(*callback)(callback_arg, dm_segments, nsegs + 1,
559		    uio->uio_resid, error);
560	}
561	return (error);
562}
563
/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 *
 * The nexus keeps no per-map resources for loads, so only the loaded
 * flag needs to be cleared.
 */
static void
nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	map->dm_flags &= ~DMF_LOADED;
}
574
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
static void
nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar(Sync);
	}
#if 0
	/* Should not be needed. */
	if (op & BUS_DMASYNC_POSTREAD) {
		ecache_flush((vm_offset_t)map->buf,
		    (vm_offset_t)map->buf + map->buflen - 1);
	}
#endif
	if (op & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}
606
607/*
608 * Common function for DMA-safe memory allocation.  May be called
609 * by bus-specific DMA memory allocation functions.
610 */
611static int
612nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
613    bus_dmamap_t *mapp)
614{
615	int mflags;
616
617	if (flags & BUS_DMA_NOWAIT)
618		mflags = M_NOWAIT;
619	else
620		mflags = M_WAITOK;
621	if (flags & BUS_DMA_ZERO)
622		mflags |= M_ZERO;
623
624	if ((dmat->dt_maxsize <= PAGE_SIZE)) {
625		*vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
626	} else {
627		/*
628		 * XXX: Use contigmalloc until it is merged into this facility
629		 * and handles multi-seg allocations.  Nobody is doing multi-seg
630		 * allocations yet though.
631		 */
632		*vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
633		    0ul, dmat->dt_lowaddr,
634		    dmat->dt_alignment ? dmat->dt_alignment : 1UL,
635		    dmat->dt_boundary);
636	}
637	if (*vaddr == NULL)
638		return (ENOMEM);
639	return (0);
640}
641
642/*
643 * Common function for freeing DMA-safe memory.  May be called by
644 * bus-specific DMA memory free functions.
645 */
646static void
647nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
648{
649
650	if ((dmat->dt_maxsize <= PAGE_SIZE))
651		free(vaddr, M_DEVBUF);
652	else {
653		mtx_lock(&Giant);
654		contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
655		mtx_unlock(&Giant);
656	}
657}
658
/*
 * Method table installed in tags whose devices bypass DVMA (see the
 * comment above nexus_dmamap_load()).  Entries are positional.
 */
struct bus_dma_methods nexus_dma_methods = {
	nexus_dmamap_create,
	nexus_dmamap_destroy,
	nexus_dmamap_load,
	nexus_dmamap_load_mbuf,
	nexus_dmamap_load_uio,
	nexus_dmamap_unload,
	nexus_dmamap_sync,
	nexus_dmamem_alloc,
	nexus_dmamem_free,
};
670
/*
 * Root DMA tag for the nexus.
 * NOTE(review): the initializers are positional -- presumably they match
 * the dt_* member order assigned in bus_dma_tag_create() (cookie, method
 * table, parent, alignment, boundary, lowaddr, ...); verify against
 * <machine/bus_private.h> before reordering anything here.
 */
struct bus_dma_tag nexus_dmatag = {
	NULL,
	NULL,
	8,
	0,
	0,
	0x3ffffffff,
	NULL,		/* XXX */
	NULL,
	0x3ffffffff,	/* XXX */
	0xff,		/* XXX */
	0xffffffff,	/* XXX */
	0,
	0,
	0,
	NULL,
	NULL,
	&nexus_dma_methods,
};
690
/*
 * Helpers to map/unmap bus memory
 */
/*
 * Map the bus memory described by handle/size into kernel virtual memory
 * with the TLB attributes appropriate for tag's bus type, and return the
 * resulting virtual address (page offset preserved) in *hp.  If vaddr is
 * non-zero it names the virtual address to use; otherwise VA space is
 * allocated with kmem_alloc_nofault() (and a failure there panics).
 * Returns 0 on success or EINVAL for a zero size.
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size, int flags, vm_offset_t vaddr, void **hp)
{
	vm_offset_t addr;
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	/* The handle carries the (physical) bus address to map. */
	addr = (vm_offset_t)handle;
	size = round_page(size);
	if (size == 0) {
		printf("sparc64_bus_map: zero size\n");
		return (EINVAL);
	}
	/* PCI spaces get the invert-endianness TLB bit set. */
	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	/* Map non-cacheable unless explicitly requested otherwise. */
	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pm_flags |= TD_E;

	if (vaddr != NULL)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kmem_alloc_nofault(kernel_map, size)) == NULL)
			panic("sparc64_bus_map: cannot allocate virtual "
			    "memory");
	}

	/* Preserve page offset. */
	*hp = (void *)(sva | ((u_long)addr & PAGE_MASK));

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	/* Enter a wired mapping for every page of the range. */
	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	/* Flush any stale TLB entries for the freshly mapped range. */
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	return (0);
}
750
/*
 * Undo a sparc64_bus_mem_map(): remove the page mappings, demap the TLB
 * range and release the virtual address space.  Always returns 0.
 */
int
sparc64_bus_mem_unmap(void *bh, bus_size_t size)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t endva;

	sva = trunc_page((vm_offset_t)bh);
	endva = sva + round_page(size);
	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove_flags(va);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	/*
	 * NOTE(review): kmem_free() gets the unrounded size while the loop
	 * above uses round_page(size) -- confirm kmem_free() rounds
	 * internally.  Also assumes the VA came from kmem_alloc_nofault()
	 * (i.e. the map-time vaddr was 0).
	 */
	kmem_free(kernel_map, sva, size);
	return (0);
}
766
/*
 * Fake up a bus tag, for use by console drivers in early boot when the regular
 * means to allocate resources are not yet available.
 * Note that these tags are not eligible for bus_space_barrier operations.
 * Addr is the physical address of the desired start of the handle.
 *
 * space selects the bus type (and thereby the access ASI, see
 * bus_type_asi above); ptag is caller-provided storage that is filled in.
 * The returned handle is simply the physical address.
 */
bus_space_handle_t
sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
{

	ptag->bst_cookie = NULL;
	ptag->bst_parent = NULL;
	ptag->bst_type = space;
	ptag->bst_bus_barrier = NULL;
	return (addr);
}
783
784/*
785 * Base bus space handlers.
786 */
787static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
788    bus_size_t, bus_size_t, int);
789
790static void
791nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
792    bus_size_t size, int flags)
793{
794
795	/*
796	 * We have lots of alternatives depending on whether we're
797	 * synchronizing loads with loads, loads with stores, stores
798	 * with loads, or stores with stores.  The only ones that seem
799	 * generic are #Sync and #MemIssue.  I'll use #Sync for safety.
800	 */
801	switch(flags) {
802	case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE:
803	case BUS_SPACE_BARRIER_READ:
804	case BUS_SPACE_BARRIER_WRITE:
805		membar(Sync);
806		break;
807	default:
808		panic("sparc64_bus_barrier: unknown flags");
809	}
810	return;
811}
812
/* Default bus space tag for the nexus; entries are positional. */
struct bus_space_tag nexus_bustag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	UPA_BUS_SPACE,			/* type */
	nexus_bus_barrier,		/* bus_space_barrier */
};
819