bus_machdep.c revision 123865
1/*-
2 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
7 * NASA Ames Research Center.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by the NetBSD
20 *	Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 *    contributors may be used to endorse or promote products derived
23 *    from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37/*
38 * Copyright (c) 1992, 1993
39 *	The Regents of the University of California.  All rights reserved.
40 *
41 * This software was developed by the Computer Systems Engineering group
42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43 * contributed to Berkeley.
44 *
45 * All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 *	This product includes software developed by the University of
48 *	California, Lawrence Berkeley Laboratory.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 *    notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 *    notice, this list of conditions and the following disclaimer in the
57 *    documentation and/or other materials provided with the distribution.
58 * 3. All advertising materials mentioning features or use of this software
59 *    must display the following acknowledgement:
60 *	This product includes software developed by the University of
61 *	California, Berkeley and its contributors.
62 * 4. Neither the name of the University nor the names of its contributors
63 *    may be used to endorse or promote products derived from this software
64 *    without specific prior written permission.
65 *
66 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
69 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
70 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
71 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
72 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
73 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
74 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
75 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
76 * SUCH DAMAGE.
77 */
78/*
79 * Copyright (c) 1997, 1998 Justin T. Gibbs.
80 * All rights reserved.
81 * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>.  All rights reserved.
82 *
83 * Redistribution and use in source and binary forms, with or without
84 * modification, are permitted provided that the following conditions
85 * are met:
86 * 1. Redistributions of source code must retain the above copyright
87 *    notice, this list of conditions, and the following disclaimer,
88 *    without modification, immediately at the beginning of the file.
89 * 2. The name of the author may not be used to endorse or promote products
90 *    derived from this software without specific prior written permission.
91 *
92 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
93 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
94 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
95 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
96 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
97 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
98 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
99 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
100 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
101 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
102 * SUCH DAMAGE.
103 *
104 *	from: @(#)machdep.c	8.6 (Berkeley) 1/14/94
105 *	from: NetBSD: machdep.c,v 1.111 2001/09/15 07:13:40 eeh Exp
106 *	and
107 * 	from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
108 *
109 * $FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 123865 2003-12-26 14:22:26Z obrien $
110 */
111
112#include <sys/param.h>
113#include <sys/bus.h>
114#include <sys/lock.h>
115#include <sys/malloc.h>
116#include <sys/mbuf.h>
117#include <sys/mutex.h>
118#include <sys/proc.h>
119#include <sys/smp.h>
120#include <sys/systm.h>
121#include <sys/uio.h>
122
123#include <vm/vm.h>
124#include <vm/vm_extern.h>
125#include <vm/vm_kern.h>
126#include <vm/vm_page.h>
127#include <vm/vm_param.h>
128#include <vm/vm_map.h>
129
130#include <machine/asi.h>
131#include <machine/atomic.h>
132#include <machine/bus.h>
133#include <machine/bus_private.h>
134#include <machine/cache.h>
135#include <machine/smp.h>
136#include <machine/tlb.h>
137
/* Forward declaration: barrier method installed in the nexus bus space tag. */
static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
    bus_size_t, bus_size_t, int);
140
/*
 * ASIs for bus access, indexed by bus space type.
 * NOTE(review): the _L variants are presumably the little-endian
 * (byte-twisting) forms used for the little-endian PCI spaces --
 * confirm against machine/asi.h.
 */
int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};
150
/*
 * ASIs for "stream" bus access, indexed by bus space type; unlike
 * bus_type_asi, all entries use the same (non-_L) ASI, so stream
 * accesses are never byte-twisted.
 */
int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};
159
160/*
161 * Convenience function for manipulating driver locks from busdma (during
162 * busdma_swi, for example).  Drivers that don't provide their own locks
163 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
164 * non-mutex locking scheme don't have to use this at all.
165 */
166void
167busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
168{
169	struct mtx *dmtx;
170
171	dmtx = (struct mtx *)arg;
172	switch (op) {
173	case BUS_DMA_LOCK:
174		mtx_lock(dmtx);
175		break;
176	case BUS_DMA_UNLOCK:
177		mtx_unlock(dmtx);
178		break;
179	default:
180		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
181	}
182}
183
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	/* With INVARIANTS, a call here is treated as a fatal driver error. */
	panic("driver error: busdma dflt_lock called");
#else
	/* Otherwise only complain; note that no locking is performed. */
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
199
/*
 * Since there is no way for a device to obtain a dma tag from its parent
 * we use this kludge to handle the different supported bus systems.
 * The sparc64_root_dma_tag is used as parent for tags that have none, so
 * that the correct methods will be used.
 */
bus_dma_tag_t sparc64_root_dma_tag;
207
208/*
209 * Allocate a device specific dma_tag.
210 */
211int
212bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
213    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
214    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
215    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
216    void *lockfuncarg, bus_dma_tag_t *dmat)
217{
218	bus_dma_tag_t impptag;
219	bus_dma_tag_t newtag;
220
221	/* Return a NULL tag on failure */
222	*dmat = NULL;
223
224	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
225	if (newtag == NULL)
226		return (ENOMEM);
227
228	impptag = parent != NULL ? parent : sparc64_root_dma_tag;
229	/*
230	 * The method table pointer and the cookie need to be taken over from
231	 * the parent or the root tag.
232	 */
233	newtag->dt_cookie = impptag->dt_cookie;
234	newtag->dt_mt = impptag->dt_mt;
235
236	newtag->dt_parent = parent;
237	newtag->dt_alignment = alignment;
238	newtag->dt_boundary = boundary;
239	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
240	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
241	    (PAGE_SIZE - 1);
242	newtag->dt_filter = filter;
243	newtag->dt_filterarg = filterarg;
244	newtag->dt_maxsize = maxsize;
245	newtag->dt_nsegments = nsegments;
246	newtag->dt_maxsegsz = maxsegsz;
247	newtag->dt_flags = flags;
248	newtag->dt_ref_count = 1; /* Count ourselves */
249	newtag->dt_map_count = 0;
250
251	if (lockfunc != NULL) {
252		newtag->dt_lockfunc = lockfunc;
253		newtag->dt_lockfuncarg = lockfuncarg;
254	} else {
255		newtag->dt_lockfunc = dflt_lock;
256		newtag->dt_lockfuncarg = NULL;
257	}
258
259	/* Take into account any restrictions imposed by our parent tag */
260	if (parent != NULL) {
261		newtag->dt_lowaddr = ulmin(parent->dt_lowaddr,
262		    newtag->dt_lowaddr);
263		newtag->dt_highaddr = ulmax(parent->dt_highaddr,
264		    newtag->dt_highaddr);
265		/*
266		 * XXX Not really correct??? Probably need to honor boundary
267		 *     all the way up the inheritence chain.
268		 */
269		newtag->dt_boundary = ulmin(parent->dt_boundary,
270		    newtag->dt_boundary);
271		atomic_add_int(&parent->dt_ref_count, 1);
272	}
273
274	*dmat = newtag;
275	return (0);
276}
277
/*
 * Drop a reference on a dma tag.  When the last reference goes away the
 * tag is freed and the reference it held on its parent is released in
 * turn, walking up the chain.  Returns EBUSY while maps are still
 * allocated from the tag; otherwise 0 (including for a NULL tag).
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t parent;

	if (dmat != NULL) {
		/* A tag with outstanding maps cannot be destroyed. */
		if (dmat->dt_map_count != 0)
			return (EBUSY);
		while (dmat != NULL) {
			parent = dmat->dt_parent;
			atomic_subtract_int(&dmat->dt_ref_count, 1);
			/*
			 * NOTE(review): the read of dt_ref_count below is
			 * not atomic with the subtraction above; this
			 * presumably relies on the caller holding the last
			 * external reference -- confirm.
			 */
			if (dmat->dt_ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}
303
304/* Allocate/free a tag, and do the necessary management work. */
305int
306sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
307{
308
309	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
310	if (*mapp == NULL)
311		return (ENOMEM);
312
313	SLIST_INIT(&(*mapp)->dm_reslist);
314	dmat->dt_map_count++;
315	return (0);
316}
317
318void
319sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
320{
321
322	free(map, M_DEVBUF);
323	dmat->dt_map_count--;
324}
325
/* Nexus dmamap_create method: a plain map allocation, no extra state. */
static int
nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{

	return (sparc64_dma_alloc_map(dmat, mapp));
}
332
/* Nexus dmamap_destroy method: free the map; always succeeds. */
static int
nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	sparc64_dma_free_map(dmat, map);
	return (0);
}
340
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 * Returns EFBIG when the buffer does not fit into dt_nsegments
 * segments, 0 otherwise.
 */
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td, int flags,
    bus_addr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	/* Translate through the thread's pmap for user buffers. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	/* NB: all-ones mask when dt_boundary == 0; only used if boundary > 0. */
	bmask  = ~(dmat->dt_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 * A single chunk never extends past a page boundary.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->dt_boundary > 0) {
			baddr = (curaddr + dmat->dt_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible: physically contiguous,
		 * within dt_maxsegsz, and not crossing a boundary.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
			    (dmat->dt_boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				/* Out of segments: leave buflen > 0 for EFBIG. */
				if (++seg >= dmat->dt_nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
426
427/*
428 * Common function for loading a DMA map with a linear buffer.  May
429 * be called by bus-specific DMA map load functions.
430 *
431 * Most SPARCs have IOMMUs in the bus controllers.  In those cases
432 * they only need one segment and will use virtual addresses for DVMA.
433 * Those bus controllers should intercept these vectors and should
434 * *NEVER* call nexus_dmamap_load() which is used only by devices that
435 * bypass DVMA.
436 */
437static int
438nexus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
439    bus_size_t buflen, bus_dmamap_callback_t *callback, void *callback_arg,
440    int flags)
441{
442#ifdef __GNUC__
443	bus_dma_segment_t dm_segments[dmat->dt_nsegments];
444#else
445	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
446#endif
447	bus_addr_t lastaddr;
448	int error, nsegs;
449
450	error = _nexus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
451	    NULL, flags, &lastaddr, &nsegs, 1);
452
453	if (error == 0) {
454		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);
455		map->dm_flags |= DMF_LOADED;
456	} else
457		(*callback)(callback_arg, NULL, 0, error);
458
459	return (0);
460}
461
462/*
463 * Like nexus_dmamap_load(), but for mbufs.
464 */
465static int
466nexus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
467    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
468{
469#ifdef __GNUC__
470	bus_dma_segment_t dm_segments[dmat->dt_nsegments];
471#else
472	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
473#endif
474	int nsegs, error;
475
476	M_ASSERTPKTHDR(m0);
477
478	nsegs = 0;
479	error = 0;
480	if (m0->m_pkthdr.len <= dmat->dt_maxsize) {
481		int first = 1;
482		bus_addr_t lastaddr = 0;
483		struct mbuf *m;
484
485		for (m = m0; m != NULL && error == 0; m = m->m_next) {
486			if (m->m_len > 0) {
487				error = _nexus_dmamap_load_buffer(dmat,
488				    dm_segments, m->m_data, m->m_len, NULL,
489				    flags, &lastaddr, &nsegs, first);
490				first = 0;
491			}
492		}
493	} else {
494		error = EINVAL;
495	}
496
497	if (error) {
498		/* force "no valid mappings" in callback */
499		(*callback)(callback_arg, dm_segments, 0, 0, error);
500	} else {
501		map->dm_flags |= DMF_LOADED;
502		(*callback)(callback_arg, dm_segments, nsegs + 1,
503		    m0->m_pkthdr.len, error);
504	}
505	return (error);
506}
507
508/*
509 * Like nexus_dmamap_load(), but for uios.
510 */
511static int
512nexus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
513    bus_dmamap_callback2_t *callback, void *callback_arg, int flags)
514{
515	bus_addr_t lastaddr;
516#ifdef __GNUC__
517	bus_dma_segment_t dm_segments[dmat->dt_nsegments];
518#else
519	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
520#endif
521	int nsegs, error, first, i;
522	bus_size_t resid;
523	struct iovec *iov;
524	struct thread *td = NULL;
525
526	resid = uio->uio_resid;
527	iov = uio->uio_iov;
528
529	if (uio->uio_segflg == UIO_USERSPACE) {
530		td = uio->uio_td;
531		KASSERT(td != NULL,
532			("nexus_dmamap_load_uio: USERSPACE but no proc"));
533	}
534
535	nsegs = 0;
536	error = 0;
537	first = 1;
538	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
539		/*
540		 * Now at the first iovec to load.  Load each iovec
541		 * until we have exhausted the residual count.
542		 */
543		bus_size_t minlen =
544			resid < iov[i].iov_len ? resid : iov[i].iov_len;
545		caddr_t addr = (caddr_t) iov[i].iov_base;
546
547		if (minlen > 0) {
548			error = _nexus_dmamap_load_buffer(dmat, dm_segments,
549			    addr, minlen, td, flags, &lastaddr, &nsegs, first);
550			first = 0;
551
552			resid -= minlen;
553		}
554	}
555
556	if (error) {
557		/* force "no valid mappings" in callback */
558		(*callback)(callback_arg, dm_segments, 0, 0, error);
559	} else {
560		map->dm_flags |= DMF_LOADED;
561		(*callback)(callback_arg, dm_segments, nsegs + 1,
562		    uio->uio_resid, error);
563	}
564	return (error);
565}
566
/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
static void
nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	/* No resources to tear down here; just mark the map as unloaded. */
	map->dm_flags &= ~DMF_LOADED;
}
577
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
static void
nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar(Sync);
	}
#if 0
	/* Should not be needed. */
	if (op & BUS_DMASYNC_POSTREAD) {
		ecache_flush((vm_offset_t)map->buf,
		    (vm_offset_t)map->buf + map->buflen - 1);
	}
#endif
	if (op & BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}
609
610/*
611 * Common function for DMA-safe memory allocation.  May be called
612 * by bus-specific DMA memory allocation functions.
613 */
614static int
615nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
616    bus_dmamap_t *mapp)
617{
618	int mflags;
619
620	if (flags & BUS_DMA_NOWAIT)
621		mflags = M_NOWAIT;
622	else
623		mflags = M_WAITOK;
624	if (flags & BUS_DMA_ZERO)
625		mflags |= M_ZERO;
626
627	if ((dmat->dt_maxsize <= PAGE_SIZE)) {
628		*vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
629	} else {
630		/*
631		 * XXX: Use contigmalloc until it is merged into this facility
632		 * and handles multi-seg allocations.  Nobody is doing multi-seg
633		 * allocations yet though.
634		 */
635		*vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
636		    0ul, dmat->dt_lowaddr,
637		    dmat->dt_alignment ? dmat->dt_alignment : 1UL,
638		    dmat->dt_boundary);
639	}
640	if (*vaddr == NULL)
641		return (ENOMEM);
642	return (0);
643}
644
645/*
646 * Common function for freeing DMA-safe memory.  May be called by
647 * bus-specific DMA memory free functions.
648 */
649static void
650nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
651{
652
653	if ((dmat->dt_maxsize <= PAGE_SIZE))
654		free(vaddr, M_DEVBUF);
655	else {
656		mtx_lock(&Giant);
657		contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
658		mtx_unlock(&Giant);
659	}
660}
661
/*
 * DMA method table for the nexus, i.e. for devices that bypass DVMA
 * (see the comment above nexus_dmamap_load()).  Entries are in the
 * order declared by struct bus_dma_methods in bus_private.h.
 */
struct bus_dma_methods nexus_dma_methods = {
	nexus_dmamap_create,
	nexus_dmamap_destroy,
	nexus_dmamap_load,
	nexus_dmamap_load_mbuf,
	nexus_dmamap_load_uio,
	nexus_dmamap_unload,
	nexus_dmamap_sync,
	nexus_dmamem_alloc,
	nexus_dmamem_free,
};
673
/*
 * The root DMA tag used by the nexus.  Field order follows struct
 * bus_dma_tag in bus_private.h.  NOTE(review): the 0x3ffffffff limits
 * presumably describe the reachable physical address range -- confirm
 * the field-by-field mapping against bus_private.h.
 */
struct bus_dma_tag nexus_dmatag = {
	NULL,
	NULL,
	8,
	0,
	0,
	0x3ffffffff,
	NULL,		/* XXX */
	NULL,
	0x3ffffffff,	/* XXX */
	0xff,		/* XXX */
	0xffffffff,	/* XXX */
	0,
	0,
	0,
	NULL,
	NULL,
	&nexus_dma_methods,
};
693
/*
 * Helpers to map/unmap bus memory
 */

/*
 * Map the physical range starting at handle into kernel virtual
 * address space, either at the caller-provided vaddr or in freshly
 * allocated KVA.  The sub-page offset of handle is preserved in the
 * virtual handle returned through hp.  Returns EINVAL for a zero
 * (rounded) size; panics if KVA allocation fails.
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size, int flags, vm_offset_t vaddr, void **hp)
{
	vm_offset_t addr;
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	addr = (vm_offset_t)handle;
	size = round_page(size);
	if (size == 0) {
		printf("sparc64_bus_map: zero size\n");
		return (EINVAL);
	}
	/*
	 * PCI spaces get TD_IE in their mappings.
	 * NOTE(review): TD_IE presumably selects inverted-endian (little-
	 * endian) accesses for the PCI spaces -- confirm against tlb.h.
	 */
	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	/* Non-cacheable mappings get the TD_E (side-effect) bit. */
	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pm_flags |= TD_E;

	if (vaddr != 0L)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kmem_alloc_nofault(kernel_map, size)) == 0)
			panic("sparc64_bus_map: cannot allocate virtual "
			    "memory");
	}

	/* Preserve page offset. */
	*hp = (void *)(sva | ((u_long)addr & PAGE_MASK));

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	/* Enter a wired mapping for each page, then flush the TLB range. */
	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	return (0);
}
753
754int
755sparc64_bus_mem_unmap(void *bh, bus_size_t size)
756{
757	vm_offset_t sva;
758	vm_offset_t va;
759	vm_offset_t endva;
760
761	sva = trunc_page((vm_offset_t)bh);
762	endva = sva + round_page(size);
763	for (va = sva; va < endva; va += PAGE_SIZE)
764		pmap_kremove_flags(va);
765	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
766	kmem_free(kernel_map, sva, size);
767	return (0);
768}
769
770/*
771 * Fake up a bus tag, for use by console drivers in early boot when the regular
772 * means to allocate resources are not yet available.
773 * Addr is the physical address of the desired start of the handle.
774 */
775bus_space_handle_t
776sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
777{
778
779	ptag->bst_cookie = NULL;
780	ptag->bst_parent = NULL;
781	ptag->bst_type = space;
782	ptag->bst_bus_barrier = nexus_bus_barrier;
783	return (addr);
784}
785
786/*
787 * Base bus space handlers.
788 */
789
790static void
791nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
792    bus_size_t size, int flags)
793{
794
795	/*
796	 * We have lots of alternatives depending on whether we're
797	 * synchronizing loads with loads, loads with stores, stores
798	 * with loads, or stores with stores.  The only ones that seem
799	 * generic are #Sync and #MemIssue.  I'll use #Sync for safety.
800	 */
801	switch(flags) {
802	case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE:
803	case BUS_SPACE_BARRIER_READ:
804	case BUS_SPACE_BARRIER_WRITE:
805		membar(Sync);
806		break;
807	default:
808		panic("sparc64_bus_barrier: unknown flags");
809	}
810	return;
811}
812
/* The root bus space tag: UPA space, with the generic #Sync barrier. */
struct bus_space_tag nexus_bustag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	UPA_BUS_SPACE,			/* type */
	nexus_bus_barrier,		/* bus_space_barrier */
};
819