bus_machdep.c revision 108810
/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	8.6 (Berkeley) 1/14/94
 *	from: NetBSD: machdep.c,v 1.111 2001/09/15 07:13:40 eeh Exp
 *	and
 * 	from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
 *
 * $FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 108810 2003-01-06 17:35:40Z tmm $
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>

#include <machine/asi.h>
#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/cache.h>
#include <machine/smp.h>
#include <machine/tlb.h>

/* ASIs for bus access. */
int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};

int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* UPA */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBUS */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};

/*
 * busdma support code.
 * Note: there is no support for bounce buffers yet.
 */

static int nexus_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, int,
    bus_dmamap_t *);
static int nexus_dmamap_destroy(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static int nexus_dmamap_load(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    void *, bus_size_t, bus_dmamap_callback_t *, void *, int);
static int nexus_dmamap_load_mbuf(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct mbuf *, bus_dmamap_callback2_t *, void *, int);
static int nexus_dmamap_load_uio(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    struct uio *, bus_dmamap_callback2_t *, void *, int);
static void nexus_dmamap_unload(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t);
static void nexus_dmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t,
    bus_dmasync_op_t);
static int nexus_dmamem_alloc(bus_dma_tag_t, bus_dma_tag_t, void **, int,
    bus_dmamap_t *);
static void nexus_dmamem_free(bus_dma_tag_t, bus_dma_tag_t, void *,
    bus_dmamap_t);

/*
 * Since there is no way for a device to obtain a dma tag from its parent,
 * we use this kluge to handle the different supported bus systems.
 * The sparc64_root_dma_tag is used as the parent for tags that have none,
 * so that the correct methods will be used.
 */
bus_dma_tag_t sparc64_root_dma_tag;

/*
 * Allocate a device-specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{

	bus_dma_tag_t newtag;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent != NULL ? parent : sparc64_root_dma_tag;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourselves */
	newtag->map_count = 0;

	newtag->dmamap_create = NULL;
	newtag->dmamap_destroy = NULL;
	newtag->dmamap_load = NULL;
	newtag->dmamap_load_mbuf = NULL;
	newtag->dmamap_load_uio = NULL;
	newtag->dmamap_unload = NULL;
	newtag->dmamap_sync = NULL;
	newtag->dmamem_alloc = NULL;
	newtag->dmamem_free = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = ulmin(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = ulmax(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = ulmax(parent->boundary, newtag->boundary);
	}
	newtag->parent->ref_count++;

	*dmat = newtag;
	return (0);
}
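
/*
 * Example usage (sketch, not part of this file): a driver would typically
 * create a tag describing its device's DMA constraints and tear it down on
 * detach.  The foo_* names and the constraint values below are invented for
 * illustration; BUS_SPACE_MAXADDR* and MCLBYTES are the usual constants
 * from <machine/bus.h> and <sys/param.h>.
 *
 *	struct foo_softc {
 *		bus_dma_tag_t	sc_dmat;
 *	};
 *
 *	static int
 *	foo_dma_init(struct foo_softc *sc, bus_dma_tag_t parent)
 *	{
 *
 *		return (bus_dma_tag_create(parent, 1, 0,
 *		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *		    MCLBYTES, 1, MCLBYTES, 0, &sc->sc_dmat));
 *	}
 *
 * The arguments are, in order: parent, alignment, boundary, lowaddr,
 * highaddr, filter, filterarg, maxsize, nsegments, maxsegsz, flags and the
 * tag pointer to fill in.  bus_dma_tag_destroy(sc->sc_dmat) releases the
 * tag once all of its maps are gone.
 */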

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t parent;

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);
		while (dmat != NULL) {
			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference, so release our
				 * reference on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
static int
nexus_dmamap_create(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, int flags,
    bus_dmamap_t *mapp)
{

	/* Not much to do...? */
	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp != NULL) {
		ddmat->map_count++;
		return (0);
	} else
		return (ENOMEM);
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
static int
nexus_dmamap_destroy(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	ddmat->map_count--;
	return (0);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 *
 * Most SPARCs have IOMMUs in the bus controllers.  In those cases
 * they only need one segment and will use virtual addresses for DVMA.
 * Those bus controllers should intercept these vectors and should
 * *NEVER* call nexus_dmamap_load() which is used only by devices that
 * bypass DVMA.
 */
static int
nexus_dmamap_load(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t paddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[ddmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t *sg;
	int seg;
	int error;
	vm_offset_t nextpaddr;
	bus_size_t size;

	error = 0;

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	map->buf = buf;
	map->buflen = buflen;
	map->start = (bus_addr_t)buf;

	nextpaddr = 0;
	do {
		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > ddmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;
	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
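
/*
 * Example usage (sketch, not part of this file): the load functions hand
 * the resulting segment list to a driver-supplied callback instead of
 * returning it.  foo_* and sc are invented names; a real driver would also
 * check the return value of bus_dmamap_load().
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		bus_addr_t *busaddr = arg;
 *
 *		if (error == 0)
 *			*busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_buf,
 *	    sc->sc_buflen, foo_load_cb, &busaddr, BUS_DMA_NOWAIT);
 */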

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_nexus_dmamap_load_buffer(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
    bus_dma_segment_t segs[], void *buf, bus_size_t buflen,
    struct thread *td, int flags, vm_offset_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	bmask  = ~(ddmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
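		/*
		 * For example, with a boundary of 0x10000 and
		 * curaddr == 0x1fff0, bmask is ~0xffff, so
		 * baddr == (0x1fff0 + 0x10000) & bmask == 0x20000 and
		 * sgsize is clipped to 0x10: the segment ends exactly at
		 * the 64K boundary and the next one starts beyond it.
		 */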
		if (ddmat->boundary > 0) {
			baddr = (curaddr + ddmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= ddmat->maxsegsz &&
			    (ddmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= ddmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like nexus_dmamap_load(), but for mbufs.
 */
static int
nexus_dmamap_load_mbuf(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
    bus_dmamap_t map, struct mbuf *m0, bus_dmamap_callback2_t *callback,
    void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[ddmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(m0->m_flags & M_PKTHDR,
		("nexus_dmamap_load_mbuf: no packet header"));

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= ddmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			error = _nexus_dmamap_load_buffer(pdmat, ddmat,
			    dm_segments, m->m_data, m->m_len, NULL, flags,
			    &lastaddr, &nsegs, first);
			first = 0;
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}
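
/*
 * Example usage (sketch, not part of this file): the mbuf and uio variants
 * use the callback2 form, which additionally reports the total mapped size.
 * foo_* and sc are invented names; foo_set_desc() stands in for whatever
 * descriptor setup the hardware needs.
 *
 *	static void
 *	foo_txbuf_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    bus_size_t mapsize, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *		int i;
 *
 *		if (error != 0)
 *			return;
 *		for (i = 0; i < nseg; i++)
 *			foo_set_desc(sc, i, segs[i].ds_addr, segs[i].ds_len);
 *	}
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_map, m0,
 *	    foo_txbuf_cb, sc, BUS_DMA_NOWAIT);
 */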

/*
 * Like nexus_dmamap_load(), but for uios.
 */
static int
nexus_dmamap_load_uio(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat,
    bus_dmamap_t map, struct uio *uio, bus_dmamap_callback2_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[ddmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("nexus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _nexus_dmamap_load_buffer(pdmat, ddmat, dm_segments,
		    addr, minlen, td, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}
	return (error);
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
static void
nexus_dmamap_unload(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map)
{

	/* Nothing to do...? */
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
static void
nexus_dmamap_sync(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{

	/*
	 * We sync out our caches, but the bus must do the same.
	 *
	 * Actually a #Sync is expensive.  We should optimize.
	 */
	if ((op == BUS_DMASYNC_PREREAD) || (op == BUS_DMASYNC_PREWRITE)) {
		/*
		 * Don't really need to do anything, but flush any pending
		 * writes anyway.
		 */
		membar(Sync);
	}
	if (op == BUS_DMASYNC_POSTREAD) {
		/*
		 * Invalidate the caches (it is unclear whether this is really
		 * needed; the manual only mentions that PCI transactions are
		 * cache coherent).
		 */
		ecache_flush((vm_offset_t)map->buf,
		    (vm_offset_t)map->buf + map->buflen - 1);
	}
	if (op == BUS_DMASYNC_POSTWRITE) {
		/* Nothing to do.  Handled by the bus controller. */
	}
}
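
/*
 * Example usage (sketch, not part of this file): a driver brackets each
 * transfer with sync calls; for a device-to-memory transfer that the CPU
 * will read afterwards, that is PREREAD before starting the DMA and
 * POSTREAD after it completes (sc_dmat and sc_map are invented names).
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREREAD);
 *	(start the transfer and wait for it to complete)
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTREAD);
 */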

/*
 * Helper functions for buses that use their private dmamem_alloc/dmamem_free
 * versions.
 * These differ from the dmamap_create() functions in that they create a map
 * that is specifically for use with dmamem_alloc'ed memory.
 * These are primitive now, but I expect that some fields of the map will need
 * to be filled soon.
 */
int
sparc64_dmamem_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{

	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL)
		return (ENOMEM);

	dmat->map_count++;
	return (0);
}

void
sparc64_dmamem_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->map_count--;
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
static int
nexus_dmamem_alloc(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void **vaddr,
    int flags, bus_dmamap_t *mapp)
{

	if ((ddmat->maxsize <= PAGE_SIZE)) {
		*vaddr = malloc(ddmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX: Use contigmalloc until it is merged into this facility
		 * and handles multi-seg allocations.  Nobody is doing multi-seg
		 * allocations yet though.
		 */
		*vaddr = contigmalloc(ddmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, ddmat->lowaddr,
		    ddmat->alignment ? ddmat->alignment : 1UL,
		    ddmat->boundary);
	}
	if (*vaddr == NULL) {
		free(*mapp, M_DEVBUF);
		return (ENOMEM);
	}
	return (0);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
static void
nexus_dmamem_free(bus_dma_tag_t pdmat, bus_dma_tag_t ddmat, void *vaddr,
    bus_dmamap_t map)
{

	sparc64_dmamem_free_map(ddmat, map);
	if ((ddmat->maxsize <= PAGE_SIZE))
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, ddmat->maxsize, M_DEVBUF);
}

struct bus_dma_tag nexus_dmatag = {
	NULL,
	NULL,
	8,
	0,
	0,
	0x3ffffffff,
	NULL,		/* XXX */
	NULL,
	0x3ffffffff,	/* XXX */
	0xff,		/* XXX */
	0xffffffff,	/* XXX */
	0,
	0,
	0,
	nexus_dmamap_create,
	nexus_dmamap_destroy,
	nexus_dmamap_load,
	nexus_dmamap_load_mbuf,
	nexus_dmamap_load_uio,
	nexus_dmamap_unload,
	nexus_dmamap_sync,

	nexus_dmamem_alloc,
	nexus_dmamem_free,
};

/*
 * Helpers to map/unmap bus memory
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size, int flags, vm_offset_t vaddr, void **hp)
{
	vm_offset_t addr;
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	addr = (vm_offset_t)handle;
	size = round_page(size);
	if (size == 0) {
		printf("sparc64_bus_map: zero size\n");
		return (EINVAL);
	}
	switch (tag->type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	if (!(flags & BUS_SPACE_MAP_CACHEABLE))
		pm_flags |= TD_E;

	if (vaddr != NULL)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kmem_alloc_nofault(kernel_map, size)) == NULL)
			panic("sparc64_bus_map: cannot allocate virtual "
			    "memory");
	}

	/* Preserve page offset. */
	*hp = (void *)(sva | ((u_long)addr & PAGE_MASK));

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	return (0);
}

int
sparc64_bus_mem_unmap(void *bh, bus_size_t size)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t endva;

	sva = trunc_page((vm_offset_t)bh);
	endva = sva + round_page(size);
	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove_flags(va);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	kmem_free(kernel_map, sva, size);
	return (0);
}
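
/*
 * Example usage (sketch, not part of this file): a bus front-end that knows
 * the physical address "pa" of a register bank can map it uncached and use
 * the resulting KVA as the handle; passing 0 for the vaddr argument lets
 * the function allocate the virtual range itself.  The names tag, pa, size
 * and va are invented here, and error handling is omitted.
 *
 *	void *va;
 *
 *	sparc64_bus_mem_map(tag, (bus_space_handle_t)pa, size, 0, 0, &va);
 *	(... access the registers through (bus_space_handle_t)va ...)
 *	sparc64_bus_mem_unmap(va, size);
 */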

/*
 * Fake up a bus tag, for use by console drivers in early boot when the regular
 * means to allocate resources are not yet available.
 * Note that these tags are not eligible for bus_space_barrier operations.
 * Addr is the physical address of the desired start of the handle.
 */
bus_space_handle_t
sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
{

	ptag->cookie = NULL;
	ptag->parent = NULL;
	ptag->type = space;
	ptag->bus_barrier = NULL;
	return (addr);
}
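
/*
 * Example usage (sketch, not part of this file): an early console driver
 * that only knows the physical address of its registers can fabricate a
 * tag/handle pair and then use the regular bus_space accessors.  The foo_*
 * names, pa and the register offset are invented.
 *
 *	static struct bus_space_tag foo_tag_store;
 *	bus_space_tag_t foo_tag = &foo_tag_store;
 *	bus_space_handle_t foo_handle;
 *	uint32_t reg;
 *
 *	foo_handle = sparc64_fake_bustag(UPA_BUS_SPACE, pa, foo_tag);
 *	reg = bus_space_read_4(foo_tag, foo_handle, 0);
 */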

/*
 * Base bus space handlers.
 */
static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
    bus_size_t, bus_size_t, int);

static void
nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
    bus_size_t size, int flags)
{

	/*
	 * We have lots of alternatives depending on whether we're
	 * synchronizing loads with loads, loads with stores, stores
	 * with loads, or stores with stores.  The only ones that seem
	 * generic are #Sync and #MemIssue.  I'll use #Sync for safety.
	 */
	switch(flags) {
	case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE:
	case BUS_SPACE_BARRIER_READ:
	case BUS_SPACE_BARRIER_WRITE:
		membar(Sync);
		break;
	default:
		panic("sparc64_bus_barrier: unknown flags");
	}
	return;
}

struct bus_space_tag nexus_bustag = {
	NULL,				/* cookie */
	NULL,				/* parent bus tag */
	UPA_BUS_SPACE,			/* type */
	nexus_bus_barrier,		/* bus_space_barrier */
};