bus_machdep.c revision 254025
1/*-
2 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
7 * NASA Ames Research Center.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30/*-
31 * Copyright (c) 1992, 1993
32 *	The Regents of the University of California.  All rights reserved.
33 *
34 * This software was developed by the Computer Systems Engineering group
35 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
36 * contributed to Berkeley.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 *    notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 *    notice, this list of conditions and the following disclaimer in the
45 *    documentation and/or other materials provided with the distribution.
46 * 4. Neither the name of the University nor the names of its contributors
47 *    may be used to endorse or promote products derived from this software
48 *    without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
61 */
62/*-
63 * Copyright (c) 1997, 1998 Justin T. Gibbs.
64 * All rights reserved.
65 * Copyright 2001 by Thomas Moestl <tmm@FreeBSD.org>.  All rights reserved.
66 *
67 * Redistribution and use in source and binary forms, with or without
68 * modification, are permitted provided that the following conditions
69 * are met:
70 * 1. Redistributions of source code must retain the above copyright
71 *    notice, this list of conditions, and the following disclaimer,
72 *    without modification, immediately at the beginning of the file.
73 * 2. The name of the author may not be used to endorse or promote products
74 *    derived from this software without specific prior written permission.
75 *
76 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
77 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
78 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
79 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
80 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
81 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
82 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
83 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
84 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
85 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
86 * SUCH DAMAGE.
87 *
88 *	from: @(#)machdep.c	8.6 (Berkeley) 1/14/94
89 *	from: NetBSD: machdep.c,v 1.221 2008/04/28 20:23:37 martin Exp
90 *	and
91 *	from: FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.24 2001/08/15
92 */
93
94#include <sys/cdefs.h>
95__FBSDID("$FreeBSD: head/sys/sparc64/sparc64/bus_machdep.c 254025 2013-08-07 06:21:20Z jeff $");
96
97#include <sys/param.h>
98#include <sys/bus.h>
99#include <sys/lock.h>
100#include <sys/malloc.h>
101#include <sys/mutex.h>
102#include <sys/proc.h>
103#include <sys/rman.h>
104#include <sys/smp.h>
105#include <sys/systm.h>
106
107#include <vm/vm.h>
108#include <vm/vm_extern.h>
109#include <vm/vm_kern.h>
110#include <vm/vm_page.h>
111#include <vm/vm_param.h>
112#include <vm/vm_map.h>
113
114#include <machine/asi.h>
115#include <machine/atomic.h>
116#include <machine/bus.h>
117#include <machine/bus_private.h>
118#include <machine/cache.h>
119#include <machine/smp.h>
120#include <machine/tlb.h>
121
122static void nexus_bus_barrier(bus_space_tag_t, bus_space_handle_t,
123    bus_size_t, bus_size_t, int);
124
/*
 * ASIs for bus access, one entry per bus space type.  The _L variants
 * (presumably little-endian access -- confirm against the ASI definitions)
 * are used for the PCI spaces only.
 */
const int bus_type_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* nexus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT_L,		/* PCI I/O space */
	0
};
134
/*
 * ASIs for streaming bus access.  Unlike bus_type_asi[] above, the plain
 * (non-_L) ASI is used for every space, including the PCI ones.
 */
const int bus_stream_asi[] = {
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* nexus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* SBus */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI configuration space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI memory space */
	ASI_PHYS_BYPASS_EC_WITH_EBIT,		/* PCI I/O space */
	0
};
143
144/*
145 * Convenience function for manipulating driver locks from busdma (during
146 * busdma_swi, for example).  Drivers that don't provide their own locks
147 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
148 * non-mutex locking scheme don't have to use this at all.
149 */
150void
151busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
152{
153	struct mtx *dmtx;
154
155	dmtx = (struct mtx *)arg;
156	switch (op) {
157	case BUS_DMA_LOCK:
158		mtx_lock(dmtx);
159		break;
160	case BUS_DMA_UNLOCK:
161		mtx_unlock(dmtx);
162		break;
163	default:
164		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
165	}
166}
167
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	/* Reaching this panic means a driver deferred a load without a lock. */
	panic("driver error: busdma dflt_lock called");
}
180
181/*
182 * Allocate a device specific dma_tag.
183 */
184int
185bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
186    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
187    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
188    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
189    void *lockfuncarg, bus_dma_tag_t *dmat)
190{
191	bus_dma_tag_t newtag;
192
193	/* Return a NULL tag on failure */
194	*dmat = NULL;
195
196	/* Enforce the usage of BUS_GET_DMA_TAG(). */
197	if (parent == NULL)
198		panic("%s: parent DMA tag NULL", __func__);
199
200	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
201	if (newtag == NULL)
202		return (ENOMEM);
203
204	/*
205	 * The method table pointer and the cookie need to be taken over from
206	 * the parent.
207	 */
208	newtag->dt_cookie = parent->dt_cookie;
209	newtag->dt_mt = parent->dt_mt;
210
211	newtag->dt_parent = parent;
212	newtag->dt_alignment = alignment;
213	newtag->dt_boundary = boundary;
214	newtag->dt_lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
215	newtag->dt_highaddr = trunc_page((vm_offset_t)highaddr) +
216	    (PAGE_SIZE - 1);
217	newtag->dt_filter = filter;
218	newtag->dt_filterarg = filterarg;
219	newtag->dt_maxsize = maxsize;
220	newtag->dt_nsegments = nsegments;
221	newtag->dt_maxsegsz = maxsegsz;
222	newtag->dt_flags = flags;
223	newtag->dt_ref_count = 1; /* Count ourselves */
224	newtag->dt_map_count = 0;
225
226	if (lockfunc != NULL) {
227		newtag->dt_lockfunc = lockfunc;
228		newtag->dt_lockfuncarg = lockfuncarg;
229	} else {
230		newtag->dt_lockfunc = dflt_lock;
231		newtag->dt_lockfuncarg = NULL;
232	}
233
234	newtag->dt_segments = NULL;
235
236	/* Take into account any restrictions imposed by our parent tag. */
237	newtag->dt_lowaddr = ulmin(parent->dt_lowaddr, newtag->dt_lowaddr);
238	newtag->dt_highaddr = ulmax(parent->dt_highaddr, newtag->dt_highaddr);
239	if (newtag->dt_boundary == 0)
240		newtag->dt_boundary = parent->dt_boundary;
241	else if (parent->dt_boundary != 0)
242		newtag->dt_boundary = ulmin(parent->dt_boundary,
243		    newtag->dt_boundary);
244	atomic_add_int(&parent->dt_ref_count, 1);
245
246	if (newtag->dt_boundary > 0)
247		newtag->dt_maxsegsz = ulmin(newtag->dt_maxsegsz,
248		    newtag->dt_boundary);
249
250	*dmat = newtag;
251	return (0);
252}
253
254int
255bus_dma_tag_destroy(bus_dma_tag_t dmat)
256{
257	bus_dma_tag_t parent;
258
259	if (dmat != NULL) {
260		if (dmat->dt_map_count != 0)
261			return (EBUSY);
262		while (dmat != NULL) {
263			parent = dmat->dt_parent;
264			atomic_subtract_int(&dmat->dt_ref_count, 1);
265			if (dmat->dt_ref_count == 0) {
266				if (dmat->dt_segments != NULL)
267					free(dmat->dt_segments, M_DEVBUF);
268				free(dmat, M_DEVBUF);
269				/*
270				 * Last reference count, so
271				 * release our reference
272				 * count on our parent.
273				 */
274				dmat = parent;
275			} else
276				dmat = NULL;
277		}
278	}
279	return (0);
280}
281
282/* Allocate/free a tag, and do the necessary management work. */
283int
284sparc64_dma_alloc_map(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
285{
286
287	if (dmat->dt_segments == NULL) {
288		dmat->dt_segments = (bus_dma_segment_t *)malloc(
289		    sizeof(bus_dma_segment_t) * dmat->dt_nsegments, M_DEVBUF,
290		    M_NOWAIT);
291		if (dmat->dt_segments == NULL)
292			return (ENOMEM);
293	}
294	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
295	if (*mapp == NULL)
296		return (ENOMEM);
297
298	SLIST_INIT(&(*mapp)->dm_reslist);
299	dmat->dt_map_count++;
300	return (0);
301}
302
303void
304sparc64_dma_free_map(bus_dma_tag_t dmat, bus_dmamap_t map)
305{
306
307	free(map, M_DEVBUF);
308	dmat->dt_map_count--;
309}
310
/* Plain wrapper around the generic map allocator; flags are unused here. */
static int
nexus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{

	return (sparc64_dma_alloc_map(dmat, mapp));
}
317
/* Plain wrapper around the generic map release; always succeeds. */
static int
nexus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	sparc64_dma_free_map(dmat, map);
	return (0);
}
325
326/*
327 * Add a single contiguous physical range to the segment list.
328 */
329static int
330nexus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
331    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
332{
333	bus_addr_t baddr, bmask;
334	int seg;
335
336	/*
337	 * Make sure we don't cross any boundaries.
338	 */
339	bmask  = ~(dmat->dt_boundary - 1);
340	if (dmat->dt_boundary > 0) {
341		baddr = (curaddr + dmat->dt_boundary) & bmask;
342		if (sgsize > (baddr - curaddr))
343			sgsize = (baddr - curaddr);
344	}
345
346	/*
347	 * Insert chunk into a segment, coalescing with
348	 * previous segment if possible.
349	 */
350	seg = *segp;
351	if (seg == -1) {
352		seg = 0;
353		segs[seg].ds_addr = curaddr;
354		segs[seg].ds_len = sgsize;
355	} else {
356		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
357		    (segs[seg].ds_len + sgsize) <= dmat->dt_maxsegsz &&
358		    (dmat->dt_boundary == 0 ||
359		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
360			segs[seg].ds_len += sgsize;
361		else {
362			if (++seg >= dmat->dt_nsegments)
363				return (0);
364			segs[seg].ds_addr = curaddr;
365			segs[seg].ds_len = sgsize;
366		}
367	}
368	*segp = seg;
369	return (sgsize);
370}
371
372/*
373 * Utility function to load a physical buffer.  segp contains
374 * the starting segment on entrace, and the ending segment on exit.
375 */
376static int
377nexus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
378    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
379{
380	bus_addr_t curaddr;
381	bus_size_t sgsize;
382
383	if (segs == NULL)
384		segs = dmat->dt_segments;
385
386	curaddr = buf;
387	while (buflen > 0) {
388		sgsize = MIN(buflen, dmat->dt_maxsegsz);
389		sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
390		    segp);
391		if (sgsize == 0)
392			break;
393		curaddr += sgsize;
394		buflen -= sgsize;
395	}
396
397	/*
398	 * Did we fit?
399	 */
400	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
401}
402
403/*
404 * Utility function to load a linear buffer.  segp contains
405 * the starting segment on entrace, and the ending segment on exit.
406 */
407static int
408nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
409    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
410    int *segp)
411{
412	bus_size_t sgsize;
413	bus_addr_t curaddr;
414	vm_offset_t vaddr = (vm_offset_t)buf;
415
416	if (segs == NULL)
417		segs = dmat->dt_segments;
418
419	while (buflen > 0) {
420		/*
421		 * Get the physical address for this segment.
422		 */
423		if (pmap == kernel_pmap)
424			curaddr = pmap_kextract(vaddr);
425		else
426			curaddr = pmap_extract(pmap, vaddr);
427
428		/*
429		 * Compute the segment size, and adjust counts.
430		 */
431		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
432		if (sgsize > dmat->dt_maxsegsz)
433			sgsize = dmat->dt_maxsegsz;
434		if (buflen < sgsize)
435			sgsize = buflen;
436
437		sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
438		    segp);
439		if (sgsize == 0)
440			break;
441
442		vaddr += sgsize;
443		buflen -= sgsize;
444	}
445
446	/*
447	 * Did we fit?
448	 */
449	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
450}
451
/*
 * Intentionally empty: no bookkeeping is needed here for deferred map
 * loads on the nexus.
 */
static void
nexus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

}
458
459static bus_dma_segment_t *
460nexus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
461    bus_dma_segment_t *segs, int nsegs, int error)
462{
463
464	if (segs == NULL)
465		segs = dmat->dt_segments;
466	return (segs);
467}
468
/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
static void
nexus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	/* Only the loaded flag needs to be cleared. */
	map->dm_flags &= ~DMF_LOADED;
}
479
480/*
481 * Common function for DMA map synchronization.  May be called
482 * by bus-specific DMA map synchronization functions.
483 */
484static void
485nexus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
486{
487
488	/*
489	 * We sync out our caches, but the bus must do the same.
490	 *
491	 * Actually a #Sync is expensive.  We should optimize.
492	 */
493	if ((op & BUS_DMASYNC_PREREAD) || (op & BUS_DMASYNC_PREWRITE)) {
494		/*
495		 * Don't really need to do anything, but flush any pending
496		 * writes anyway.
497		 */
498		membar(Sync);
499	}
500	if (op & BUS_DMASYNC_POSTWRITE) {
501		/* Nothing to do.  Handled by the bus controller. */
502	}
503}
504
505/*
506 * Common function for DMA-safe memory allocation.  May be called
507 * by bus-specific DMA memory allocation functions.
508 */
509static int
510nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
511    bus_dmamap_t *mapp)
512{
513	int mflags;
514
515	if (flags & BUS_DMA_NOWAIT)
516		mflags = M_NOWAIT;
517	else
518		mflags = M_WAITOK;
519	if (flags & BUS_DMA_ZERO)
520		mflags |= M_ZERO;
521
522	/*
523	 * XXX:
524	 * (dmat->dt_alignment < dmat->dt_maxsize) is just a quick hack; the
525	 * exact alignment guarantees of malloc need to be nailed down, and
526	 * the code below should be rewritten to take that into account.
527	 *
528	 * In the meantime, we'll warn the user if malloc gets it wrong.
529	 */
530	if (dmat->dt_maxsize <= PAGE_SIZE &&
531	    dmat->dt_alignment < dmat->dt_maxsize)
532		*vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
533	else {
534		/*
535		 * XXX use contigmalloc until it is merged into this
536		 * facility and handles multi-seg allocations.  Nobody
537		 * is doing multi-seg allocations yet though.
538		 */
539		*vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
540		    0ul, dmat->dt_lowaddr,
541		    dmat->dt_alignment ? dmat->dt_alignment : 1UL,
542		    dmat->dt_boundary);
543	}
544	if (*vaddr == NULL)
545		return (ENOMEM);
546	if (vtophys(*vaddr) % dmat->dt_alignment)
547		printf("%s: failed to align memory properly.\n", __func__);
548	return (0);
549}
550
551/*
552 * Common function for freeing DMA-safe memory.  May be called by
553 * bus-specific DMA memory free functions.
554 */
555static void
556nexus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
557{
558
559	if (dmat->dt_maxsize <= PAGE_SIZE &&
560	    dmat->dt_alignment < dmat->dt_maxsize)
561		free(vaddr, M_DEVBUF);
562	else
563		contigfree(vaddr, dmat->dt_maxsize, M_DEVBUF);
564}
565
/*
 * Method table tying the bus_dma(9) entry points to the nexus
 * implementations above; inherited by child tags through
 * bus_dma_tag_create().  The initializers are positional and must match
 * the member order of struct bus_dma_methods.
 */
static struct bus_dma_methods nexus_dma_methods = {
	nexus_dmamap_create,
	nexus_dmamap_destroy,
	nexus_dmamap_load_phys,
	nexus_dmamap_load_buffer,
	nexus_dmamap_waitok,
	nexus_dmamap_complete,
	nexus_dmamap_unload,
	nexus_dmamap_sync,
	nexus_dmamem_alloc,
	nexus_dmamem_free,
};
578
/*
 * The root DMA tag of the nexus.  Child tags created via
 * bus_dma_tag_create() inherit its cookie and method table.  The
 * initializers are positional and must match the member order of
 * struct bus_dma_tag; the final member is the method table pointer.
 */
struct bus_dma_tag nexus_dmatag = {
	NULL,
	NULL,
	1,
	0,
	~0,
	~0,
	NULL,		/* XXX */
	NULL,
	~0,
	~0,
	~0,
	0,
	0,
	0,
	NULL,
	NULL,
	NULL,
	&nexus_dma_methods,
};
599
/*
 * Helpers to map/unmap bus memory
 */
int
bus_space_map(bus_space_tag_t tag, bus_addr_t address, bus_size_t size,
    int flags, bus_space_handle_t *handlep)
{

	/* Thin wrapper; no caller-supplied virtual address (vaddr == 0). */
	return (sparc64_bus_mem_map(tag, address, size, flags, 0, handlep));
}
610
/*
 * Map bus memory.  Since bus_space(9) accesses go through physical ASIs,
 * a virtual mapping is only established when BUS_SPACE_MAP_LINEAR is
 * requested; that mapping is recorded on the tag's rman cookie.
 */
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
    int flags, vm_offset_t vaddr, bus_space_handle_t *hp)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	/*
	 * Given that we use physical access for bus_space(9) there's no
	 * need to map anything in unless BUS_SPACE_MAP_LINEAR is requested.
	 */
	if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
		*hp = addr;
		return (0);
	}

	if (tag->bst_cookie == NULL) {
		printf("%s: resource cookie not set\n", __func__);
		return (EINVAL);
	}

	size = round_page(size);
	if (size == 0) {
		printf("%s: zero size\n", __func__);
		return (EINVAL);
	}

	/*
	 * NOTE(review): TD_IE is set only for the PCI spaces here --
	 * presumably selecting byte-swapped access; confirm against the
	 * TTE flag definitions in machine/tte.h.
	 */
	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
		pm_flags |= TD_E;

	/* Use the caller-supplied VA if given; otherwise allocate KVA. */
	if (vaddr != 0L)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kva_alloc(size)) == 0)
			panic("%s: cannot allocate virtual memory", __func__);
	}

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	/* Enter the mapping page by page, then flush the TLB range. */
	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);

	/*
	 * Note: we preserve the page offset.
	 * NOTE(review): *hp is not set on this (LINEAR) path; callers
	 * appear to obtain the handle separately -- confirm.
	 */
	rman_set_virtual(tag->bst_cookie, (void *)(sva | (addr & PAGE_MASK)));
	return (0);
}
679
void
bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size)
{

	/* The return value (always 0 in the current implementation) is dropped. */
	sparc64_bus_mem_unmap(tag, handle, size);
}
687
/*
 * Undo a linear mapping established by sparc64_bus_mem_map(): remove the
 * page mappings, flush the TLB range and release the KVA.
 */
int
sparc64_bus_mem_unmap(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t size)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_offset_t endva;

	/* Nothing was mapped in unless BUS_SPACE_MAP_LINEAR was requested. */
	if (tag->bst_cookie == NULL ||
	    (sva = (vm_offset_t)rman_get_virtual(tag->bst_cookie)) == 0)
		return (0);
	sva = trunc_page(sva);
	endva = sva + round_page(size);
	for (va = sva; va < endva; va += PAGE_SIZE)
		pmap_kremove_flags(va);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);
	/*
	 * NOTE(review): the KVA is freed unconditionally, although the map
	 * side only allocates it via kva_alloc() when the caller did not
	 * supply a vaddr -- confirm callers never mix these cases.
	 */
	kva_free(sva, size);
	return (0);
}
707
708/*
709 * Fake up a bus tag, for use by console drivers in early boot when the
710 * regular means to allocate resources are not yet available.
711 * Addr is the physical address of the desired start of the handle.
712 */
713bus_space_handle_t
714sparc64_fake_bustag(int space, bus_addr_t addr, struct bus_space_tag *ptag)
715{
716
717	ptag->bst_cookie = NULL;
718	ptag->bst_parent = NULL;
719	ptag->bst_type = space;
720	ptag->bst_bus_barrier = nexus_bus_barrier;
721	return (addr);
722}
723
724/*
725 * Allocate a bus tag.
726 */
727bus_space_tag_t
728sparc64_alloc_bus_tag(void *cookie, struct bus_space_tag *ptag, int type,
729    void *barrier)
730{
731	bus_space_tag_t bt;
732
733	bt = malloc(sizeof(struct bus_space_tag), M_DEVBUF, M_NOWAIT);
734	if (bt == NULL)
735		return (NULL);
736	bt->bst_cookie = cookie;
737	bt->bst_parent = ptag;
738	bt->bst_type = type;
739	bt->bst_bus_barrier = barrier;
740	return (bt);
741}
742
743/*
744 * Base bus space handlers.
745 */
746
747static void
748nexus_bus_barrier(bus_space_tag_t t, bus_space_handle_t h, bus_size_t offset,
749    bus_size_t size, int flags)
750{
751
752	/*
753	 * We have lots of alternatives depending on whether we're
754	 * synchronizing loads with loads, loads with stores, stores
755	 * with loads, or stores with stores.  The only ones that seem
756	 * generic are #Sync and #MemIssue.  I'll use #Sync for safety.
757	 */
758	switch(flags) {
759	case BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE:
760	case BUS_SPACE_BARRIER_READ:
761	case BUS_SPACE_BARRIER_WRITE:
762		membar(Sync);
763		break;
764	default:
765		panic("%s: unknown flags", __func__);
766	}
767	return;
768}
769
770struct bus_space_tag nexus_bustag = {
771	NULL,				/* cookie */
772	NULL,				/* parent bus tag */
773	NEXUS_BUS_SPACE,		/* type */
774	nexus_bus_barrier,		/* bus_space_barrier */
775};
776