/* intel_gas.c, revision 257251 */
1/*-
2 * Copyright (c) 2013 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
6 * under sponsorship from the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/x86/iommu/intel_gas.c 257251 2013-10-28 13:33:29Z kib $");
32
33#define	RB_AUGMENT(entry) dmar_gas_augment_entry(entry)
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/malloc.h>
38#include <sys/bus.h>
39#include <sys/interrupt.h>
40#include <sys/kernel.h>
41#include <sys/ktr.h>
42#include <sys/lock.h>
43#include <sys/proc.h>
44#include <sys/rwlock.h>
45#include <sys/memdesc.h>
46#include <sys/mutex.h>
47#include <sys/sysctl.h>
48#include <sys/rman.h>
49#include <sys/taskqueue.h>
50#include <sys/tree.h>
51#include <sys/uio.h>
52#include <dev/pci/pcivar.h>
53#include <vm/vm.h>
54#include <vm/vm_extern.h>
55#include <vm/vm_kern.h>
56#include <vm/vm_object.h>
57#include <vm/vm_page.h>
58#include <vm/vm_map.h>
59#include <vm/uma.h>
60#include <machine/atomic.h>
61#include <machine/bus.h>
62#include <machine/md_var.h>
63#include <machine/specialreg.h>
64#include <x86/include/busdma_impl.h>
65#include <x86/iommu/intel_reg.h>
66#include <x86/iommu/busdma_dmar.h>
67#include <x86/iommu/intel_dmar.h>
68
/*
 * Guest Address Space management.
 */

/* UMA zone backing all dmar_map_entry allocations in this file. */
static uma_zone_t dmar_map_entry_zone;
74
/*
 * Create the UMA zone for GAS map entries.  Registered as a SYSINIT so
 * the zone exists before any DMAR context is initialized.
 */
static void
intel_gas_init(void)
{

	dmar_map_entry_zone = uma_zcreate("DMAR_MAP_ENTRY",
	    sizeof(struct dmar_map_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);
84
85struct dmar_map_entry *
86dmar_gas_alloc_entry(struct dmar_ctx *ctx, u_int flags)
87{
88	struct dmar_map_entry *res;
89
90	KASSERT((flags & ~(DMAR_PGF_WAITOK)) == 0,
91	    ("unsupported flags %x", flags));
92
93	res = uma_zalloc(dmar_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
94	    0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
95	if (res != NULL)
96		atomic_add_int(&ctx->entries_cnt, 1);
97	return (res);
98}
99
/*
 * Return a map entry to the zone and drop the context's entry count.
 * The entry must already be removed from the GAS tree.
 */
void
dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
{

	atomic_subtract_int(&ctx->entries_cnt, 1);
	uma_zfree(dmar_map_entry_zone, entry);
}
107
108static int
109dmar_gas_cmp_entries(struct dmar_map_entry *a, struct dmar_map_entry *b)
110{
111
112	/* Last entry have zero size, so <= */
113	KASSERT(a->start <= a->end, ("inverted entry %p (%jx, %jx)",
114	    a, (uintmax_t)a->start, (uintmax_t)a->end));
115	KASSERT(b->start <= b->end, ("inverted entry %p (%jx, %jx)",
116	    b, (uintmax_t)b->start, (uintmax_t)b->end));
117	KASSERT(a->end <= b->start || b->end <= a->start ||
118	    a->end == a->start || b->end == b->start,
119	    ("overlapping entries %p (%jx, %jx) %p (%jx, %jx)",
120	    a, (uintmax_t)a->start, (uintmax_t)a->end,
121	    b, (uintmax_t)b->start, (uintmax_t)b->end));
122
123	if (a->end < b->end)
124		return (-1);
125	else if (b->end < a->end)
126		return (1);
127	return (0);
128}
129
130static void
131dmar_gas_augment_entry(struct dmar_map_entry *entry)
132{
133	struct dmar_map_entry *l, *r;
134
135	for (; entry != NULL; entry = RB_PARENT(entry, rb_entry)) {
136		l = RB_LEFT(entry, rb_entry);
137		r = RB_RIGHT(entry, rb_entry);
138		if (l == NULL && r == NULL) {
139			entry->free_down = entry->free_after;
140		} else if (l == NULL && r != NULL) {
141			entry->free_down = MAX(entry->free_after, r->free_down);
142		} else if (/*l != NULL && */ r == NULL) {
143			entry->free_down = MAX(entry->free_after, l->free_down);
144		} else /* if (l != NULL && r != NULL) */ {
145			entry->free_down = MAX(entry->free_after, l->free_down);
146			entry->free_down = MAX(entry->free_down, r->free_down);
147		}
148	}
149}
150
/*
 * Instantiate the tree operations.  The RB_AUGMENT macro defined before
 * the sys/tree.h include routes rebalancing updates through
 * dmar_gas_augment_entry() so free_down stays consistent.
 */
RB_GENERATE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
    dmar_gas_cmp_entries);
153
154static void
155dmar_gas_fix_free(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
156{
157	struct dmar_map_entry *next;
158
159	next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
160	entry->free_after = (next != NULL ? next->start : ctx->end) -
161	    entry->end;
162	dmar_gas_augment_entry(entry);
163}
164
#ifdef INVARIANTS
/*
 * Validate the free_after/free_down bookkeeping over the whole GAS tree.
 * Only compiled under INVARIANTS; called from the free paths when
 * dmar_check_free is enabled.
 *
 * Fixes: three MPASS conditions used "=" (assignment) instead of "=="
 * (comparison), which both defeated the check and silently overwrote
 * free_after/free_down; and the both-children case computed
 * v = MAX(entry->free_down, r->free_down) instead of MAX(v, r->free_down),
 * making the final assertion tautological.
 */
static void
dmar_gas_check_free(struct dmar_ctx *ctx)
{
	struct dmar_map_entry *entry, *next, *l, *r;
	dmar_gaddr_t v;

	RB_FOREACH(entry, dmar_gas_entries_tree, &ctx->rb_root) {
		next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
		if (next == NULL) {
			MPASS(entry->free_after == ctx->end - entry->end);
		} else {
			MPASS(entry->free_after == next->start - entry->end);
			MPASS(entry->end <= next->start);
		}
		l = RB_LEFT(entry, rb_entry);
		r = RB_RIGHT(entry, rb_entry);
		if (l == NULL && r == NULL) {
			MPASS(entry->free_down == entry->free_after);
		} else if (l == NULL && r != NULL) {
			MPASS(entry->free_down == MAX(entry->free_after,
			    r->free_down));
		} else if (r == NULL) {
			MPASS(entry->free_down == MAX(entry->free_after,
			    l->free_down));
		} else {
			v = MAX(entry->free_after, l->free_down);
			v = MAX(v, r->free_down);
			MPASS(entry->free_down == v);
		}
	}
}
#endif
198
199static bool
200dmar_gas_rb_insert(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
201{
202	struct dmar_map_entry *prev, *found;
203
204	found = RB_INSERT(dmar_gas_entries_tree, &ctx->rb_root, entry);
205	dmar_gas_fix_free(ctx, entry);
206	prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry);
207	if (prev != NULL)
208		dmar_gas_fix_free(ctx, prev);
209	return (found == NULL);
210}
211
/*
 * Remove the entry from the GAS tree.  The predecessor's gap grows to
 * cover the removed entry's span, so its free_after must be recomputed
 * after the removal.
 */
static void
dmar_gas_rb_remove(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
{
	struct dmar_map_entry *prev;

	/* Look up the predecessor before unlinking the entry. */
	prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry);
	RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry);
	if (prev != NULL)
		dmar_gas_fix_free(ctx, prev);
}
222
/*
 * Initialize the guest address space of a context by installing the two
 * permanent placeholder entries: one covering page 0 (so GPA zero is
 * never allocated) and a zero-sized sentinel at the end of the space.
 */
void
dmar_gas_init_ctx(struct dmar_ctx *ctx)
{
	struct dmar_map_entry *begin, *end;

	/* Sleepable allocations are done before taking the ctx lock. */
	begin = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK);
	end = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK);

	DMAR_CTX_LOCK(ctx);
	/* The two allocations above must be the only entries so far. */
	KASSERT(ctx->entries_cnt == 2, ("dirty ctx %p", ctx));
	KASSERT(RB_EMPTY(&ctx->rb_root), ("non-empty entries %p", ctx));

	/* Low placeholder: reserves the first page. */
	begin->start = 0;
	begin->end = DMAR_PAGE_SIZE;
	begin->free_after = ctx->end - begin->end;
	begin->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
	dmar_gas_rb_insert(ctx, begin);

	/* High placeholder: zero-sized sentinel at ctx->end. */
	end->start = ctx->end;
	end->end = ctx->end;
	end->free_after = 0;
	end->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
	dmar_gas_rb_insert(ctx, end);

	ctx->first_place = begin;
	ctx->last_place = end;
	DMAR_CTX_UNLOCK(ctx);
}
251
252void
253dmar_gas_fini_ctx(struct dmar_ctx *ctx)
254{
255	struct dmar_map_entry *entry, *entry1;
256
257	DMAR_CTX_ASSERT_LOCKED(ctx);
258	KASSERT(ctx->entries_cnt == 2, ("ctx still in use %p", ctx));
259
260	entry = RB_MIN(dmar_gas_entries_tree, &ctx->rb_root);
261	KASSERT(entry->start == 0, ("start entry start %p", ctx));
262	KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", ctx));
263	KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
264	    ("start entry flags %p", ctx));
265	RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry);
266	dmar_gas_free_entry(ctx, entry);
267
268	entry = RB_MAX(dmar_gas_entries_tree, &ctx->rb_root);
269	KASSERT(entry->start == ctx->end, ("end entry start %p", ctx));
270	KASSERT(entry->end == ctx->end, ("end entry end %p", ctx));
271	KASSERT(entry->free_after == 0, ("end entry free_after%p", ctx));
272	KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
273	    ("end entry flags %p", ctx));
274	RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry);
275	dmar_gas_free_entry(ctx, entry);
276
277	RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &ctx->rb_root, entry1) {
278		KASSERT((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0,
279		    ("non-RMRR entry left %p", ctx));
280		RB_REMOVE(dmar_gas_entries_tree, &ctx->rb_root, entry);
281		dmar_gas_free_entry(ctx, entry);
282	}
283}
284
/*
 * State shared by the allocation search helpers (dmar_gas_lowermatch(),
 * dmar_gas_uppermatch(), dmar_gas_match_one(), dmar_gas_match_insert()).
 */
struct dmar_gas_match_args {
	struct dmar_ctx *ctx;		/* context being searched */
	dmar_gaddr_t size;		/* requested size; may shrink on split */
	const struct bus_dma_tag_common *common; /* alignment/boundary/addr limits */
	u_int gas_flags;		/* DMAR_GM_* request flags */
	struct dmar_map_entry *entry;	/* entry being placed; start is trial GPA */
};
292
/*
 * Check whether a->entry->start (already set by the caller, aligned per
 * the tag) can host an allocation of a->size inside the gap after prev,
 * without exceeding the address limit "end".  On success the function may
 * adjust a->entry->start (to skip past a boundary) or shrink a->size
 * (when DMAR_GM_CANSPLIT is allowed) and returns true.
 */
static bool
dmar_gas_match_one(struct dmar_gas_match_args *a, struct dmar_map_entry *prev,
    dmar_gaddr_t end)
{
	dmar_gaddr_t bs, start;

	/* Would exceed the caller-imposed upper address limit. */
	if (a->entry->start + a->size > end)
		return (false);

	/* DMAR_PAGE_SIZE to create gap after new entry. */
	if (a->entry->start < prev->end + DMAR_PAGE_SIZE ||
	    a->entry->start + a->size + DMAR_PAGE_SIZE > prev->end +
	    prev->free_after)
		return (false);

	/* No boundary crossing. */
	if (dmar_test_boundary(a->entry->start, a->size, a->common->boundary))
		return (true);

	/*
	 * The start to start + size region crosses the boundary.
	 * Check if there is enough space after the next boundary
	 * after the prev->end.
	 */
	bs = (a->entry->start + a->common->boundary) & ~(a->common->boundary
	    - 1);
	start = roundup2(bs, a->common->alignment);
	/* DMAR_PAGE_SIZE to create gap after new entry. */
	if (start + a->size + DMAR_PAGE_SIZE <= prev->end + prev->free_after &&
	    start + a->size <= end) {
		a->entry->start = start;
		return (true);
	}

	/*
	 * Not enough space to align at boundary, but allowed to split.
	 * We already checked that start + size does not overlap end.
	 *
	 * XXXKIB. It is possible that bs is exactly at the start of
	 * the next entry, then we do not have gap.  Ignore for now.
	 */
	if ((a->gas_flags & DMAR_GM_CANSPLIT) != 0) {
		/* Shrink the request so it stops at the boundary. */
		a->size = bs - a->entry->start;
		return (true);
	}

	return (false);
}
341
/*
 * Commit a successful match: finalize a->entry's end, update the
 * free_after accounting of prev and the new entry, and insert the entry
 * into the GAS tree after prev.  Marks the entry DMAR_MAP_ENTRY_MAP.
 */
static void
dmar_gas_match_insert(struct dmar_gas_match_args *a,
    struct dmar_map_entry *prev)
{
	struct dmar_map_entry *next;
	bool found;

	/*
	 * The prev->end is always aligned on the page size, which
	 * causes page alignment for the entry->start too.  The size
	 * is checked to be multiple of the page size.
	 *
	 * The page sized gap is created between consequent
	 * allocations to ensure that out-of-bounds accesses fault.
	 */
	a->entry->end = a->entry->start + a->size;

	next = RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, prev);
	KASSERT(next->start >= a->entry->end &&
	    next->start - a->entry->start >= a->size,
	    ("dmar_gas_match_insert hole failed %p prev (%jx, %jx) "
	    "free_after %jx next (%jx, %jx) entry (%jx, %jx)", a->ctx,
	    (uintmax_t)prev->start, (uintmax_t)prev->end,
	    (uintmax_t)prev->free_after,
	    (uintmax_t)next->start, (uintmax_t)next->end,
	    (uintmax_t)a->entry->start, (uintmax_t)a->entry->end));

	/* Split prev's gap between prev and the new entry. */
	prev->free_after = a->entry->start - prev->end;
	a->entry->free_after = next->start - a->entry->end;

	found = dmar_gas_rb_insert(a->ctx, a->entry);
	KASSERT(found, ("found dup %p start %jx size %jx",
	    a->ctx, (uintmax_t)a->entry->start, (uintmax_t)a->size));
	a->entry->flags = DMAR_MAP_ENTRY_MAP;

	/* The new entry must land exactly between prev and next. */
	KASSERT(RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root,
	    a->entry) == prev,
	    ("entry %p prev %p inserted prev %p", a->entry, prev,
	    RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root, a->entry)));
	KASSERT(RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root,
	    a->entry) == next,
	    ("entry %p next %p inserted next %p", a->entry, next,
	    RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, a->entry)));
}
386
/*
 * Recursively search the subtree rooted at prev for a gap that satisfies
 * the request below common->lowaddr.  The free_down augmentation lets
 * whole subtrees that cannot possibly fit the request be pruned.
 * Returns 0 on success (entry inserted), ENOMEM otherwise.
 */
static int
dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *prev)
{
	struct dmar_map_entry *l;
	int ret;

	if (prev->end < a->common->lowaddr) {
		/* Trial placement: one guard page past prev, then align. */
		a->entry->start = roundup2(prev->end + DMAR_PAGE_SIZE,
		    a->common->alignment);
		if (dmar_gas_match_one(a, prev, a->common->lowaddr)) {
			dmar_gas_match_insert(a, prev);
			return (0);
		}
	}
	/* No gap anywhere in this subtree is big enough: prune it. */
	if (prev->free_down < a->size + DMAR_PAGE_SIZE)
		return (ENOMEM);
	l = RB_LEFT(prev, rb_entry);
	if (l != NULL) {
		ret = dmar_gas_lowermatch(a, l);
		if (ret == 0)
			return (0);
	}
	l = RB_RIGHT(prev, rb_entry);
	if (l != NULL)
		return (dmar_gas_lowermatch(a, l));
	return (ENOMEM);
}
414
/*
 * Search for a gap at or above common->highaddr, up to the end of the
 * guest address space.  Unlike the lower search, this walks the entries
 * linearly starting from the first entry at or after highaddr.
 * Returns 0 on success (entry inserted), ENOMEM otherwise.
 */
static int
dmar_gas_uppermatch(struct dmar_gas_match_args *a)
{
	struct dmar_map_entry *next, *prev, find_entry;

	/* Locate the first entry whose key is >= highaddr. */
	find_entry.start = a->common->highaddr;
	next = RB_NFIND(dmar_gas_entries_tree, &a->ctx->rb_root, &find_entry);
	if (next == NULL)
		return (ENOMEM);
	prev = RB_PREV(dmar_gas_entries_tree, &a->ctx->rb_root, next);
	KASSERT(prev != NULL, ("no prev %p %jx", a->ctx,
	    (uintmax_t)find_entry.start));
	for (;;) {
		/* Trial start: past prev, clamped to highaddr, aligned. */
		a->entry->start = prev->start + DMAR_PAGE_SIZE;
		if (a->entry->start < a->common->highaddr)
			a->entry->start = a->common->highaddr;
		a->entry->start = roundup2(a->entry->start,
		    a->common->alignment);
		if (dmar_gas_match_one(a, prev, a->ctx->end)) {
			dmar_gas_match_insert(a, prev);
			return (0);
		}

		/*
		 * XXXKIB.  This falls back to linear iteration over
		 * the free space in the high region.  But high
		 * regions are almost unused, the code should be
		 * enough to cover the case, although in the
		 * non-optimal way.
		 */
		prev = next;
		next = RB_NEXT(dmar_gas_entries_tree, &a->ctx->rb_root, prev);
		KASSERT(next != NULL, ("no next %p %jx", a->ctx,
		    (uintmax_t)find_entry.start));
		if (next->end >= a->ctx->end)
			return (ENOMEM);
	}
}
453
/*
 * Find a free range of the requested size in the context's guest address
 * space, honoring the DMA tag's lowaddr/highaddr/alignment/boundary
 * constraints, and insert "entry" there.  The lower region (below
 * lowaddr) is tried first, then the upper region (above highaddr).
 * Returns 0 on success, ENOMEM when no suitable gap exists.
 * Caller holds the context lock; size must be page-aligned.
 */
static int
dmar_gas_find_space(struct dmar_ctx *ctx,
    const struct bus_dma_tag_common *common, dmar_gaddr_t size,
    u_int flags, struct dmar_map_entry *entry)
{
	struct dmar_gas_match_args a;
	int error;

	DMAR_CTX_ASSERT_LOCKED(ctx);
	KASSERT(entry->flags == 0, ("dirty entry %p %p", ctx, entry));
	KASSERT((size & DMAR_PAGE_MASK) == 0, ("size %jx", (uintmax_t)size));

	a.ctx = ctx;
	a.size = size;
	a.common = common;
	a.gas_flags = flags;
	a.entry = entry;

	/* Handle lower region. */
	if (common->lowaddr > 0) {
		error = dmar_gas_lowermatch(&a, RB_ROOT(&ctx->rb_root));
		if (error == 0)
			return (0);
		KASSERT(error == ENOMEM,
		    ("error %d from dmar_gas_lowermatch", error));
	}
	/* Handle upper region. */
	if (common->highaddr >= ctx->end)
		return (ENOMEM);
	error = dmar_gas_uppermatch(&a);
	KASSERT(error == ENOMEM,
	    ("error %d from dmar_gas_uppermatch", error));
	return (error);
}
488
/*
 * Insert a fixed-address region (typically an RMRR from the BIOS) into
 * the GAS tree.  The entry's start/end are caller-supplied and must be
 * page-aligned, non-empty, and below ctx->end.  Overlaps with existing
 * RMRR entries are trimmed; overlaps with non-RMRR, non-placeholder
 * entries fail with EBUSY.  Returns 0 on success, EINVAL/EBUSY on error.
 * Caller holds the context lock.
 */
static int
dmar_gas_alloc_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
    u_int flags)
{
	struct dmar_map_entry *next, *prev;
	bool found;

	DMAR_CTX_ASSERT_LOCKED(ctx);

	/* Validate alignment and bounds of the requested region. */
	if ((entry->start & DMAR_PAGE_MASK) != 0 ||
	    (entry->end & DMAR_PAGE_MASK) != 0)
		return (EINVAL);
	if (entry->start >= entry->end)
		return (EINVAL);
	if (entry->end >= ctx->end)
		return (EINVAL);

	next = RB_NFIND(dmar_gas_entries_tree, &ctx->rb_root, entry);
	KASSERT(next != NULL, ("next must be non-null %p %jx", ctx,
	    (uintmax_t)entry->start));
	prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, next);
	/* prev could be NULL */

	/*
	 * Adapt to broken BIOSes which specify overlapping RMRR
	 * entries.
	 *
	 * XXXKIB: this does not handle a case when prev or next
	 * entries are completely covered by the current one, which
	 * extends both ways.
	 */
	if (prev != NULL && prev->end > entry->start &&
	    (prev->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
		if ((prev->flags & DMAR_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		/* Trim our start up to the end of the overlapping RMRR. */
		entry->start = prev->end;
	}
	if (next != NULL && next->start < entry->end &&
	    (next->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
		if ((next->flags & DMAR_MAP_ENTRY_RMRR) == 0)
			return (EBUSY);
		/* Trim our end down to the start of the overlapping RMRR. */
		entry->end = next->start;
	}
	/* Trimming may have consumed the whole region; nothing to insert. */
	if (entry->end == entry->start)
		return (0);

	if (prev != NULL && prev->end > entry->start) {
		/* This assumes that prev is the placeholder entry. */
		dmar_gas_rb_remove(ctx, prev);
		prev = NULL;
	}
	if (next != NULL && next->start < entry->end) {
		dmar_gas_rb_remove(ctx, next);
		next = NULL;
	}

	found = dmar_gas_rb_insert(ctx, entry);
	KASSERT(found, ("found RMRR dup %p start %jx end %jx",
	    ctx, (uintmax_t)entry->start, (uintmax_t)entry->end));
	entry->flags = DMAR_MAP_ENTRY_RMRR;

#ifdef INVARIANTS
	struct dmar_map_entry *ip, *in;
	ip = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry);
	in = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
	KASSERT(prev == NULL || ip == prev,
	    ("RMRR %p (%jx %jx) prev %p (%jx %jx) ins prev %p (%jx %jx)",
	    entry, entry->start, entry->end, prev,
	    prev == NULL ? 0 : prev->start, prev == NULL ? 0 : prev->end,
	    ip, ip == NULL ? 0 : ip->start, ip == NULL ? 0 : ip->end));
	KASSERT(next == NULL || in == next,
	    ("RMRR %p (%jx %jx) next %p (%jx %jx) ins next %p (%jx %jx)",
	    entry, entry->start, entry->end, next,
	    next == NULL ? 0 : next->start, next == NULL ? 0 : next->end,
	    in, in == NULL ? 0 : in->start, in == NULL ? 0 : in->end));
#endif

	return (0);
}
568
/*
 * Release a regular (DMAR_MAP_ENTRY_MAP) entry's range back to the
 * allocator.  The entry itself is not freed; the caller does that via
 * dmar_gas_free_entry() if desired.  Caller holds the context lock.
 */
void
dmar_gas_free_space(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
{

	DMAR_CTX_ASSERT_LOCKED(ctx);
	/* Placeholder and RMRR entries are permanent and must not come here. */
	KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
	    DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_MAP,
	    ("permanent entry %p %p", ctx, entry));

	dmar_gas_rb_remove(ctx, entry);
	entry->flags &= ~DMAR_MAP_ENTRY_MAP;
#ifdef INVARIANTS
	if (dmar_check_free)
		dmar_gas_check_free(ctx);
#endif
}
585
/*
 * Remove an RMRR entry from the GAS tree.  If the RMRR had displaced one
 * of the permanent placeholders (it was the first or last entry in the
 * tree), the corresponding placeholder is re-inserted.
 * Caller holds the context lock.
 */
static void
dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
{
	struct dmar_map_entry *next, *prev;

	DMAR_CTX_ASSERT_LOCKED(ctx);
	KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
	    DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_RMRR,
	    ("non-RMRR entry %p %p", ctx, entry));

	prev = RB_PREV(dmar_gas_entries_tree, &ctx->rb_root, entry);
	next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
	dmar_gas_rb_remove(ctx, entry);
	entry->flags &= ~DMAR_MAP_ENTRY_RMRR;

	/* Restore the placeholders the RMRR had replaced, if any. */
	if (prev == NULL)
		dmar_gas_rb_insert(ctx, ctx->first_place);
	if (next == NULL)
		dmar_gas_rb_insert(ctx, ctx->last_place);
}
606
/*
 * Allocate guest address space of the given size (honoring the DMA tag
 * constraints) and map the pages in "ma" there with permissions derived
 * from eflags.  On success, *res points to the new map entry.
 * Returns 0 on success, ENOMEM when either address space or page-table
 * pages are exhausted.
 */
int
dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common,
    dmar_gaddr_t size, u_int eflags, u_int flags, vm_page_t *ma,
    struct dmar_map_entry **res)
{
	struct dmar_map_entry *entry;
	int error;

	KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_CANSPLIT)) == 0,
	    ("invalid flags 0x%x", flags));

	entry = dmar_gas_alloc_entry(ctx, (flags & DMAR_GM_CANWAIT) != 0 ?
	    DMAR_PGF_WAITOK : 0);
	if (entry == NULL)
		return (ENOMEM);
	DMAR_CTX_LOCK(ctx);
	error = dmar_gas_find_space(ctx, common, size, flags, entry);
	if (error == ENOMEM) {
		DMAR_CTX_UNLOCK(ctx);
		dmar_gas_free_entry(ctx, entry);
		return (error);
	}
#ifdef INVARIANTS
	if (dmar_check_free)
		dmar_gas_check_free(ctx);
#endif
	KASSERT(error == 0,
	    ("unexpected error %d from dmar_gas_find_entry", error));
	KASSERT(entry->end < ctx->end, ("allocated GPA %jx, max GPA %jx",
	    (uintmax_t)entry->end, (uintmax_t)ctx->end));
	entry->flags |= eflags;
	DMAR_CTX_UNLOCK(ctx);

	/* Populate the page tables; done without the ctx lock held. */
	error = ctx_map_buf(ctx, entry->start, size, ma,
	    ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
	    ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
	    ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
	    ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
	    (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
	if (error == ENOMEM) {
		/* Undo the address-space allocation on mapping failure. */
		DMAR_CTX_LOCK(ctx);
		dmar_gas_free_space(ctx, entry);
		DMAR_CTX_UNLOCK(ctx);
		dmar_gas_free_entry(ctx, entry);
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from ctx_map_buf", error));

	*res = entry;
	return (0);
}
659
/*
 * Map a fixed-address region (RMRR) described by "entry" with the pages
 * in "ma".  The entry's start may be trimmed forward by
 * dmar_gas_alloc_region() when it overlaps an existing RMRR; the region
 * may also shrink to nothing, in which case nothing is mapped.
 * Returns 0 on success or an errno from the region insertion/mapping.
 */
int
dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
    u_int eflags, u_int flags, vm_page_t *ma)
{
	dmar_gaddr_t start;
	int error;

	KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", ctx,
	    entry, entry->flags));
	KASSERT((flags & ~(DMAR_GM_CANWAIT)) == 0,
	    ("invalid flags 0x%x", flags));

	/* Remember the original start; alloc_region may move entry->start. */
	start = entry->start;
	DMAR_CTX_LOCK(ctx);
	error = dmar_gas_alloc_region(ctx, entry, flags);
	if (error != 0) {
		DMAR_CTX_UNLOCK(ctx);
		return (error);
	}
	entry->flags |= eflags;
	DMAR_CTX_UNLOCK(ctx);
	/* Overlap trimming may have emptied the region entirely. */
	if (entry->end == entry->start)
		return (0);

	/*
	 * NOTE(review): the page-array offset is OFF_TO_IDX(start -
	 * entry->start), but alloc_region only ever moves entry->start
	 * forward, making this difference non-positive; with unsigned
	 * dmar_gaddr_t this relies on wraparound of the pointer
	 * arithmetic.  Confirm the intended direction of the offset.
	 */
	error = ctx_map_buf(ctx, entry->start, entry->end - entry->start,
	    ma + OFF_TO_IDX(start - entry->start),
	    ((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
	    ((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
	    ((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
	    ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
	    (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
	if (error == ENOMEM) {
		/* Undo the region insertion on mapping failure. */
		DMAR_CTX_LOCK(ctx);
		dmar_gas_free_region(ctx, entry);
		DMAR_CTX_UNLOCK(ctx);
		entry->flags = 0;
		return (error);
	}
	KASSERT(error == 0,
	    ("unexpected error %d from ctx_map_buf", error));

	return (0);
}
703
704int
705dmar_gas_reserve_region(struct dmar_ctx *ctx, dmar_gaddr_t start,
706    dmar_gaddr_t end)
707{
708	struct dmar_map_entry *entry;
709	int error;
710
711	entry = dmar_gas_alloc_entry(ctx, DMAR_PGF_WAITOK);
712	entry->start = start;
713	entry->end = end;
714	DMAR_CTX_LOCK(ctx);
715	error = dmar_gas_alloc_region(ctx, entry, DMAR_GM_CANWAIT);
716	if (error == 0)
717		entry->flags |= DMAR_MAP_ENTRY_UNMAPPED;
718	DMAR_CTX_UNLOCK(ctx);
719	if (error != 0)
720		dmar_gas_free_entry(ctx, entry);
721	return (error);
722}
723