busdma_dmar.c (257251) busdma_dmar.c (259512)
1/*-
2 * Copyright (c) 2013 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
6 * under sponsorship from the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/x86/iommu/busdma_dmar.c 257251 2013-10-28 13:33:29Z kib $");
31__FBSDID("$FreeBSD: stable/10/sys/x86/iommu/busdma_dmar.c 259512 2013-12-17 13:49:35Z kib $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/malloc.h>
36#include <sys/bus.h>
37#include <sys/conf.h>
38#include <sys/interrupt.h>
39#include <sys/kernel.h>
40#include <sys/ktr.h>
41#include <sys/lock.h>
42#include <sys/proc.h>
43#include <sys/memdesc.h>
44#include <sys/mutex.h>
45#include <sys/sysctl.h>
46#include <sys/rman.h>
47#include <sys/taskqueue.h>
48#include <sys/tree.h>
49#include <sys/uio.h>
50#include <dev/pci/pcivar.h>
51#include <vm/vm.h>
52#include <vm/vm_extern.h>
53#include <vm/vm_kern.h>
54#include <vm/vm_object.h>
55#include <vm/vm_page.h>
56#include <vm/vm_map.h>
57#include <machine/atomic.h>
58#include <machine/bus.h>
59#include <machine/md_var.h>
60#include <machine/specialreg.h>
61#include <x86/include/busdma_impl.h>
62#include <x86/iommu/intel_reg.h>
63#include <x86/iommu/busdma_dmar.h>
64#include <x86/iommu/intel_dmar.h>
65
66/*
67 * busdma_dmar.c, the implementation of the busdma(9) interface using
68 * DMAR units from Intel VT-d.
69 */
70
71static bool
72dmar_bus_dma_is_dev_disabled(device_t dev)
73{
74 char str[128], *env;
75 int domain, bus, slot, func;
76
77 domain = pci_get_domain(dev);
78 bus = pci_get_bus(dev);
79 slot = pci_get_slot(dev);
80 func = pci_get_function(dev);
81 snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d.bounce",
82 domain, bus, slot, func);
83 env = getenv(str);
84 if (env == NULL)
85 return (false);
86 freeenv(env);
87 return (true);
88}
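The lookup above only checks that the per-device kernel environment variable exists; its value is ignored. As an illustrative example (the device address is hypothetical), bounce/identity behaviour could be requested for a single PCI function by setting the tunable at boot, e.g. in /boot/loader.conf:

    # hypothetical device at domain 0, bus 3, slot 0, function 1
    hw.busdma.pci0.3.0.1.bounce="1"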
89
90struct dmar_ctx *
91dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
92{
93 struct dmar_ctx *ctx;
94 bool disabled;
95
96 /*
97 * If the user requested the IOMMU disabled for the device, we
 98 * cannot disable the DMAR, due to the possibility of other
 99 * devices on the same DMAR still requiring translation.
 100 * Instead, provide the identity mapping for the device
101 * context.
102 */
103 disabled = dmar_bus_dma_is_dev_disabled(dev);
104 ctx = dmar_get_ctx(dmar, dev, disabled, rmrr);
105 if (ctx == NULL)
106 return (NULL);
107 ctx->ctx_tag.owner = dev;
108 if (disabled) {
109 /*
 110 * Keep the first reference on the context, release the
111 * later refs.
112 */
113 DMAR_LOCK(dmar);
114 if ((ctx->flags & DMAR_CTX_DISABLED) == 0) {
115 ctx->flags |= DMAR_CTX_DISABLED;
116 DMAR_UNLOCK(dmar);
117 } else {
118 dmar_free_ctx_locked(dmar, ctx);
119 }
120 ctx = NULL;
121 }
122 return (ctx);
123}
124
125bus_dma_tag_t
126dmar_get_dma_tag(device_t dev, device_t child)
127{
128 struct dmar_unit *dmar;
129 struct dmar_ctx *ctx;
130 bus_dma_tag_t res;
131
132 dmar = dmar_find(child);
 133 /* Not in scope of any DMAR? */
134 if (dmar == NULL)
135 return (NULL);
136 dmar_quirks_pre_use(dmar);
137 dmar_instantiate_rmrr_ctxs(dmar);
138
139 ctx = dmar_instantiate_ctx(dmar, child, false);
140 res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag;
141 return (res);
142}
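For context, a rough sketch (not taken from this file) of how the tag returned by dmar_get_dma_tag() is usually consumed: the bus hands it to drivers as their parent tag, presumably via the bus_get_dma_tag(9) path, and dmar_bus_dma_tag_create() below then copies ctx and owner from that parent. The device and parameters here are purely illustrative.

    /* Hypothetical driver attach code, shown only as a sketch; dev is the driver's device_t. */
    bus_dma_tag_t parent, dtag;
    int error;

    parent = bus_get_dma_tag(dev);          /* may be the DMAR ctx_tag */
    error = bus_dma_tag_create(parent,
        1, 0,                               /* alignment, boundary */
        BUS_SPACE_MAXADDR,                  /* lowaddr */
        BUS_SPACE_MAXADDR,                  /* highaddr */
        NULL, NULL,                         /* filter, filterarg */
        MCLBYTES, 1, MCLBYTES,              /* maxsize, nsegments, maxsegsz */
        0, NULL, NULL, &dtag);              /* flags, lockfunc/arg, result */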
143
144static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map");
145
146static void dmar_bus_schedule_dmamap(struct dmar_unit *unit,
147 struct bus_dmamap_dmar *map);
148
149static int
150dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
151 bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
152 bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
153 int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
154 void *lockfuncarg, bus_dma_tag_t *dmat)
155{
156 struct bus_dma_tag_dmar *newtag, *oldtag;
157 int error;
158
159 *dmat = NULL;
160 error = common_bus_dma_tag_create(parent != NULL ?
161 &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment,
162 boundary, lowaddr, highaddr, filter, filterarg, maxsize,
163 nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
164 sizeof(struct bus_dma_tag_dmar), (void **)&newtag);
165 if (error != 0)
166 return (error);
166 goto out;
167
168 oldtag = (struct bus_dma_tag_dmar *)parent;
169 newtag->common.impl = &bus_dma_dmar_impl;
170 newtag->ctx = oldtag->ctx;
171 newtag->owner = oldtag->owner;
167
168 oldtag = (struct bus_dma_tag_dmar *)parent;
169 newtag->common.impl = &bus_dma_dmar_impl;
170 newtag->ctx = oldtag->ctx;
171 newtag->owner = oldtag->owner;
172 error = 0;
173
172
174 if (error != 0)
175 free(newtag, M_DEVBUF);
176 else
177 *dmat = (bus_dma_tag_t)newtag;
173 *dmat = (bus_dma_tag_t)newtag;
174out:
178 CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
179 __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
180 error);
181 return (error);
182}
183
184static int
185dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
186{
187 struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent;
188 int error;
189
190 error = 0;
191 dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1;
192
193 if (dmat != NULL) {
194 if (dmat->map_count != 0) {
195 error = EBUSY;
196 goto out;
197 }
198 while (dmat != NULL) {
199 parent = (struct bus_dma_tag_dmar *)dmat->common.parent;
200 if (atomic_fetchadd_int(&dmat->common.ref_count, -1) ==
201 1) {
202 if (dmat == &dmat->ctx->ctx_tag)
203 dmar_free_ctx(dmat->ctx);
204 free(dmat->segments, M_DMAR_DMAMAP);
205 free(dmat, M_DEVBUF);
206 dmat = parent;
207 } else
208 dmat = NULL;
209 }
210 }
211out:
212 CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
213 return (error);
214}
215
216static int
217dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
218{
219 struct bus_dma_tag_dmar *tag;
220 struct bus_dmamap_dmar *map;
221
222 tag = (struct bus_dma_tag_dmar *)dmat;
223 map = malloc(sizeof(*map), M_DMAR_DMAMAP, M_NOWAIT | M_ZERO);
224 if (map == NULL) {
225 *mapp = NULL;
226 return (ENOMEM);
227 }
228 if (tag->segments == NULL) {
229 tag->segments = malloc(sizeof(bus_dma_segment_t) *
230 tag->common.nsegments, M_DMAR_DMAMAP, M_NOWAIT);
231 if (tag->segments == NULL) {
232 free(map, M_DMAR_DMAMAP);
233 *mapp = NULL;
234 return (ENOMEM);
235 }
236 }
237 TAILQ_INIT(&map->map_entries);
238 map->tag = tag;
239 map->locked = true;
240 map->cansleep = false;
241 tag->map_count++;
242 *mapp = (bus_dmamap_t)map;
243
244 return (0);
245}
246
247static int
248dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
249{
250 struct bus_dma_tag_dmar *tag;
251 struct bus_dmamap_dmar *map;
252
253 tag = (struct bus_dma_tag_dmar *)dmat;
254 map = (struct bus_dmamap_dmar *)map1;
255 if (map != NULL) {
256 DMAR_CTX_LOCK(tag->ctx);
257 if (!TAILQ_EMPTY(&map->map_entries)) {
258 DMAR_CTX_UNLOCK(tag->ctx);
259 return (EBUSY);
260 }
261 DMAR_CTX_UNLOCK(tag->ctx);
262 free(map, M_DMAR_DMAMAP);
263 }
264 tag->map_count--;
265 return (0);
266}
267
268
269static int
270dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
271 bus_dmamap_t *mapp)
272{
273 struct bus_dma_tag_dmar *tag;
274 struct bus_dmamap_dmar *map;
275 int error, mflags;
276 vm_memattr_t attr;
277
278 error = dmar_bus_dmamap_create(dmat, flags, mapp);
279 if (error != 0)
280 return (error);
281
282 mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK;
283 mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0;
284 attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE :
285 VM_MEMATTR_DEFAULT;
286
287 tag = (struct bus_dma_tag_dmar *)dmat;
288 map = (struct bus_dmamap_dmar *)*mapp;
289
290 if (tag->common.maxsize < PAGE_SIZE &&
291 tag->common.alignment <= tag->common.maxsize &&
292 attr == VM_MEMATTR_DEFAULT) {
293 *vaddr = malloc(tag->common.maxsize, M_DEVBUF, mflags);
294 map->flags |= BUS_DMAMAP_DMAR_MALLOC;
295 } else {
296 *vaddr = (void *)kmem_alloc_attr(kernel_arena,
297 tag->common.maxsize, mflags, 0ul, BUS_SPACE_MAXADDR,
298 attr);
299 map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC;
300 }
301 if (*vaddr == NULL) {
302 dmar_bus_dmamap_destroy(dmat, *mapp);
303 *mapp = NULL;
304 return (ENOMEM);
305 }
306 return (0);
307}
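A brief usage sketch of the policy above, assuming dtag is the tag from the earlier sketch: its maxsize is below PAGE_SIZE, its alignment fits within maxsize, and no BUS_DMA_NOCACHE is requested, so the buffer is served from malloc(9) rather than kmem_alloc_attr().

    /* Illustrative only. */
    void *vaddr;
    bus_dmamap_t map;
    int error;

    error = bus_dmamem_alloc(dtag, &vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &map);
    if (error == 0) {
        /* ... use the buffer ... */
        bus_dmamem_free(dtag, vaddr, map);
    }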
308
309static void
310dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
311{
312 struct bus_dma_tag_dmar *tag;
313 struct bus_dmamap_dmar *map;
314
315 tag = (struct bus_dma_tag_dmar *)dmat;
316 map = (struct bus_dmamap_dmar *)map1;
317
318 if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) {
319 free(vaddr, M_DEVBUF);
320 map->flags &= ~BUS_DMAMAP_DMAR_MALLOC;
321 } else {
322 KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0,
323 ("dmar_bus_dmamem_free for non alloced map %p", map));
324 kmem_free(kernel_arena, (vm_offset_t)vaddr, tag->common.maxsize);
325 map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC;
326 }
327
328 dmar_bus_dmamap_destroy(dmat, map1);
329}
330
331static int
332dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
333 struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
334 int flags, bus_dma_segment_t *segs, int *segp,
335 struct dmar_map_entries_tailq *unroll_list)
336{
337 struct dmar_ctx *ctx;
338 struct dmar_map_entry *entry;
339 dmar_gaddr_t size;
340 bus_size_t buflen1;
341 int error, idx, gas_flags, seg;
342
343 if (segs == NULL)
344 segs = tag->segments;
345 ctx = tag->ctx;
346 seg = *segp;
344 error = 0;
347 idx = 0;
348 while (buflen > 0) {
349 seg++;
350 if (seg >= tag->common.nsegments) {
351 error = EFBIG;
352 break;
353 }
354 buflen1 = buflen > tag->common.maxsegsz ?
355 tag->common.maxsegsz : buflen;
356 buflen -= buflen1;
357 size = round_page(offset + buflen1);
358
359 /*
 360 * (Too) optimistically allow a split if there is more
 361 * than one segment left.
362 */
363 gas_flags = map->cansleep ? DMAR_GM_CANWAIT : 0;
364 if (seg + 1 < tag->common.nsegments)
365 gas_flags |= DMAR_GM_CANSPLIT;
366
367 error = dmar_gas_map(ctx, &tag->common, size,
368 DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
369 gas_flags, ma + idx, &entry);
370 if (error != 0)
371 break;
372 if ((gas_flags & DMAR_GM_CANSPLIT) != 0) {
373 KASSERT(size >= entry->end - entry->start,
374 ("split increased entry size %jx %jx %jx",
375 (uintmax_t)size, (uintmax_t)entry->start,
376 (uintmax_t)entry->end));
377 size = entry->end - entry->start;
378 if (buflen1 > size)
379 buflen1 = size;
380 } else {
381 KASSERT(entry->end - entry->start == size,
382 ("no split allowed %jx %jx %jx",
383 (uintmax_t)size, (uintmax_t)entry->start,
384 (uintmax_t)entry->end));
385 }
386
387 KASSERT(((entry->start + offset) & (tag->common.alignment - 1))
388 == 0,
389 ("alignment failed: ctx %p start 0x%jx offset %x "
390 "align 0x%jx", ctx, (uintmax_t)entry->start, offset,
391 (uintmax_t)tag->common.alignment));
392 KASSERT(entry->end <= tag->common.lowaddr ||
393 entry->start >= tag->common.highaddr,
394 ("entry placement failed: ctx %p start 0x%jx end 0x%jx "
395 "lowaddr 0x%jx highaddr 0x%jx", ctx,
396 (uintmax_t)entry->start, (uintmax_t)entry->end,
397 (uintmax_t)tag->common.lowaddr,
398 (uintmax_t)tag->common.highaddr));
399 KASSERT(dmar_test_boundary(entry->start, entry->end -
400 entry->start, tag->common.boundary),
401 ("boundary failed: ctx %p start 0x%jx end 0x%jx "
402 "boundary 0x%jx", ctx, (uintmax_t)entry->start,
403 (uintmax_t)entry->end, (uintmax_t)tag->common.boundary));
404 KASSERT(buflen1 <= tag->common.maxsegsz,
405 ("segment too large: ctx %p start 0x%jx end 0x%jx "
406 "maxsegsz 0x%jx", ctx, (uintmax_t)entry->start,
407 (uintmax_t)entry->end, (uintmax_t)tag->common.maxsegsz));
408
409 DMAR_CTX_LOCK(ctx);
410 TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
411 entry->flags |= DMAR_MAP_ENTRY_MAP;
412 DMAR_CTX_UNLOCK(ctx);
413 TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
414
415 segs[seg].ds_addr = entry->start + offset;
416 segs[seg].ds_len = buflen1;
417
418 idx += OFF_TO_IDX(trunc_page(offset + buflen1));
419 offset += buflen1;
420 offset &= DMAR_PAGE_MASK;
421 }
422 if (error == 0)
423 *segp = seg;
424 return (error);
425}
426
427static int
428dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
429 struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
430 int flags, bus_dma_segment_t *segs, int *segp)
431{
432 struct dmar_ctx *ctx;
433 struct dmar_map_entry *entry, *entry1;
434 struct dmar_map_entries_tailq unroll_list;
435 int error;
436
437 ctx = tag->ctx;
438 atomic_add_long(&ctx->loads, 1);
439
440 TAILQ_INIT(&unroll_list);
441 error = dmar_bus_dmamap_load_something1(tag, map, ma, offset,
442 buflen, flags, segs, segp, &unroll_list);
443 if (error != 0) {
444 /*
445 * The busdma interface does not allow us to report
 446 * a partial buffer load, so unfortunately we have to
 447 * revert all the work done.
448 */
449 DMAR_CTX_LOCK(ctx);
450 TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
451 entry1) {
452 /*
 453 * No entries other than the ones we created
 454 * during the failed run can have been
 455 * inserted in between, since we own the ctx
456 * pglock.
457 */
458 TAILQ_REMOVE(&map->map_entries, entry, dmamap_link);
459 TAILQ_REMOVE(&unroll_list, entry, unroll_link);
460 TAILQ_INSERT_TAIL(&ctx->unload_entries, entry,
461 dmamap_link);
462 }
463 DMAR_CTX_UNLOCK(ctx);
464 taskqueue_enqueue(ctx->dmar->delayed_taskqueue,
465 &ctx->unload_task);
466 }
467
468 if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 &&
469 !map->cansleep)
470 error = EINPROGRESS;
471 if (error == EINPROGRESS)
472 dmar_bus_schedule_dmamap(ctx->dmar, map);
473 return (error);
474}
475
476static int
477dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
478 struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
479 bus_dma_segment_t *segs, int *segp)
480{
481 struct bus_dma_tag_dmar *tag;
482 struct bus_dmamap_dmar *map;
483
484 tag = (struct bus_dma_tag_dmar *)dmat;
485 map = (struct bus_dmamap_dmar *)map1;
486 return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
487 flags, segs, segp));
488}
489
490static int
491dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
492 vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
493 int *segp)
494{
495 struct bus_dma_tag_dmar *tag;
496 struct bus_dmamap_dmar *map;
497 vm_page_t *ma;
498 vm_paddr_t pstart, pend;
499 int error, i, ma_cnt, offset;
500
501 tag = (struct bus_dma_tag_dmar *)dmat;
502 map = (struct bus_dmamap_dmar *)map1;
503 pstart = trunc_page(buf);
504 pend = round_page(buf + buflen);
505 offset = buf & PAGE_MASK;
506 ma_cnt = OFF_TO_IDX(pend - pstart);
507 ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, map->cansleep ?
508 M_WAITOK : M_NOWAIT);
509 if (ma == NULL)
510 return (ENOMEM);
511 for (i = 0; i < ma_cnt; i++)
512 ma[i] = PHYS_TO_VM_PAGE(pstart + i * PAGE_SIZE);
513 error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
514 flags, segs, segp);
515 free(ma, M_DEVBUF);
516 return (error);
517}
518
519static int
520dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
521 bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
522 int *segp)
523{
524 struct bus_dma_tag_dmar *tag;
525 struct bus_dmamap_dmar *map;
526 vm_page_t *ma, fma;
527 vm_paddr_t pstart, pend, paddr;
528 int error, i, ma_cnt, offset;
529
530 tag = (struct bus_dma_tag_dmar *)dmat;
531 map = (struct bus_dmamap_dmar *)map1;
532 pstart = trunc_page((vm_offset_t)buf);
533 pend = round_page((vm_offset_t)buf + buflen);
534 offset = (vm_offset_t)buf & PAGE_MASK;
535 ma_cnt = OFF_TO_IDX(pend - pstart);
536 ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, map->cansleep ?
537 M_WAITOK : M_NOWAIT);
538 if (ma == NULL)
539 return (ENOMEM);
540 if (dumping) {
541 /*
542 * If dumping, do not attempt to call
543 * PHYS_TO_VM_PAGE() at all. It may return non-NULL
544 * but the vm_page returned might be not initialized,
545 * e.g. for the kernel itself.
546 */
547 KASSERT(pmap == kernel_pmap, ("non-kernel address write"));
548 fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF,
549 M_ZERO | (map->cansleep ? M_WAITOK : M_NOWAIT));
550 if (fma == NULL) {
551 free(ma, M_DEVBUF);
552 return (ENOMEM);
553 }
554 for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
555 paddr = pmap_kextract(pstart);
556 vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT);
557 ma[i] = &fma[i];
558 }
559 } else {
560 fma = NULL;
561 for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
562 if (pmap == kernel_pmap)
563 paddr = pmap_kextract(pstart);
564 else
565 paddr = pmap_extract(pmap, pstart);
566 ma[i] = PHYS_TO_VM_PAGE(paddr);
567 KASSERT(VM_PAGE_TO_PHYS(ma[i]) == paddr,
568 ("PHYS_TO_VM_PAGE failed %jx %jx m %p",
569 (uintmax_t)paddr, (uintmax_t)VM_PAGE_TO_PHYS(ma[i]),
570 ma[i]));
571 }
572 }
573 error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
574 flags, segs, segp);
575 free(ma, M_DEVBUF);
576 free(fma, M_DEVBUF);
577 return (error);
578}
579
580static void
581dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
582 struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
583{
584 struct bus_dmamap_dmar *map;
585
586 if (map1 == NULL)
587 return;
588 map = (struct bus_dmamap_dmar *)map1;
589 map->mem = *mem;
590 map->tag = (struct bus_dma_tag_dmar *)dmat;
591 map->callback = callback;
592 map->callback_arg = callback_arg;
593}
594
595static bus_dma_segment_t *
596dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
597 bus_dma_segment_t *segs, int nsegs, int error)
598{
599 struct bus_dma_tag_dmar *tag;
600 struct bus_dmamap_dmar *map;
601
602 tag = (struct bus_dma_tag_dmar *)dmat;
603 map = (struct bus_dmamap_dmar *)map1;
604
605 if (!map->locked) {
606 KASSERT(map->cansleep,
607 ("map not locked and not sleepable context %p", map));
608
609 /*
610 * We are called from the delayed context. Relock the
611 * driver.
612 */
613 (tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK);
614 map->locked = true;
615 }
616
617 if (segs == NULL)
618 segs = tag->segments;
619 return (segs);
620}
621
622/*
 623 * The limitations of the busdma KPI force the dmar to perform the actual
 624 * unload, consisting of unmapping the map entries from the page tables,
 625 * from the delayed context on i386, since mapping a page table page
 626 * might require a sleep to be successful. The unfortunate
 627 * consequence is that DMA requests can still be served for some time
 628 * after the bus_dmamap_unload() call has returned.
629 *
630 * On amd64, we assume that sf allocation cannot fail.
631 */
632static void
633dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
634{
635 struct bus_dma_tag_dmar *tag;
636 struct bus_dmamap_dmar *map;
637 struct dmar_ctx *ctx;
638#if defined(__amd64__)
639 struct dmar_map_entries_tailq entries;
640#endif
641
642 tag = (struct bus_dma_tag_dmar *)dmat;
643 map = (struct bus_dmamap_dmar *)map1;
644 ctx = tag->ctx;
645 atomic_add_long(&ctx->unloads, 1);
646
647#if defined(__i386__)
648 DMAR_CTX_LOCK(ctx);
649 TAILQ_CONCAT(&ctx->unload_entries, &map->map_entries, dmamap_link);
650 DMAR_CTX_UNLOCK(ctx);
651 taskqueue_enqueue(ctx->dmar->delayed_taskqueue, &ctx->unload_task);
652#else /* defined(__amd64__) */
653 TAILQ_INIT(&entries);
654 DMAR_CTX_LOCK(ctx);
655 TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
656 DMAR_CTX_UNLOCK(ctx);
657 THREAD_NO_SLEEPING();
658 dmar_ctx_unload(ctx, &entries, false);
659 THREAD_SLEEPING_OK();
660 KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx));
661#endif
662}
663
664static void
665dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
666 bus_dmasync_op_t op)
667{
668}
669
670struct bus_dma_impl bus_dma_dmar_impl = {
671 .tag_create = dmar_bus_dma_tag_create,
672 .tag_destroy = dmar_bus_dma_tag_destroy,
673 .map_create = dmar_bus_dmamap_create,
674 .map_destroy = dmar_bus_dmamap_destroy,
675 .mem_alloc = dmar_bus_dmamem_alloc,
676 .mem_free = dmar_bus_dmamem_free,
677 .load_phys = dmar_bus_dmamap_load_phys,
678 .load_buffer = dmar_bus_dmamap_load_buffer,
679 .load_ma = dmar_bus_dmamap_load_ma,
680 .map_waitok = dmar_bus_dmamap_waitok,
681 .map_complete = dmar_bus_dmamap_complete,
682 .map_unload = dmar_bus_dmamap_unload,
683 .map_sync = dmar_bus_dmamap_sync
684};
685
686static void
687dmar_bus_task_dmamap(void *arg, int pending)
688{
689 struct bus_dma_tag_dmar *tag;
690 struct bus_dmamap_dmar *map;
691 struct dmar_unit *unit;
692 struct dmar_ctx *ctx;
693
694 unit = arg;
695 DMAR_LOCK(unit);
696 while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
697 TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
698 DMAR_UNLOCK(unit);
699 tag = map->tag;
700 ctx = map->tag->ctx;
701 map->cansleep = true;
702 map->locked = false;
703 bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map,
704 &map->mem, map->callback, map->callback_arg,
705 BUS_DMA_WAITOK);
706 map->cansleep = false;
707 if (map->locked) {
708 (tag->common.lockfunc)(tag->common.lockfuncarg,
709 BUS_DMA_UNLOCK);
710 } else
711 map->locked = true;
712 map->cansleep = false;
713 DMAR_LOCK(unit);
714 }
715 DMAR_UNLOCK(unit);
716}
717
718static void
719dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map)
720{
721 struct dmar_ctx *ctx;
722
723 ctx = map->tag->ctx;
724 map->locked = false;
725 DMAR_LOCK(unit);
726 TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
727 DMAR_UNLOCK(unit);
728 taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
729}
730
731int
732dmar_init_busdma(struct dmar_unit *unit)
733{
734
735 TAILQ_INIT(&unit->delayed_maps);
736 TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit);
737 unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK,
738 taskqueue_thread_enqueue, &unit->delayed_taskqueue);
739 taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
740 "dmar%d busdma taskq", unit->unit);
741 return (0);
742}
743
744void
745dmar_fini_busdma(struct dmar_unit *unit)
746{
747
748 if (unit->delayed_taskqueue == NULL)
749 return;
750
751 taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task);
752 taskqueue_free(unit->delayed_taskqueue);
753 unit->delayed_taskqueue = NULL;
754}