/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/x86/iommu/busdma_dmar.c 279486 2015-03-01 10:39:19Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>

/*
 * busdma_dmar.c, the implementation of the busdma(9) interface using
 * DMAR units from Intel VT-d.
 */

static bool
dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
{
	char str[128], *env;

	snprintf(str, sizeof(str), "hw.busdma.pci%d.%d.%d.%d.bounce",
	    domain, bus, slot, func);
	env = getenv(str);
	if (env == NULL)
		return (false);
	freeenv(env);
	return (true);
}
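
/*
 * For instance, booting with the loader(8) tunable
 *
 *	hw.busdma.pci0.6.0.0.bounce=1
 *
 * forces bounce (non-translated) mapping for the device at domain 0,
 * bus 6, slot 0, function 0.  Only the presence of the variable
 * matters; its value is ignored.  The address used here is purely
 * illustrative.
 */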

/*
 * Given the original device, find the requester ID that will be seen
 * by the DMAR unit and used for the page table lookup.  PCI bridges
 * may take ownership of transactions from downstream devices, so it
 * may not be the same as the BSF of the target device.  In those
 * cases, all devices downstream of the bridge must share a single
 * mapping domain, and must collectively be assigned to use either
 * DMAR or bounce mapping.
 */
static device_t
dmar_get_requester(device_t dev, uint16_t *rid)
{
	devclass_t pci_class;
	device_t l, pci, pcib, pcip, pcibp, requester;
	int cap_offset;
	uint16_t pcie_flags;
	bool bridge_is_pcie;

	pci_class = devclass_find("pci");
	l = requester = dev;

	*rid = pci_get_rid(dev);

	/*
	 * Walk the bridge hierarchy from the target device to the
	 * host port to find the translating bridge nearest the DMAR
	 * unit.
	 */
	for (;;) {
		pci = device_get_parent(l);
		KASSERT(pci != NULL, ("dmar_get_requester(%s): NULL parent "
		    "for %s", device_get_name(dev), device_get_name(l)));
		KASSERT(device_get_devclass(pci) == pci_class,
		    ("dmar_get_requester(%s): non-pci parent %s for %s",
		    device_get_name(dev), device_get_name(pci),
		    device_get_name(l)));

		pcib = device_get_parent(pci);
		KASSERT(pcib != NULL, ("dmar_get_requester(%s): NULL bridge "
		    "for %s", device_get_name(dev), device_get_name(pci)));

		/*
		 * The parent of our "bridge" isn't another PCI bus,
		 * so pcib isn't a PCI->PCI bridge but rather a host
		 * port, and the requester ID won't be translated
		 * further.
		 */
		pcip = device_get_parent(pcib);
		if (device_get_devclass(pcip) != pci_class)
			break;
		pcibp = device_get_parent(pcip);

		if (pci_find_cap(l, PCIY_EXPRESS, &cap_offset) == 0) {
			/*
			 * Do not stop the loop even if the target
			 * device is PCIe, because it is possible (but
			 * unlikely) to have a PCI->PCIe bridge
			 * somewhere in the hierarchy.
			 */
			l = pcib;
		} else {
			/*
			 * The device is not PCIe, so it cannot be
			 * seen as a requester by the DMAR unit.
			 * Check whether the bridge is PCIe.
			 */
			bridge_is_pcie = pci_find_cap(pcib, PCIY_EXPRESS,
			    &cap_offset) == 0;
			requester = pcib;

			/*
			 * Check for a buggy PCIe/PCI bridge that
			 * doesn't report the express capability.  If
			 * the bridge above it is express but isn't a
			 * PCI bridge, then we know pcib is actually a
			 * PCIe/PCI bridge.
			 */
			if (!bridge_is_pcie && pci_find_cap(pcibp,
			    PCIY_EXPRESS, &cap_offset) == 0) {
				pcie_flags = pci_read_config(pcibp,
				    cap_offset + PCIER_FLAGS, 2);
				if ((pcie_flags & PCIEM_FLAGS_TYPE) !=
				    PCIEM_TYPE_PCI_BRIDGE)
					bridge_is_pcie = true;
			}

			if (bridge_is_pcie) {
				/*
				 * The current device is not PCIe, but
				 * the bridge above it is.  This is a
				 * PCIe->PCI bridge.  Assume that the
				 * requester ID will be the secondary
				 * bus number with slot and function
				 * set to zero.
				 *
				 * XXX: Doesn't handle the case where
				 * the bridge is PCIe->PCI-X, and the
				 * bridge will only take ownership of
				 * requests in some cases.  We should
				 * provide context entries with the
				 * same page tables for taken and
				 * non-taken transactions.
				 */
				*rid = PCI_RID(pci_get_bus(l), 0, 0);
				l = pcibp;
			} else {
				/*
				 * Neither the device nor the bridge
				 * above it are PCIe.  This is a
				 * conventional PCI->PCI bridge, which
				 * will use the bridge's BSF as the
				 * requester ID.
				 */
				*rid = pci_get_rid(pcib);
				l = pcib;
			}
		}
	}
	return (requester);
}
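
/*
 * To illustrate the walk above with hypothetical numbers: a
 * conventional PCI device behind a PCIe->PCI bridge whose secondary
 * bus is 5 is reported with the requester ID PCI_RID(5, 0, 0), since
 * the bridge takes ownership of its upstream transactions, while in
 * a pure PCIe hierarchy the loop terminates at the host port and
 * *rid stays equal to pci_get_rid(dev).
 */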

struct dmar_ctx *
dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
{
	device_t requester;
	struct dmar_ctx *ctx;
	bool disabled;
	uint16_t rid;

	requester = dmar_get_requester(dev, &rid);

	/*
	 * If the user requested that the IOMMU be disabled for the
	 * device, we cannot disable the whole DMAR unit, due to the
	 * possibility of other devices on the same unit still
	 * requiring translation.  Instead, provide the identity
	 * mapping for the device context.
	 */
	disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester),
	    pci_get_bus(requester), pci_get_slot(requester),
	    pci_get_function(requester));
	ctx = dmar_get_ctx(dmar, requester, rid, disabled, rmrr);
	if (ctx == NULL)
		return (NULL);
	if (disabled) {
		/*
		 * Keep the first reference on the context, release
		 * later references.
		 */
		DMAR_LOCK(dmar);
		if ((ctx->flags & DMAR_CTX_DISABLED) == 0) {
			ctx->flags |= DMAR_CTX_DISABLED;
			DMAR_UNLOCK(dmar);
		} else {
			dmar_free_ctx_locked(dmar, ctx);
		}
		ctx = NULL;
	}
	return (ctx);
}
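
/*
 * In other words, for a disabled device the first
 * dmar_instantiate_ctx() call marks the context DMAR_CTX_DISABLED
 * and keeps the reference taken by dmar_get_ctx(), pinning the
 * identity-mapped context, while every later call drops its
 * reference again.  Callers always see NULL for a disabled device
 * and fall back to the non-translated DMA tag.
 */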

bus_dma_tag_t
dmar_get_dma_tag(device_t dev, device_t child)
{
	struct dmar_unit *dmar;
	struct dmar_ctx *ctx;
	bus_dma_tag_t res;

	dmar = dmar_find(child);
	/* Not in the scope of any DMAR unit? */
	if (dmar == NULL)
		return (NULL);
	dmar_quirks_pre_use(dmar);
	dmar_instantiate_rmrr_ctxs(dmar);

	ctx = dmar_instantiate_ctx(dmar, child, false);
	res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag;
	return (res);
}

static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map");

static void dmar_bus_schedule_dmamap(struct dmar_unit *unit,
    struct bus_dmamap_dmar *map);

static int
dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	struct bus_dma_tag_dmar *newtag, *oldtag;
	int error;

	*dmat = NULL;
	newtag = NULL;	/* Keep the trace below defined on error. */
	error = common_bus_dma_tag_create(parent != NULL ?
	    &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment,
	    boundary, lowaddr, highaddr, filter, filterarg, maxsize,
	    nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
	    sizeof(struct bus_dma_tag_dmar), (void **)&newtag);
	if (error != 0)
		goto out;

	oldtag = (struct bus_dma_tag_dmar *)parent;
	newtag->common.impl = &bus_dma_dmar_impl;
	newtag->ctx = oldtag->ctx;
	newtag->owner = oldtag->owner;

	*dmat = (bus_dma_tag_t)newtag;
out:
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
	    error);
	return (error);
}

static int
dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
{
	struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent;
	int error;

	error = 0;
	dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}
		while (dmat != NULL) {
			parent = (struct bus_dma_tag_dmar *)dmat->common.parent;
			if (atomic_fetchadd_int(&dmat->common.ref_count, -1) ==
			    1) {
				if (dmat == &dmat->ctx->ctx_tag)
					dmar_free_ctx(dmat->ctx);
				free(dmat->segments, M_DMAR_DMAMAP);
				free(dmat, M_DEVBUF);
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}
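
/*
 * Drivers reach dmar_bus_dma_tag_create() through the regular
 * busdma(9) KPI, with the DMAR context tag as the root parent.  A
 * hypothetical driver allocating a tag for one 64KB, 4KB-aligned
 * segment would do:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    65536, 1, 65536, 0, NULL, NULL, &sc->dma_tag);
 *
 * where sc->dma_tag is a driver-private softc member invented for
 * this example.  The new tag inherits ctx and owner from its parent.
 */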

static int
dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = malloc(sizeof(*map), M_DMAR_DMAMAP, M_NOWAIT | M_ZERO);
	if (map == NULL) {
		*mapp = NULL;
		return (ENOMEM);
	}
	if (tag->segments == NULL) {
		tag->segments = malloc(sizeof(bus_dma_segment_t) *
		    tag->common.nsegments, M_DMAR_DMAMAP, M_NOWAIT);
		if (tag->segments == NULL) {
			free(map, M_DMAR_DMAMAP);
			*mapp = NULL;
			return (ENOMEM);
		}
	}
	TAILQ_INIT(&map->map_entries);
	map->tag = tag;
	map->locked = true;
	map->cansleep = false;
	tag->map_count++;
	*mapp = (bus_dmamap_t)map;

	return (0);
}

static int
dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = (struct bus_dmamap_dmar *)map1;
	if (map != NULL) {
		DMAR_CTX_LOCK(tag->ctx);
		if (!TAILQ_EMPTY(&map->map_entries)) {
			DMAR_CTX_UNLOCK(tag->ctx);
			return (EBUSY);
		}
		DMAR_CTX_UNLOCK(tag->ctx);
		free(map, M_DMAR_DMAMAP);
	}
	tag->map_count--;
	return (0);
}

static int
dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;
	int error, mflags;
	vm_memattr_t attr;

	error = dmar_bus_dmamap_create(dmat, flags, mapp);
	if (error != 0)
		return (error);

	mflags = (flags & BUS_DMA_NOWAIT) != 0 ? M_NOWAIT : M_WAITOK;
	mflags |= (flags & BUS_DMA_ZERO) != 0 ? M_ZERO : 0;
	attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE :
	    VM_MEMATTR_DEFAULT;

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = (struct bus_dmamap_dmar *)*mapp;

	if (tag->common.maxsize < PAGE_SIZE &&
	    tag->common.alignment <= tag->common.maxsize &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(tag->common.maxsize, M_DEVBUF, mflags);
		map->flags |= BUS_DMAMAP_DMAR_MALLOC;
	} else {
		*vaddr = (void *)kmem_alloc_attr(kernel_arena,
		    tag->common.maxsize, mflags, 0ul, BUS_SPACE_MAXADDR,
		    attr);
		map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC;
	}
	if (*vaddr == NULL) {
		dmar_bus_dmamap_destroy(dmat, *mapp);
		*mapp = NULL;
		return (ENOMEM);
	}
	return (0);
}

static void
dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = (struct bus_dmamap_dmar *)map1;

	if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) {
		free(vaddr, M_DEVBUF);
		map->flags &= ~BUS_DMAMAP_DMAR_MALLOC;
	} else {
		KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0,
		    ("dmar_bus_dmamem_free for non-allocated map %p", map));
		kmem_free(kernel_arena, (vm_offset_t)vaddr,
		    tag->common.maxsize);
		map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC;
	}

	dmar_bus_dmamap_destroy(dmat, map1);
}
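
/*
 * As an example of the allocation policy above: a tag with maxsize
 * 512 and alignment 256 is served by malloc(9), since kernel malloc
 * returns naturally-aligned buffers for power-of-2 sizes below
 * PAGE_SIZE, while a multi-page maxsize, a stricter alignment, or a
 * BUS_DMA_NOCACHE request falls through to kmem_alloc_attr().  The
 * specific numbers are only an illustration.
 */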

static int
dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
    struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
    int flags, bus_dma_segment_t *segs, int *segp,
    struct dmar_map_entries_tailq *unroll_list)
{
	struct dmar_ctx *ctx;
	struct dmar_map_entry *entry;
	dmar_gaddr_t size;
	bus_size_t buflen1;
	int error, idx, gas_flags, seg;

	if (segs == NULL)
		segs = tag->segments;
	ctx = tag->ctx;
	seg = *segp;
	error = 0;
	idx = 0;
	while (buflen > 0) {
		seg++;
		if (seg >= tag->common.nsegments) {
			error = EFBIG;
			break;
		}
		buflen1 = buflen > tag->common.maxsegsz ?
		    tag->common.maxsegsz : buflen;
		size = round_page(offset + buflen1);

		/*
		 * (Too) optimistically allow a split if more than
		 * one segment is left.
		 */
		gas_flags = map->cansleep ? DMAR_GM_CANWAIT : 0;
		if (seg + 1 < tag->common.nsegments)
			gas_flags |= DMAR_GM_CANSPLIT;

		error = dmar_gas_map(ctx, &tag->common, size,
		    DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
		    gas_flags, ma + idx, &entry);
		if (error != 0)
			break;
		if ((gas_flags & DMAR_GM_CANSPLIT) != 0) {
			KASSERT(size >= entry->end - entry->start,
			    ("split increased entry size %jx %jx %jx",
			    (uintmax_t)size, (uintmax_t)entry->start,
			    (uintmax_t)entry->end));
			size = entry->end - entry->start;
			/*
			 * If the request was split, trim the segment
			 * to the payload that fits into the returned
			 * entry; the remainder of the buffer is
			 * mapped on the next loop iteration.
			 */
			if (buflen1 > size - offset)
				buflen1 = size - offset;
		} else {
			KASSERT(entry->end - entry->start == size,
			    ("no split allowed %jx %jx %jx",
			    (uintmax_t)size, (uintmax_t)entry->start,
			    (uintmax_t)entry->end));
		}

		KASSERT(((entry->start + offset) & (tag->common.alignment - 1))
		    == 0,
		    ("alignment failed: ctx %p start 0x%jx offset %x "
		    "align 0x%jx", ctx, (uintmax_t)entry->start, offset,
		    (uintmax_t)tag->common.alignment));
		KASSERT(entry->end <= tag->common.lowaddr ||
		    entry->start >= tag->common.highaddr,
		    ("entry placement failed: ctx %p start 0x%jx end 0x%jx "
		    "lowaddr 0x%jx highaddr 0x%jx", ctx,
		    (uintmax_t)entry->start, (uintmax_t)entry->end,
		    (uintmax_t)tag->common.lowaddr,
		    (uintmax_t)tag->common.highaddr));
		KASSERT(dmar_test_boundary(entry->start, entry->end -
		    entry->start, tag->common.boundary),
		    ("boundary failed: ctx %p start 0x%jx end 0x%jx "
		    "boundary 0x%jx", ctx, (uintmax_t)entry->start,
		    (uintmax_t)entry->end, (uintmax_t)tag->common.boundary));
		KASSERT(buflen1 <= tag->common.maxsegsz,
		    ("segment too large: ctx %p start 0x%jx end 0x%jx "
		    "maxsegsz 0x%jx", ctx, (uintmax_t)entry->start,
		    (uintmax_t)entry->end, (uintmax_t)tag->common.maxsegsz));

		DMAR_CTX_LOCK(ctx);
		TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
		entry->flags |= DMAR_MAP_ENTRY_MAP;
		DMAR_CTX_UNLOCK(ctx);
		TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);

		segs[seg].ds_addr = entry->start + offset;
		segs[seg].ds_len = buflen1;

		idx += OFF_TO_IDX(trunc_page(offset + buflen1));
		offset += buflen1;
		offset &= DMAR_PAGE_MASK;
		/*
		 * Account only for the bytes actually placed into
		 * the segment; a split above may have reduced
		 * buflen1.
		 */
		buflen -= buflen1;
	}
	if (error == 0)
		*segp = seg;
	return (error);
}
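
/*
 * A worked example of the split handling, with made-up sizes:
 * loading 10KB starting at page offset 0x800 requests a
 * round_page(0x800 + 0x2800) = 12KB window.  If the guest address
 * space is fragmented and dmar_gas_map() can only return an 8KB
 * entry, the segment is trimmed to 0x1800 bytes of payload (8KB
 * minus the 0x800 offset) and the remaining 4KB is carried over
 * into a new segment on the following iteration.
 */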

static int
dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
    struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
    int flags, bus_dma_segment_t *segs, int *segp)
{
	struct dmar_ctx *ctx;
	struct dmar_map_entry *entry, *entry1;
	struct dmar_map_entries_tailq unroll_list;
	int error;

	ctx = tag->ctx;
	atomic_add_long(&ctx->loads, 1);

	TAILQ_INIT(&unroll_list);
	error = dmar_bus_dmamap_load_something1(tag, map, ma, offset,
	    buflen, flags, segs, segp, &unroll_list);
	if (error != 0) {
		/*
		 * The busdma interface does not allow us to report a
		 * partial buffer load, so unfortunately we have to
		 * revert all work done.
		 */
		DMAR_CTX_LOCK(ctx);
		TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
		    entry1) {
			/*
			 * No entries other than those we created
			 * during the failed run could have been
			 * inserted in between, since we own the ctx
			 * pglock.
			 */
			TAILQ_REMOVE(&map->map_entries, entry, dmamap_link);
			TAILQ_REMOVE(&unroll_list, entry, unroll_link);
			TAILQ_INSERT_TAIL(&ctx->unload_entries, entry,
			    dmamap_link);
		}
		DMAR_CTX_UNLOCK(ctx);
		taskqueue_enqueue(ctx->dmar->delayed_taskqueue,
		    &ctx->unload_task);
	}

	if (error == ENOMEM && (flags & BUS_DMA_NOWAIT) == 0 &&
	    !map->cansleep)
		error = EINPROGRESS;
	if (error == EINPROGRESS)
		dmar_bus_schedule_dmamap(ctx->dmar, map);
	return (error);
}
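
/*
 * The ENOMEM-to-EINPROGRESS conversion above implements the standard
 * busdma(9) deferred-load contract: a sleep-capable retry is queued
 * to the unit's taskqueue and the driver's callback runs later, so a
 * caller that did not pass BUS_DMA_NOWAIT must treat EINPROGRESS as
 * a load in progress rather than a failure.
 */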

static int
dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = (struct bus_dmamap_dmar *)map1;
	return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
	    flags, segs, segp));
}

static int
dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;
	vm_page_t *ma;
	vm_paddr_t pstart, pend;
	int error, i, ma_cnt, offset;

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = (struct bus_dmamap_dmar *)map1;
	pstart = trunc_page(buf);
	pend = round_page(buf + buflen);
	offset = buf & PAGE_MASK;
	ma_cnt = OFF_TO_IDX(pend - pstart);
	ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, map->cansleep ?
	    M_WAITOK : M_NOWAIT);
	if (ma == NULL)
		return (ENOMEM);
	for (i = 0; i < ma_cnt; i++)
		ma[i] = PHYS_TO_VM_PAGE(pstart + i * PAGE_SIZE);
	error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
	    flags, segs, segp);
	free(ma, M_DEVBUF);
	return (error);
}

static int
dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;
	vm_page_t *ma, fma;
	vm_paddr_t pstart, pend, paddr;
	int error, i, ma_cnt, offset;

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = (struct bus_dmamap_dmar *)map1;
	pstart = trunc_page((vm_offset_t)buf);
	pend = round_page((vm_offset_t)buf + buflen);
	offset = (vm_offset_t)buf & PAGE_MASK;
	ma_cnt = OFF_TO_IDX(pend - pstart);
	ma = malloc(sizeof(vm_page_t) * ma_cnt, M_DEVBUF, map->cansleep ?
	    M_WAITOK : M_NOWAIT);
	if (ma == NULL)
		return (ENOMEM);
	if (dumping) {
		/*
		 * If dumping, do not attempt to call
		 * PHYS_TO_VM_PAGE() at all.  It may return non-NULL
		 * but the vm_page returned might not be initialized,
		 * e.g. for the kernel itself.
		 */
		KASSERT(pmap == kernel_pmap, ("non-kernel address write"));
		fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF,
		    M_ZERO | (map->cansleep ? M_WAITOK : M_NOWAIT));
		if (fma == NULL) {
			free(ma, M_DEVBUF);
			return (ENOMEM);
		}
		for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
			paddr = pmap_kextract(pstart);
			vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT);
			ma[i] = &fma[i];
		}
	} else {
		fma = NULL;
		for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(pstart);
			else
				paddr = pmap_extract(pmap, pstart);
			ma[i] = PHYS_TO_VM_PAGE(paddr);
			KASSERT(VM_PAGE_TO_PHYS(ma[i]) == paddr,
			    ("PHYS_TO_VM_PAGE failed %jx %jx m %p",
			    (uintmax_t)paddr,
			    (uintmax_t)VM_PAGE_TO_PHYS(ma[i]), ma[i]));
		}
	}
	error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
	    flags, segs, segp);
	free(ma, M_DEVBUF);
	free(fma, M_DEVBUF);
	return (error);
}
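
/*
 * All three load entry points above thus funnel into
 * dmar_bus_dmamap_load_something() with a plain vm_page_t array:
 * load_ma passes the caller's pages through, load_phys synthesizes
 * the array from a physical range, and load_buffer resolves virtual
 * addresses via pmap_kextract()/pmap_extract(), substituting fake
 * pages while dumping because the real vm_page structures cannot be
 * trusted at that point.
 */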

static void
dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
	struct bus_dmamap_dmar *map;

	if (map1 == NULL)
		return;
	map = (struct bus_dmamap_dmar *)map1;
	map->mem = *mem;
	map->tag = (struct bus_dma_tag_dmar *)dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

static bus_dma_segment_t *
dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
    bus_dma_segment_t *segs, int nsegs, int error)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = (struct bus_dmamap_dmar *)map1;

	if (!map->locked) {
		KASSERT(map->cansleep,
		    ("map not locked and not sleepable context %p", map));

		/*
		 * We are called from the delayed context.  Relock the
		 * driver.
		 */
		(tag->common.lockfunc)(tag->common.lockfuncarg, BUS_DMA_LOCK);
		map->locked = true;
	}

	if (segs == NULL)
		segs = tag->segments;
	return (segs);
}

/*
 * The limitations of the busdma KPI force the DMAR driver to perform
 * the actual unload, i.e. the removal of the map entries from the
 * page tables, from the delayed context on i386, since mapping the
 * page table pages might require a sleep to succeed.  The
 * unfortunate consequence is that DMA requests can still be served
 * for some time after the bus_dmamap_unload() call has returned.
 *
 * On amd64, we assume that the sf_buf allocation cannot fail.
 */
static void
dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;
	struct dmar_ctx *ctx;
#if defined(__amd64__)
	struct dmar_map_entries_tailq entries;
#endif

	tag = (struct bus_dma_tag_dmar *)dmat;
	map = (struct bus_dmamap_dmar *)map1;
	ctx = tag->ctx;
	atomic_add_long(&ctx->unloads, 1);

#if defined(__i386__)
	DMAR_CTX_LOCK(ctx);
	TAILQ_CONCAT(&ctx->unload_entries, &map->map_entries, dmamap_link);
	DMAR_CTX_UNLOCK(ctx);
	taskqueue_enqueue(ctx->dmar->delayed_taskqueue, &ctx->unload_task);
#else /* defined(__amd64__) */
	TAILQ_INIT(&entries);
	DMAR_CTX_LOCK(ctx);
	TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
	DMAR_CTX_UNLOCK(ctx);
	THREAD_NO_SLEEPING();
	dmar_ctx_unload(ctx, &entries, false);
	THREAD_SLEEPING_OK();
	KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx));
#endif
}
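
/*
 * Sketch of the resulting unload flow: on amd64 the entries are
 * unmapped synchronously under THREAD_NO_SLEEPING(), so the mapping
 * is gone when bus_dmamap_unload() returns; on i386 the entries are
 * merely moved to ctx->unload_entries and the taskqueue performs the
 * unmapping later, which is why a driver must not assume the device
 * has lost access to the buffer immediately after the call.
 */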

static void
dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
}

struct bus_dma_impl bus_dma_dmar_impl = {
	.tag_create = dmar_bus_dma_tag_create,
	.tag_destroy = dmar_bus_dma_tag_destroy,
	.map_create = dmar_bus_dmamap_create,
	.map_destroy = dmar_bus_dmamap_destroy,
	.mem_alloc = dmar_bus_dmamem_alloc,
	.mem_free = dmar_bus_dmamem_free,
	.load_phys = dmar_bus_dmamap_load_phys,
	.load_buffer = dmar_bus_dmamap_load_buffer,
	.load_ma = dmar_bus_dmamap_load_ma,
	.map_waitok = dmar_bus_dmamap_waitok,
	.map_complete = dmar_bus_dmamap_complete,
	.map_unload = dmar_bus_dmamap_unload,
	.map_sync = dmar_bus_dmamap_sync
};

static void
dmar_bus_task_dmamap(void *arg, int pending)
{
	struct bus_dma_tag_dmar *tag;
	struct bus_dmamap_dmar *map;
	struct dmar_unit *unit;

	unit = arg;
	DMAR_LOCK(unit);
	while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
		TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
		DMAR_UNLOCK(unit);
		tag = map->tag;
		map->cansleep = true;
		map->locked = false;
		bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map,
		    &map->mem, map->callback, map->callback_arg,
		    BUS_DMA_WAITOK);
		map->cansleep = false;
		if (map->locked) {
			(tag->common.lockfunc)(tag->common.lockfuncarg,
			    BUS_DMA_UNLOCK);
		} else
			map->locked = true;
		DMAR_LOCK(unit);
	}
	DMAR_UNLOCK(unit);
}

static void
dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map)
{

	map->locked = false;
	DMAR_LOCK(unit);
	TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
	DMAR_UNLOCK(unit);
	taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
}

int
dmar_init_busdma(struct dmar_unit *unit)
{

	TAILQ_INIT(&unit->delayed_maps);
	TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit);
	unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->delayed_taskqueue);
	taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
	    "dmar%d busdma taskq", unit->unit);
	return (0);
}

void
dmar_fini_busdma(struct dmar_unit *unit)
{

	if (unit->delayed_taskqueue == NULL)
		return;

	taskqueue_drain(unit->delayed_taskqueue, &unit->dmamap_load_task);
	taskqueue_free(unit->delayed_taskqueue);
	unit->delayed_taskqueue = NULL;
}