/*	$NetBSD: iommu.c,v 1.119 2023/12/20 05:33:58 thorpej Exp $	*/

/*
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002 Eduardo Horvath
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iommu.c,v 1.119 2023/12/20 05:33:58 thorpej Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/extent.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <sparc64/dev/iommureg.h>
#include <sparc64/dev/iommuvar.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/hypervisor.h>

#ifdef DEBUG
#define IDB_BUSDMA	0x1
#define IDB_IOMMU	0x2
#define IDB_INFO	0x4
#define IDB_SYNC	0x8
int iommudebug = 0x0;
#define DPRINTF(l, s)	do { if (iommudebug & l) printf s; } while (0)
#define IOTTE_DEBUG(n)	(n)
#else
#define DPRINTF(l, s)
#define IOTTE_DEBUG(n)	0
#endif

#define iommu_strbuf_flush(i, v)	do {				\
	if ((i)->sb_flush)						\
		bus_space_write_8((i)->sb_is->is_bustag, (i)->sb_sb,	\
		    STRBUFREG(strbuf_pgflush), (v));			\
	} while (0)

static int iommu_strbuf_flush_done(struct strbuf_ctl *);
static void _iommu_dvmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	bus_size_t, int);
static void iommu_enter_sun4u(struct strbuf_ctl *sb, vaddr_t va, int64_t pa,
	int flags);
static void iommu_enter_sun4v(struct strbuf_ctl *sb, vaddr_t va, int64_t pa,
	int flags);
static void iommu_remove_sun4u(struct iommu_state *is, vaddr_t va, size_t len);
static void iommu_remove_sun4v(struct iommu_state *is, vaddr_t va, size_t len);

/*
 * initialise the UltraSPARC IOMMU (SBUS or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(char *name, struct iommu_state *is, int tsbsize, uint32_t iovabase)
{
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *pg;
	struct pglist pglist;

	DPRINTF(IDB_INFO, ("iommu_init: tsbsize %x iovabase %x\n",
	    tsbsize, iovabase));

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we
	 * will deal with it here.
	 *
	 * For sysio and psycho/psycho+ the IOMMU address space always ends at
	 * 0xffffe000, but the starting address depends on the size of the
	 * map.  The map size is 1024 * 2 ^ is->is_tsbsize entries, where each
	 * entry is 8 bytes.  The start of the map can be calculated by
	 * (0xffffe000 << (8 + is->is_tsbsize)).
	 *
	 * But sabre and hummingbird use a different scheme that seems to
	 * be hard-wired, so we read the start and size from the PROM and
	 * just use those values.
	 */
	if (strncmp(name, "pyro", 4) == 0) {
		is->is_cr = IOMMUREG_READ(is, iommu_cr);
		is->is_cr &= ~IOMMUCR_FIRE_BE;
		is->is_cr |= (IOMMUCR_FIRE_SE | IOMMUCR_FIRE_CM_EN |
		    IOMMUCR_FIRE_TE);
	} else
		is->is_cr = IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	if (iovabase == -1) {
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);
		is->is_dvmaend = IOTSB_VEND - 1;
	} else {
		is->is_dvmabase = iovabase;
		is->is_dvmaend = iovabase + IOTSB_VSIZE(tsbsize) - 1;
	}

	/*
	 * Allocate memory for I/O pagetables.  They need to be
	 * physically contiguous.
	 */
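	/*
	 * Sizing note (illustrative figures, assuming the 8 KB sparc64
	 * page size): tsbsize 0 gives a one-page TSB holding 1024
	 * eight-byte IOTTEs, i.e. an 8 MB DVMA window; each increment
	 * of tsbsize doubles both the TSB and the window, so tsbsize 3
	 * is 8192 entries covering 64 MB of DVMA space.
	 */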
	size = PAGE_SIZE << is->is_tsbsize;
	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
	    (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
		panic("iommu_init: no memory");

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
	if (va == 0)
		panic("iommu_init: no memory");
	is->is_tsb = (int64_t *)va;

	is->is_ptsb = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	/* Map the pages */
	TAILQ_FOREACH(pg, &pglist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(pg);
		pmap_kenter_pa(va, pa | PMAP_NVC,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(is->is_tsb, 0, size);

#ifdef DEBUG
	if (iommudebug & IDB_INFO) {
		/* Probe the iommu */
		if (!CPU_ISSUN4V) {
			printf("iommu cr=%llx tsb=%llx\n",
			    (unsigned long long)bus_space_read_8(is->is_bustag,
				is->is_iommu,
				offsetof(struct iommureg, iommu_cr)),
			    (unsigned long long)bus_space_read_8(is->is_bustag,
				is->is_iommu,
				offsetof(struct iommureg, iommu_tsb)));
			printf("TSB base %p phys %llx\n", (void *)is->is_tsb,
			    (unsigned long long)is->is_ptsb);
			delay(1000000); /* 1 s */
		}
	}
#endif

	/*
	 * Now that all the hardware's working we need to allocate a DVMA map.
	 */
	aprint_debug("DVMA map: %x to %x\n",
	    (unsigned int)is->is_dvmabase,
	    (unsigned int)is->is_dvmaend);
	aprint_debug("IOTSB: %llx to %llx\n",
	    (unsigned long long)is->is_ptsb,
	    (unsigned long long)(is->is_ptsb + size - 1));
	is->is_dvmamap = vmem_create(name,
	    is->is_dvmabase,
	    (is->is_dvmaend + 1) - is->is_dvmabase,
	    PAGE_SIZE,		/* quantum */
	    NULL,		/* importfn */
	    NULL,		/* releasefn */
	    NULL,		/* source */
	    0,			/* qcache_max */
	    VM_SLEEP,
	    IPL_VM);
	KASSERT(is->is_dvmamap != NULL);

	/*
	 * Set the TSB size.  The relevant bits were moved to the TSB
	 * base register in the PCIe host bridges.
	 */
	if (is->is_flags & IOMMU_TSBSIZE_IN_PTSB)
		is->is_ptsb |= is->is_tsbsize;
	else
		is->is_cr |= (is->is_tsbsize << 16);

	/*
	 * now actually start up the IOMMU
	 */
	iommu_reset(is);
}

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain.
 */
void
iommu_reset(struct iommu_state *is)
{
	int i;
	struct strbuf_ctl *sb;

	if (CPU_ISSUN4V)
		return;

	IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb);

	/* Enable IOMMU in diagnostic mode */
	IOMMUREG_WRITE(is, iommu_cr, is->is_cr | IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if ((sb = is->is_sb[i])) {

			/* Enable diagnostics mode? */
			bus_space_write_8(is->is_bustag, is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl), STRBUF_EN);

			membar_Lookaside();

			/* No streaming buffers? Disable them */
			if (bus_space_read_8(is->is_bustag,
			    is->is_sb[i]->sb_sb,
			    STRBUFREG(strbuf_ctl)) == 0) {
				is->is_sb[i]->sb_flush = NULL;
			} else {

				/*
				 * locate the pa of the flush buffer.
				 */
				if (pmap_extract(pmap_kernel(),
				    (vaddr_t)is->is_sb[i]->sb_flush,
				    &is->is_sb[i]->sb_flushpa) == FALSE)
					is->is_sb[i]->sb_flush = NULL;
			}
		}
	}

	if (is->is_flags & IOMMU_FLUSH_CACHE)
		IOMMUREG_WRITE(is, iommu_cache_invalidate, -1ULL);
}

/*
 * Here are the iommu control routines.
 */
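
/*
 * On sun4u the enter/remove routines below write the 8-byte IOTTEs
 * straight into the TSB and then poke the IOMMU flush register; on
 * sun4v the TSB belongs to the hypervisor, so the equivalent work is
 * done through the hv_pci_iommu_map()/hv_pci_iommu_demap() hypercalls.
 */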

static void
iommu_enter(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	DPRINTF(IDB_IOMMU, ("iommu_enter: va %lx pa %lx flags %x\n",
	    va, (long)pa, flags));
	if (!CPU_ISSUN4V)
		iommu_enter_sun4u(sb, va, pa, flags);
	else
		iommu_enter_sun4v(sb, va, pa, flags);
}


void
iommu_enter_sun4u(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	int strbuf = (flags & BUS_DMA_STREAMING);
	int64_t tte;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_enter: va %#lx not in DVMA space", va);
#endif

	/* Is the streamcache flush really needed? */
	if (sb->sb_flush)
		iommu_strbuf_flush(sb, va);
	else
		/* If we can't flush the strbuf don't enable it. */
		strbuf = 0;

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), (strbuf));
#ifdef DEBUG
	tte |= (flags & 0xff000LL) << (4 * 8);
#endif

	is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] = tte;
	bus_space_write_8(is->is_bustag, is->is_iommu,
	    IOMMUREG(iommu_flush), va);
	DPRINTF(IDB_IOMMU, ("iommu_enter: slot %d va %lx pa %lx "
	    "TSB[%lx]@%p=%lx\n", (int)IOTSBSLOT(va, is->is_tsbsize),
	    va, (long)pa, (u_long)IOTSBSLOT(va, is->is_tsbsize),
	    (void *)(u_long)&is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)],
	    (u_long)tte));
}

void
iommu_enter_sun4v(struct strbuf_ctl *sb, vaddr_t va, int64_t pa, int flags)
{
	struct iommu_state *is = sb->sb_is;
	u_int64_t tsbid = IOTSBSLOT(va, is->is_tsbsize);
	paddr_t page_list[1], addr;
	u_int64_t attr, nmapped;
	int err;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || (va + PAGE_MASK) > is->is_dvmaend)
		panic("viommu_enter: va %#lx not in DVMA space", va);
#endif

	attr = PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE;
	if (flags & BUS_DMA_READ)
		attr &= ~PCI_MAP_ATTR_READ;
	if (flags & BUS_DMA_WRITE)
		attr &= ~PCI_MAP_ATTR_WRITE;

	page_list[0] = trunc_page(pa);
	if (!pmap_extract(pmap_kernel(), (vaddr_t)page_list, &addr))
		panic("viommu_enter: pmap_extract failed");
	err = hv_pci_iommu_map(is->is_devhandle, tsbid, 1, attr,
	    addr, &nmapped);
	if (err != H_EOK || nmapped != 1)
		panic("hv_pci_iommu_map: err=%d, nmapped=%lu", err,
		    (unsigned long)nmapped);
}

/*
 * Find the value of a DVMA address (debug routine).
 */
paddr_t
iommu_extract(struct iommu_state *is, vaddr_t dva)
{
	int64_t tte = 0;

	if (dva >= is->is_dvmabase && dva <= is->is_dvmaend)
		tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)];

	if ((tte & IOTTE_V) == 0)
		return ((paddr_t)-1L);
	return (tte & IOTTE_PAMASK);
}

/*
 * iommu_remove: removes mappings created by iommu_enter
 *
 * Only demap from IOMMU if flag is set.
 *
 * XXX: this function needs better internal error checking.
 */

static void
iommu_remove(struct iommu_state *is, vaddr_t va, size_t len)
{
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx len %zu\n", va, len));
	if (!CPU_ISSUN4V)
		iommu_remove_sun4u(is, va, len);
	else
		iommu_remove_sun4v(is, va, len);
}

void
iommu_remove_sun4u(struct iommu_state *is, vaddr_t va, size_t len)
{

	int slot;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if ((long)(va + len) < (long)va)
		panic("iommu_remove: va 0x%lx + len 0x%lx wraps",
		    (long)va, (long)len);
	if (len & ~0xfffffff)
		panic("iommu_remove: ridiculous len 0x%lx", (u_long)len);
#endif

	va = trunc_page(va);
	DPRINTF(IDB_IOMMU, ("iommu_remove: va %lx TSB[%lx]@%p\n",
	    va, (u_long)IOTSBSLOT(va, is->is_tsbsize),
	    &is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)]));
	while (len > 0) {
		DPRINTF(IDB_IOMMU, ("iommu_remove: clearing TSB slot %d "
		    "for va %p size %lx\n",
		    (int)IOTSBSLOT(va, is->is_tsbsize), (void *)(u_long)va,
		    (u_long)len));
		if (len <= PAGE_SIZE)
			len = 0;
		else
			len -= PAGE_SIZE;

#if 0
		/*
		 * XXX Zeroing the entry would not require RMW.
		 *
		 * Clearing the valid bit while a page is still in use by
		 * a device causes an uncorrectable DMA error.  The
		 * workaround is to eliminate the next line, but then the
		 * page stays mapped until the next iommu_enter call.
		 */
		is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] &= ~IOTTE_V;
		membar_StoreStore();
#endif
		IOMMUREG_WRITE(is, iommu_flush, va);

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(va), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (len == 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		va += PAGE_SIZE;
	}
}

void
iommu_remove_sun4v(struct iommu_state *is, vaddr_t va, size_t len)
{
	u_int64_t tsbid = IOTSBSLOT(va, is->is_tsbsize);
	u_int64_t ndemapped;
	int err;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || (va + PAGE_MASK) > is->is_dvmaend)
		panic("iommu_remove: va 0x%lx not in DVMA space", (u_long)va);
	if (va != trunc_page(va)) {
		printf("iommu_remove: unaligned va: %lx\n", va);
		va = trunc_page(va);
	}
#endif

	err = hv_pci_iommu_demap(is->is_devhandle, tsbid, 1, &ndemapped);
	if (err != H_EOK || ndemapped != 1)
		panic("hv_pci_iommu_demap: err=%d", err);
}

static int
iommu_strbuf_flush_done(struct strbuf_ctl *sb)
{
	struct iommu_state *is = sb->sb_is;
	struct timeval cur, flushtimeout;

#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

	if (!sb->sb_flush)
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something went wrong.
	 */
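	/*
	 * The flag word *sb->sb_flush is polled through its physical
	 * address (sb_flushpa, captured by pmap_extract() in
	 * iommu_reset()) because the hardware writes the completion
	 * flag by DMA; reading via ASI_PHYS_CACHED avoids picking up a
	 * stale value from the non-coherent D$.
	 */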
	*sb->sb_flush = 0;
	bus_space_write_8(is->is_bustag, sb->sb_sb,
	    STRBUFREG(strbuf_flushsync), sb->sb_flushpa);

	microtime(&flushtimeout);
	cur = flushtimeout;
	BUMPTIME(&flushtimeout, 500000); /* 1/2 sec */

	DPRINTF(IDB_IOMMU, ("%s: flush = %lx at va = %lx pa = %lx now="
	    "%"PRIx64":%"PRIx32" until = %"PRIx64":%"PRIx32"\n", __func__,
	    (long)*sb->sb_flush, (long)sb->sb_flush, (long)sb->sb_flushpa,
	    cur.tv_sec, cur.tv_usec,
	    flushtimeout.tv_sec, flushtimeout.tv_usec));

	/* Bypass non-coherent D$ */
	while ((!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) &&
	    timercmp(&cur, &flushtimeout, <=))
		microtime(&cur);

#ifdef DIAGNOSTIC
	if (!ldxa(sb->sb_flushpa, ASI_PHYS_CACHED)) {
		printf("%s: flush timeout %p, at %p\n", __func__,
		    (void *)(u_long)*sb->sb_flush,
		    (void *)(u_long)sb->sb_flushpa); /* panic? */
#ifdef DDB
		Debugger();
#endif
	}
#endif
	DPRINTF(IDB_IOMMU, ("%s: flushed\n", __func__));
	return (*sb->sb_flush);
}

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
int
iommu_dvmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	int err, needsflush;
	bus_size_t sgsize;
	paddr_t curaddr;
	u_long sgstart, sgend, bmask;
	vmem_addr_t dvmaddr;
	bus_size_t align, boundary, len;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	struct pmap *pmap;
	int slot;

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		    "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	sgsize = round_page(buflen + ((int)vaddr & PGOFSET));

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = uimax(map->dm_segs[0]._ds_align, PAGE_SIZE);

	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	KASSERT(is->is_dvmamap != NULL);
	err = vmem_xalloc(is->is_dvmamap, sgsize,
	    align,		/* alignment */
	    0,			/* phase */
	    (sgsize > boundary) ? 0 : boundary,
	    VMEM_ADDR_MIN,	/* minaddr */
	    VMEM_ADDR_MAX,	/* maxaddr */
	    VM_NOSLEEP | VM_BESTFIT,
	    &dvmaddr);

#ifdef DEBUG
	if (err || (dvmaddr == (u_long)-1)) {
		printf("iommu_dvmamap_load(): vmem_xalloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (err != 0)
		return (err);

	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	/*
	 * Now split the DVMA range into segments, not crossing
	 * the boundary.
	 */
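	/*
	 * Worked example (illustrative figures only): with boundary =
	 * 0x10000 and a 0x3000-byte buffer at DVMA 0x2f000, the range
	 * 0x2f000-0x31fff straddles the 0x30000 line, so the loop below
	 * emits one segment for 0x2f000-0x2ffff and leaves a second,
	 * 0x30000-0x31fff, for the code after the loop.
	 */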
	seg = 0;
	sgstart = dvmaddr + (vaddr & PGOFSET);
	sgend = sgstart + buflen - 1;
	map->dm_segs[seg].ds_addr = sgstart;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary - 1 %lx "
	    "~(boundary - 1) %lx\n", (long)boundary, (long)(boundary - 1),
	    (long)~(boundary - 1)));
	bmask = ~(boundary - 1);
	while ((sgstart & bmask) != (sgend & bmask) ||
	    sgend - sgstart + 1 > map->dm_maxsegsz) {
		/* Oops.  We crossed a boundary or the seg is too large.
		 * Split the xfer. */
		len = map->dm_maxsegsz;
		if ((sgstart & bmask) != (sgend & bmask))
			len = uimin(len, boundary - (sgstart & (boundary - 1)));
		map->dm_segs[seg].ds_len = len;
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
		    "seg %d start %lx size %lx\n", seg,
		    (long)map->dm_segs[seg].ds_addr,
		    (long)map->dm_segs[seg].ds_len));
		if (++seg >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
			    "too many segments %d\n", seg));
			vmem_xfree(is->is_dvmamap, dvmaddr, sgsize);
			map->_dm_dvmastart = 0;
			map->_dm_dvmasize = 0;
			return (EFBIG);
		}
		sgstart += len;
		map->dm_segs[seg].ds_addr = sgstart;
	}
	map->dm_segs[seg].ds_len = sgend - sgstart + 1;
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
	    "seg %d start %lx size %lx\n", seg,
	    (long)map->dm_segs[seg].ds_addr, (long)map->dm_segs[seg].ds_len));
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = buflen;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	needsflush = 0;
	for (; buflen > 0; ) {

		/*
		 * Get the physical address for this page.
		 */
		if (pmap_extract(pmap, (vaddr_t)vaddr, &curaddr) == FALSE) {
#ifdef DIAGNOSTIC
			printf("iommu_dvmamap_load: pmap_extract failed %lx\n",
			    vaddr);
#endif
			bus_dmamap_unload(t, map);
			return (-1);
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load: map %p loading va %p "
		    "dva %lx at pa %lx\n",
		    map, (void *)vaddr, (long)dvmaddr,
		    (long)trunc_page(curaddr)));
		iommu_enter(sb, trunc_page(dvmaddr), trunc_page(curaddr),
		    flags | IOTTE_DEBUG(0x4000));
		needsflush = 1;

		vaddr += sgsize;
		buflen -= sgsize;

		/* Flush cache if necessary. */
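		/*
		 * Eight 8-byte IOTTEs share one line of the IOMMU's TSB
		 * cache (assuming the 64-byte line the slot arithmetic
		 * below implies), so the flush is batched: once on the
		 * last slot of each group of eight, and once more for
		 * the final page of the transfer.
		 */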
		slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (buflen <= 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		dvmaddr += PAGE_SIZE;
	}
	if (needsflush)
		iommu_strbuf_flush_done(sb);
#ifdef DIAGNOSTIC
	for (seg = 0; seg < map->dm_nsegs; seg++) {
		if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
		    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
			printf("seg %d dvmaddr %lx out of range %x - %x\n",
			    seg, (long)map->dm_segs[seg].ds_addr,
			    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
			Debugger();
#endif
		}
	}
#endif
	return (0);
}


void
iommu_dvmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;

	/* Flush the iommu */
	if (!map->_dm_dvmastart)
		panic("%s: error dvmastart is zero!", __func__);

	if (is->is_flags & IOMMU_SYNC_BEFORE_UNMAP) {

		/* Flush the caches */
		bus_dmamap_unload(t->_parent, map);

		iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

	} else {

		iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);

		/* Flush the caches */
		bus_dmamap_unload(t->_parent, map);
	}

	/* Clear the map */
	vmem_xfree(is->is_dvmamap, map->_dm_dvmastart, map->_dm_dvmasize);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
}


int
iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	struct iommu_state *is = sb->sb_is;
	struct vm_page *pg;
	int i, j;
	int left;
	int err, needsflush;
	bus_size_t sgsize;
	paddr_t pa;
	bus_size_t boundary, align;
	u_long dvmaddr, sgstart, sgend, bmask;
	struct pglist *pglist;
	const int pagesz = PAGE_SIZE;
	int slot;
#ifdef DEBUG
	int npg = 0;
#endif

	if (map->dm_nsegs) {
		/* Already in use?? */
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_raw: map still in use\n");
#endif
		bus_dmamap_unload(t, map);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = segs[0]._ds_boundary) == 0)
		boundary = map->_dm_boundary;

	align = uimax(segs[0]._ds_align, pagesz);

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	/* Count up the total number of pages we need */
	pa = trunc_page(segs[0].ds_addr);
	sgsize = 0;
	left = size;
	for (i = 0; left > 0 && i < nsegs; i++) {
		if (round_page(pa) != round_page(segs[i].ds_addr))
			sgsize = round_page(sgsize) +
			    (segs[i].ds_addr & PGOFSET);
		sgsize += uimin(left, segs[i].ds_len);
		left -= segs[i].ds_len;
		pa = segs[i].ds_addr + segs[i].ds_len;
	}
	sgsize = round_page(sgsize);

	/*
	 * If our segment size is larger than the boundary we need to
	 * split the transfer up into little pieces ourselves.
	 */
	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	err = vmem_xalloc(is->is_dvmamap, sgsize,
	    align,		/* alignment */
	    0,			/* phase */
	    (sgsize > boundary) ? 0 : boundary,
	    VMEM_ADDR_MIN,	/* minaddr */
	    VMEM_ADDR_MAX,	/* maxaddr */
	    vmflags,
	    &dvmaddr);
	if (err != 0)
		return (err);

#ifdef DEBUG
	if (dvmaddr == (u_long)-1) {
		printf("iommu_dvmamap_load_raw(): vmem_xalloc(%d, %x) "
		    "failed!\n", (int)sgsize, flags);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	if (dvmaddr == (u_long)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	bmask = ~(boundary - 1);
	if ((pglist = segs[0]._ds_mlist) == NULL) {
		u_long prev_va = 0UL, last_va = dvmaddr;
		paddr_t prev_pa = 0;
		int end = 0, offset;
		bus_size_t len = size;

		/*
		 * These segs are made up of individual physical
		 * segments, probably by _bus_dmamap_load_uio() or
		 * _bus_dmamap_load_mbuf().  Ignore the mlist and
		 * load each one individually.
		 */
		j = 0;
		needsflush = 0;
		for (i = 0; i < nsegs ; i++) {

			pa = segs[i].ds_addr;
			offset = (pa & PGOFSET);
			pa = trunc_page(pa);
			dvmaddr = trunc_page(dvmaddr);
			left = uimin(len, segs[i].ds_len);

			DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
			    "converting physseg %d start %lx size %lx\n", i,
			    (long)segs[i].ds_addr, (long)segs[i].ds_len));

			if ((pa == prev_pa) &&
			    ((offset != 0) || (end != offset))) {
				/* We can re-use this mapping */
				dvmaddr = prev_va;
			}

			sgstart = dvmaddr + offset;
			sgend = sgstart + left - 1;

			/* Are the segments virtually adjacent? */
			if ((j > 0) && (end == offset) &&
			    ((offset == 0) || (pa == prev_pa)) &&
			    (map->dm_segs[j-1].ds_len + left <=
			    map->dm_maxsegsz)) {
				/* Just append to the previous segment. */
				map->dm_segs[--j].ds_len += left;
				/* Restore sgstart for boundary check */
				sgstart = map->dm_segs[j].ds_addr;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "appending seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			} else {
				if (j >= map->_dm_segcnt) {
					iommu_remove(is, map->_dm_dvmastart,
					    last_va - map->_dm_dvmastart);
					goto fail;
				}
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = left;
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
			}
			end = (offset + left) & PGOFSET;

			/* Check for boundary issues */
			while ((sgstart & bmask) != (sgend & bmask)) {
				/* Need a new segment. */
				map->dm_segs[j].ds_len =
				    boundary - (sgstart & (boundary - 1));
				DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
				    "seg %d start %lx size %lx\n", j,
				    (long)map->dm_segs[j].ds_addr,
				    (long)map->dm_segs[j].ds_len));
				if (++j >= map->_dm_segcnt) {
					iommu_remove(is, map->_dm_dvmastart,
					    last_va - map->_dm_dvmastart);
					goto fail;
				}
				sgstart += map->dm_segs[j-1].ds_len;
				map->dm_segs[j].ds_addr = sgstart;
				map->dm_segs[j].ds_len = sgend - sgstart + 1;
			}

			if (sgsize == 0)
				panic("iommu_dmamap_load_raw: size botch");

			/* Now map a series of pages. */
			while (dvmaddr <= sgend) {
				DPRINTF(IDB_BUSDMA,
				    ("iommu_dvmamap_load_raw: map %p "
				    "loading va %lx at pa %lx\n",
				    map, (long)dvmaddr,
				    (long)(pa)));
				/* Enter it if we haven't before. */
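				/*
				 * uio/mbuf chains may hand us consecutive
				 * segments that share a physical page
				 * (pa == prev_pa); those reuse the DVMA
				 * page entered on an earlier pass, so the
				 * TTE is written only the first time.
				 */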
				if (prev_va != dvmaddr) {
					iommu_enter(sb, prev_va = dvmaddr,
					    prev_pa = pa,
					    flags | IOTTE_DEBUG(++npg << 12));
					needsflush = 1;

					/* Flush cache if necessary. */
					slot = IOTSBSLOT(trunc_page(dvmaddr),
					    is->is_tsbsize);
					if ((is->is_flags &
					    IOMMU_FLUSH_CACHE) &&
					    ((dvmaddr + pagesz) > sgend ||
					    (slot % 8) == 7))
						IOMMUREG_WRITE(is,
						    iommu_cache_flush,
						    is->is_ptsb + slot * 8);
				}

				dvmaddr += pagesz;
				pa += pagesz;
				last_va = dvmaddr;
			}

			len -= left;
			++j;
		}
		if (needsflush)
			iommu_strbuf_flush_done(sb);

		map->dm_mapsize = size;
		map->dm_nsegs = j;
#ifdef DIAGNOSTIC
		{
			int seg;
			for (seg = 0; seg < map->dm_nsegs; seg++) {
				if (map->dm_segs[seg].ds_addr <
				    is->is_dvmabase ||
				    map->dm_segs[seg].ds_addr >
				    is->is_dvmaend) {
					printf("seg %d dvmaddr %lx out of "
					    "range %x - %x\n",
					    seg,
					    (long)map->dm_segs[seg].ds_addr,
					    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
					Debugger();
#endif
				}
			}
		}
#endif
		return (0);
	}

	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on a `pglist'.
	 */
	i = 0;
	sgstart = dvmaddr;
	sgend = sgstart + size - 1;
	map->dm_segs[i].ds_addr = sgstart;
	while ((sgstart & bmask) != (sgend & bmask)) {
		/* Oops.  We crossed a boundary.  Split the xfer. */
		map->dm_segs[i].ds_len = boundary - (sgstart & (boundary - 1));
		DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
		    "seg %d start %lx size %lx\n", i,
		    (long)map->dm_segs[i].ds_addr,
		    (long)map->dm_segs[i].ds_len));
		if (++i >= map->_dm_segcnt) {
			/* Too many segments.  Fail the operation. */
			goto fail;
		}
		sgstart += map->dm_segs[i-1].ds_len;
		map->dm_segs[i].ds_addr = sgstart;
	}
	DPRINTF(IDB_INFO, ("iommu_dvmamap_load_raw: "
	    "seg %d start %lx size %lx\n", i,
	    (long)map->dm_segs[i].ds_addr, (long)map->dm_segs[i].ds_len));
	map->dm_segs[i].ds_len = sgend - sgstart + 1;

	needsflush = 0;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		if (sgsize == 0)
			panic("iommu_dmamap_load_raw: size botch");
		pa = VM_PAGE_TO_PHYS(pg);

		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load_raw: map %p loading va %lx at "
		    "pa %lx\n", map, (long)dvmaddr, (long)(pa)));
		iommu_enter(sb, dvmaddr, pa, flags | IOTTE_DEBUG(0x8000));
		needsflush = 1;

		sgsize -= pagesz;

		/* Flush cache if necessary. */
		slot = IOTSBSLOT(trunc_page(dvmaddr), is->is_tsbsize);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) &&
		    (sgsize == 0 || (slot % 8) == 7))
			IOMMUREG_WRITE(is, iommu_cache_flush,
			    is->is_ptsb + slot * 8);

		dvmaddr += pagesz;
	}
	if (needsflush)
		iommu_strbuf_flush_done(sb);
	map->dm_mapsize = size;
	map->dm_nsegs = i + 1;
#ifdef DIAGNOSTIC
	{
		int seg;
		for (seg = 0; seg < map->dm_nsegs; seg++) {
			if (map->dm_segs[seg].ds_addr < is->is_dvmabase ||
			    map->dm_segs[seg].ds_addr > is->is_dvmaend) {
				printf("seg %d dvmaddr %lx out of "
				    "range %x - %x\n",
				    seg, (long)map->dm_segs[seg].ds_addr,
				    is->is_dvmabase, is->is_dvmaend);
#ifdef DDB
				Debugger();
#endif
			}
		}
	}
#endif
	return (0);

fail:
	vmem_xfree(is->is_dvmamap, map->_dm_dvmastart, sgsize);
	map->_dm_dvmastart = 0;
	map->_dm_dvmasize = 0;
	return (EFBIG);
}


/*
 * Flush an individual dma segment; returns non-zero if the streaming
 * buffers need flushing afterwards.
 */
static int
iommu_dvmamap_sync_range(struct strbuf_ctl *sb, vaddr_t va, bus_size_t len)
{
	vaddr_t vaend;
	struct iommu_state *is = sb->sb_is;

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || va > is->is_dvmaend)
		panic("invalid va: %llx", (long long)va);
#endif

	if ((is->is_tsb[IOTSBSLOT(va, is->is_tsbsize)] & IOTTE_STREAM) == 0) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync_range: attempting to flush "
		    "non-streaming entry\n"));
		return (0);
	}

	vaend = round_page(va + len) - 1;
	va = trunc_page(va);

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || vaend > is->is_dvmaend)
		panic("invalid va range: %llx to %llx (%x to %x)",
		    (long long)va, (long long)vaend,
		    is->is_dvmabase,
		    is->is_dvmaend);
#endif

	for ( ; va <= vaend; va += PAGE_SIZE) {
		DPRINTF(IDB_SYNC,
		    ("iommu_dvmamap_sync_range: flushing va %p\n",
		    (void *)(u_long)va));
		iommu_strbuf_flush(sb, va);
	}

	return (1);
}

static void
_iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct strbuf_ctl *sb = (struct strbuf_ctl *)map->_dm_cookie;
	bus_size_t count;
	int i, needsflush = 0;

	if (!sb->sb_flush)
		return;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (offset < map->dm_segs[i].ds_len)
			break;
		offset -= map->dm_segs[i].ds_len;
	}

	if (i == map->dm_nsegs)
		panic("%s: segment too short %llu", __func__,
		    (unsigned long long)offset);

	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Nothing to do */;
	}

	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_PREWRITE)) {

		for (; len > 0 && i < map->dm_nsegs; i++) {
			count = MIN(map->dm_segs[i].ds_len - offset, len);
			if (count > 0 &&
			    iommu_dvmamap_sync_range(sb,
			    map->dm_segs[i].ds_addr + offset, count))
				needsflush = 1;
			offset = 0;
			len -= count;
		}
#ifdef DIAGNOSTIC
		if (i == map->dm_nsegs && len > 0)
			panic("%s: leftover %llu", __func__,
			    (unsigned long long)len);
#endif

		if (needsflush)
			iommu_strbuf_flush_done(sb);
	}
}

void
iommu_dvmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

	/* If len is 0, then there is nothing to do */
	if (len == 0)
		return;
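	/*
	 * Ordering rationale: for the PRE ops the CPU-side caches are
	 * synced first so the data the device is about to read (or
	 * overwrite) has reached memory; for the POST ops the IOMMU
	 * streaming buffers are drained first so the device's writes
	 * are visible before the CPU-side sync runs.
	 */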
	if (ops & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) {
		/* Flush the CPU then the IOMMU */
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
		_iommu_dvmamap_sync(t, map, offset, len, ops);
	}
	if (ops & (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)) {
		/* Flush the IOMMU then the CPU */
		_iommu_dvmamap_sync(t, map, offset, len, ops);
		bus_dmamap_sync(t->_parent, map, offset, len, ops);
	}
}

int
iommu_dvmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_alloc: sz %llx align %llx "
	    "bound %llx segp %p flags %d\n", (unsigned long long)size,
	    (unsigned long long)alignment, (unsigned long long)boundary,
	    segs, flags));
	return (bus_dmamem_alloc(t->_parent, size, alignment, boundary,
	    segs, nsegs, rsegs, flags | BUS_DMA_DVMA));
}

void
iommu_dvmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_free: segp %p nsegs %d\n",
	    segs, nsegs));
	bus_dmamem_free(t->_parent, segs, nsegs);
}

/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
iommu_dvmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *pg;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *pglist;
	int cbit;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	/*
	 * digest flags:
	 */
	cbit = 0;
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	pglist = segs[0]._ds_mlist;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(pg);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va,
		    (unsigned long long)addr | cbit));
		pmap_kenter_pa(va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return (0);
}

/*
 * Unmap DVMA mappings from kernel
 */
void
iommu_dvmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("iommu_dvmamem_unmap");
#endif

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
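
#if 0
/*
 * Illustrative usage sketch, not part of the driver: the routines above
 * sit behind the sparc64 bus_dma tag, so a device driver reaches them
 * through the usual MI entry points.  The softc fields (sc_dmatag,
 * sc_dmamap) and the function name xx_start are hypothetical.
 */
static int
xx_start(struct xx_softc *sc, void *buf, bus_size_t len)
{
	int error;

	/* Translate buf into DVMA space (ends up in iommu_dvmamap_load). */
	error = bus_dmamap_load(sc->sc_dmatag, sc->sc_dmamap, buf, len,
	    NULL, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* Make CPU writes visible to the device before starting DMA. */
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_PREWRITE);

	/* ... program the device from sc->sc_dmamap->dm_segs[] here ... */

	/* After the DMA completes: drain the strbufs, then tear down. */
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0, len,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
	return (0);
}
#endif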