Lines matching refs:dmat (each entry is prefixed with its line number in the source file)

175 	bus_dma_tag_t		dmat;
193 static int alloc_bounce_zone(bus_dma_tag_t dmat);
194 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
195 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
197 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
199 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
200 static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap,
202 static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
204 static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
296 exclusion_bounce(bus_dma_tag_t dmat)
299 return (dmat->flags & BUS_DMA_EXCL_BOUNCE);
306 alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
309 return (addr & (dmat->alignment - 1));
343 might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
347 return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) ||
348 alignment_bounce(dmat, addr) ||
364 must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
375 if (alignment_bounce(dmat, paddr))
385 while (dmat != NULL && exclusion_bounce(dmat)) {
386 if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) &&
387 (dmat->filter == NULL ||
388 dmat->filter(dmat->filterarg, paddr) != 0))
390 dmat = dmat->parent;
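
The fragments at lines 296-390 are the bounce-decision predicates. Below is a minimal userland model of that logic, assuming uint64_t stands in for bus_addr_t and a struct carrying only the tag fields the listing shows; the flag value and the tag_model type are illustrative, not the kernel's definitions.

#include <stddef.h>
#include <stdint.h>

#define BUS_DMA_EXCL_BOUNCE	0x01	/* illustrative value only */

struct tag_model {
	int flags;
	uint64_t alignment;		/* power of two */
	uint64_t lowaddr, highaddr;	/* exclusion window */
	int (*filter)(void *, uint64_t);
	void *filterarg;
	struct tag_model *parent;
};

/* Tag (or an ancestor) carries an exclusion zone overlapping RAM. */
static int
exclusion_bounce(struct tag_model *t)
{
	return (t->flags & BUS_DMA_EXCL_BOUNCE);
}

/* Nonzero when addr does not satisfy the tag's alignment. */
static uint64_t
alignment_bounce(struct tag_model *t, uint64_t addr)
{
	return (addr & (t->alignment - 1));
}

/*
 * A chunk must bounce if it is misaligned, or if it falls inside the
 * exclusion window of this tag or any ancestor and the optional
 * filter does not exempt it.
 */
static int
must_bounce(struct tag_model *t, uint64_t paddr)
{
	if (alignment_bounce(t, paddr))
		return (1);
	while (t != NULL && exclusion_bounce(t)) {
		if (paddr >= t->lowaddr && paddr <= t->highaddr &&
		    (t->filter == NULL ||
		    t->filter(t->filterarg, paddr) != 0))
			return (1);
		t = t->parent;
	}
	return (0);
}

must_bounce() walks up through parents (line 390 above) because a child tag inherits every ancestor's exclusion constraints.
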
399 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
441 void *lockfuncarg, bus_dma_tag_t *dmat)
456 *dmat = NULL;
555 *dmat = newtag;
563 bus_dma_tag_destroy(bus_dma_tag_t dmat)
569 dmat_copy = dmat;
571 if (dmat != NULL) {
573 if (dmat->map_count != 0) {
578 while (dmat != NULL) {
581 parent = dmat->parent;
582 atomic_subtract_int(&dmat->ref_count, 1);
583 if (dmat->ref_count == 0) {
585 free(dmat, M_BUSDMA);
591 dmat = parent;
593 dmat = NULL;
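
Lines 563-593 show bus_dma_tag_destroy() walking the parent chain: each tag holds a reference on its parent, so destroying a leaf frees every tag whose count drops to zero and stops at the first survivor. A single-threaded sketch of that refcounting follows; the kernel uses atomic_subtract_int() and returns EBUSY, the error value here is a placeholder.

#include <stdlib.h>

struct tag_model {
	struct tag_model *parent;
	int ref_count;
	int map_count;
};

static int
tag_destroy(struct tag_model *t)
{
	struct tag_model *parent;

	if (t == NULL)
		return (0);
	if (t->map_count != 0)
		return (-1);		/* the kernel returns EBUSY */
	while (t != NULL) {
		parent = t->parent;
		if (--t->ref_count == 0) {
			free(t);
			/* Dropping the last reference also releases
			 * this tag's hold on its parent, so keep
			 * walking up the chain. */
			t = parent;
		} else
			t = NULL;	/* another holder survives */
	}
	return (0);
}
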
602 allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
608 if (dmat->bounce_zone == NULL)
609 if ((error = alloc_bounce_zone(dmat)) != 0)
611 bz = dmat->bounce_zone;
621 if (dmat->flags & BUS_DMA_COULD_BOUNCE)
625 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
629 pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
632 if (alloc_bounce_pages(dmat, pages) < pages)
635 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
636 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
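
Line 629 sizes the bounce pool: enough pages to cover maxsize, plus one spare in case the buffer starts mid-page. A worked example of that arithmetic, assuming 4 KB pages; atop() and roundup2() are re-derived here for a standalone build.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))  /* y: power of 2 */
#define atop(x)		((x) >> 12)	/* bytes to 4 KB pages */

int
main(void)
{
	uint64_t maxsize = 9000;	/* example tag maxsize */

	/* 9000 rounds up to 12288 bytes = 3 pages, +1 spare = 4. */
	uint64_t pages = atop(roundup2(maxsize, PAGE_SIZE)) + 1;

	printf("%ju bounce pages for a %ju-byte tag\n",
	    (uintmax_t)pages, (uintmax_t)maxsize);
	return (0);
}
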
643 allocate_map(bus_dma_tag_t dmat, int mflags)
653 KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS,
655 dmat->nsegments, MAX_DMA_SEGMENTS));
656 segsize = sizeof(struct bus_dma_segment) * dmat->nsegments;
657 mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments;
660 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
672 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
677 *mapp = map = allocate_map(dmat, M_NOWAIT);
679 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
690 error = allocate_bz_and_pages(dmat, map);
699 dmat->map_count++;
709 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
714 __func__, dmat, EBUSY);
717 if (dmat->bounce_zone)
718 dmat->bounce_zone->map_count--;
723 dmat->map_count--;
724 CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
734 bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
750 *mapp = map = allocate_map(dmat, mflags);
753 __func__, dmat, dmat->flags, ENOMEM);
773 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
790 if (bufzone != NULL && dmat->alignment <= bufzone->size &&
791 !exclusion_bounce(dmat)) {
793 } else if (dmat->nsegments >=
794 howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
795 dmat->alignment <= PAGE_SIZE &&
796 (dmat->boundary % PAGE_SIZE) == 0) {
797 *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
798 mflags, 0, dmat->lowaddr, memattr);
800 *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
801 mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
806 __func__, dmat, dmat->flags, ENOMEM);
815 dmat->map_count++;
818 __func__, dmat, dmat->flags, 0);
827 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
837 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
839 if (bufzone != NULL && dmat->alignment <= bufzone->size &&
840 !exclusion_bounce(dmat))
843 kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
845 dmat->map_count--;
851 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
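
Lines 790-801 pick one of three allocators in bus_dmamem_alloc(). A sketch of the selection, assuming a simplified tag struct; in the kernel the three paths are the busdma_bufalloc zone, kmem_alloc_attr() and kmem_alloc_contig() respectively.

#include <stdint.h>

#define PAGE_SIZE	4096ULL
#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

struct tag_model {
	uint64_t maxsize, maxsegsz, alignment, boundary;
	uint64_t nsegments;
	int excl_bounce;
};

enum alloc_path { BUFZONE, ANY_PAGES, CONTIG };

static enum alloc_path
pick_allocator(struct tag_model *t, uint64_t bufzone_size)
{
	/* Fits a power-of-two bufzone, alignment satisfied, no
	 * exclusion zone to dodge: plain zone allocation. */
	if (t->maxsize <= bufzone_size && t->alignment <= bufzone_size &&
	    !t->excl_bounce)
		return (BUFZONE);
	/* Enough S/G segments to describe discontiguous pages, and
	 * page granularity satisfies alignment/boundary: any pages
	 * will do. */
	if (t->nsegments >= howmany(t->maxsize,
	    MIN(t->maxsegsz, PAGE_SIZE)) &&
	    t->alignment <= PAGE_SIZE && t->boundary % PAGE_SIZE == 0)
		return (ANY_PAGES);
	/* Otherwise physically contiguous memory is required. */
	return (CONTIG);
}

bus_dmamem_free() repeats the same bufzone test (lines 839-840) so the buffer is returned to whichever allocator produced it.
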
855 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
864 dmat->lowaddr, dmat->boundary, dmat->alignment,
872 sgsize = MIN(buflen, dmat->maxsegsz);
873 if (must_bounce(dmat, map, curaddr, sgsize) != 0) {
886 _bus_dmamap_count_pages(bus_dma_tag_t dmat, pmap_t pmap, bus_dmamap_t map,
896 dmat->lowaddr, dmat->boundary, dmat->alignment,
910 if (must_bounce(dmat, map, paddr,
923 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
929 if (reserve_bounce_pages(dmat, map, 0) != 0) {
935 if (reserve_bounce_pages(dmat, map, 1) != 0) {
951 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
960 bmask = ~(dmat->boundary - 1);
961 if (dmat->boundary > 0) {
962 baddr = (curaddr + dmat->boundary) & bmask;
978 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
979 (dmat->boundary == 0 ||
983 if (++seg >= dmat->nsegments)
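
Lines 951-983 contain the segment-building math of _bus_dmamap_addseg(): clip a chunk so it never crosses a boundary line, then either coalesce it into the previous segment or start a new one. A standalone model, assuming the caller initializes *segp to -1 before the first chunk; the ds_ field names mirror bus_dma_segment.

#include <stdint.h>

struct seg_model {
	uint64_t ds_addr, ds_len;
};

/*
 * Returns the bytes consumed, or 0 when the segment list is full
 * (EFBIG upstream).
 */
static uint64_t
addseg(uint64_t curaddr, uint64_t sgsize, struct seg_model *segs,
    int *segp, int nsegments, uint64_t boundary, uint64_t maxsegsz)
{
	uint64_t baddr, bmask;
	int seg = *segp;

	bmask = ~(boundary - 1);
	if (boundary > 0) {
		/* Clip so the chunk stops at the next boundary. */
		baddr = (curaddr + boundary) & bmask;
		if (sgsize > baddr - curaddr)
			sgsize = baddr - curaddr;
	}
	if (seg == -1) {
		seg = 0;
		segs[0].ds_addr = curaddr;
		segs[0].ds_len = sgsize;
	} else if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    segs[seg].ds_len + sgsize <= maxsegsz &&
	    (boundary == 0 ||
	    (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;	/* coalesce */
	} else {
		if (++seg >= nsegments)
			return (0);
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}

For example, with boundary = 0x10000 and curaddr = 0xfc00, baddr is 0x10000, so an 8 KB chunk is clipped to 0x400 bytes and stops exactly at the 64 KB line.
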
998 _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
1013 if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
1014 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
1017 error = _bus_dmamap_reserve_pages(dmat, map, flags);
1027 sgsize = MIN(buflen, dmat->maxsegsz);
1028 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
1031 curaddr = add_bounce_page(dmat, map, 0, curaddr,
1038 if (++map->sync_count > dmat->nsegments)
1051 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1063 _bus_dmamap_unload(dmat, map);
1070 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
1075 return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
1084 _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
1109 if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
1110 _bus_dmamap_count_pages(dmat, pmap, map, buf, buflen, flags);
1113 error = _bus_dmamap_reserve_pages(dmat, map, flags);
1138 if (sgsize > dmat->maxsegsz)
1139 sgsize = dmat->maxsegsz;
1143 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
1145 curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
1157 if (++map->sync_count > dmat->nsegments)
1175 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1188 _bus_dmamap_unload(dmat, map);
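
Lines 998-1188 are the two load loops. Below is a skeleton of the control flow they share, with the bounce handling and map bookkeeping reduced to comments so the shape stands out; every _model name is illustrative, not kernel API.

#include <stdint.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

struct seg_model {
	uint64_t ds_addr, ds_len;
};

static int
load_phys_model(uint64_t buf, uint64_t buflen, uint64_t maxsegsz,
    struct seg_model *segs, int nsegments)
{
	uint64_t curaddr, sgsize;
	int nseg = 0;

	while (buflen > 0) {
		curaddr = buf;
		/* Clip each chunk to the per-segment maximum; the
		 * virtually-addressed variant also clips at the end
		 * of the current page. */
		sgsize = MIN(buflen, maxsegsz);
		/* Kernel: if map->pagesneeded != 0 && must_bounce(),
		 * curaddr = add_bounce_page(...), redirecting DMA to
		 * a staging page (lines 1028-1031, 1143-1145). */
		if (nseg >= nsegments)
			return (-1);	/* EFBIG upstream */
		segs[nseg].ds_addr = curaddr;
		segs[nseg].ds_len = sgsize;
		nseg++;
		buf += sgsize;
		buflen -= sgsize;
	}
	return (nseg);
}
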
1195 __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem,
1200 map->dmat = dmat;
1206 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
1219 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
1224 if ((bz = dmat->bounce_zone) != NULL) {
1227 free_bounce_page(dmat, bpage);
1230 bz = dmat->bounce_zone;
1319 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1337 "performing bounce", __func__, dmat, dmat->flags, op);
1360 dmat->bounce_zone->total_bounced++;
1409 dmat->bounce_zone->total_bounced++;
1440 "performing sync", __func__, dmat, dmat->flags, op);
1474 alloc_bounce_zone(bus_dma_tag_t dmat)
1480 if ((dmat->alignment <= bz->alignment) &&
1481 (dmat->lowaddr >= bz->lowaddr)) {
1482 dmat->bounce_zone = bz;
1495 bz->lowaddr = dmat->lowaddr;
1496 bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
1502 dmat->bounce_zone = bz;
1548 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1553 bz = dmat->bounce_zone;
1583 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1589 bz = dmat->bounce_zone;
1602 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1608 KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1611 bz = dmat->bounce_zone;
1630 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1644 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1649 bz = dmat->bounce_zone;
1652 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1667 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
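
Lines 1630 and 1652 show the two sides of BUS_DMA_KEEP_PG_OFFSET in add_bounce_page() and free_bounce_page(). A sketch of what the flag does, assuming the bounce page starts page-aligned when free.

#include <stdint.h>

#define PAGE_MASK	4095ULL

struct bounce_page_model {
	uint64_t vaddr, busaddr;	/* page-aligned when free */
};

/* add_bounce_page(): preserve the buffer's offset within the page,
 * which some hardware requires of the staged copy. */
static void
keep_pg_offset(struct bounce_page_model *bp, uint64_t orig_addr)
{
	bp->vaddr |= orig_addr & PAGE_MASK;
	bp->busaddr |= orig_addr & PAGE_MASK;
}

/* free_bounce_page(): realign before the page rejoins the free list,
 * since the next user may need a full page starting at offset 0. */
static void
clear_pg_offset(struct bounce_page_model *bp)
{
	bp->vaddr &= ~PAGE_MASK;
	bp->busaddr &= ~PAGE_MASK;
}
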
1682 bus_dma_tag_t dmat;
1689 dmat = map->dmat;
1690 dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
1691 bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
1693 dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
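
Lines 1690-1693 show busdma_swi() retrying deferred loads under the tag's lockfunc. A userland sketch of the lockfunc contract, using a pthread mutex where the kernel's busdma_lock_mutex() would take a struct mtx.

#include <pthread.h>

enum { BUS_DMA_LOCK, BUS_DMA_UNLOCK };

typedef void bus_dma_lock_model_t(void *arg, int op);

/*
 * A driver-supplied lockfunc: busdma calls it with BUS_DMA_LOCK
 * before invoking the load callback from the software interrupt and
 * with BUS_DMA_UNLOCK afterwards, so the deferred callback runs
 * under the driver's own lock exactly as a synchronous load would.
 */
static void
driver_lockfunc(void *arg, int op)
{
	pthread_mutex_t *m = arg;

	if (op == BUS_DMA_LOCK)
		pthread_mutex_lock(m);
	else
		pthread_mutex_unlock(m);
}
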