Lines matching refs:dmat (every reference to the DMA tag pointer dmat in the busdma implementation; each fragment is prefixed with its line number in the source file).

143 	bus_dma_tag_t	dmat;
160 static int alloc_bounce_zone(bus_dma_tag_t dmat);
161 static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
162 static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
164 static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
167 static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
188 bus_dma_tag_t dmat;
191 dmat = (bus_dma_tag_t)arg;
193 dmat->map_count++;
195 map->dmat = dmat;
216 map->dmat->map_count--;
255 run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
262 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
263 || ((paddr & (dmat->alignment - 1)) != 0))
264 && (dmat->filter == NULL
265 || (*dmat->filter)(dmat->filterarg, paddr) != 0))
268 dmat = dmat->parent;
269 } while (retval == 0 && dmat != NULL);
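
The run_filter() fragments above (lines 255-269) reconstruct to roughly the sketch below. The retval bookkeeping and the return statement are inferred from context, so treat this as an illustration of the quoted test rather than the exact source.

static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		/*
		 * Bounce when the address falls in the excluded window
		 * (lowaddr, highaddr] or violates the tag's alignment,
		 * unless a filter function vetoes the bounce.
		 */
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;
		/* Repeat the test for every ancestor tag. */
		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
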
293 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
331 _busdma_alloc_dmamap(bus_dma_tag_t dmat)
336 slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
339 map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
366 void *lockfuncarg, bus_dma_tag_t *dmat)
371 *dmat = NULL;
464 *dmat = newtag;
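
A hypothetical driver-side bus_dma_tag_create() call following the locking convention noted in the comment at line 293: a Giant-locked driver passes busdma_lock_mutex and &Giant, while a driver with its own lock passes that instead. The softc layout, function name, and size limits below are made up for illustration.

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>

struct foo_softc {
	bus_dma_tag_t	foo_dtag;	/* hypothetical per-device tag */
};

static int
foo_alloc_dma_tag(device_t dev, struct foo_softc *sc)
{
	return (bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent: inherit bus restrictions */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr: bounce anything above 4GB */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, 1, MAXBSIZE,	/* maxsize, nsegments, maxsegsz */
	    0,				/* flags */
	    busdma_lock_mutex, &Giant,	/* lockfunc, lockfuncarg (line 293) */
	    &sc->foo_dtag));
}
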
472 bus_dma_tag_destroy(bus_dma_tag_t dmat)
475 bus_dma_tag_t dmat_copy = dmat;
478 if (dmat != NULL) {
479 if (dmat->map_count != 0)
482 while (dmat != NULL) {
485 parent = dmat->parent;
486 atomic_subtract_int(&dmat->ref_count, 1);
487 if (dmat->ref_count == 0) {
488 if (dmat->segments != NULL)
489 free(dmat->segments, M_BUSDMA);
490 free(dmat, M_BUSDMA);
496 dmat = parent;
498 dmat = NULL;
512 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
517 if (dmat->segments == NULL) {
518 dmat->segments = (bus_dma_segment_t *)malloc(
519 sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
521 if (dmat->segments == NULL) {
523 __func__, dmat, ENOMEM);
528 newmap = _busdma_alloc_dmamap(dmat);
530 CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
540 if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
546 if (dmat->bounce_zone == NULL) {
547 if ((error = alloc_bounce_zone(dmat)) != 0) {
553 bz = dmat->bounce_zone;
563 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
567 pages = MAX(atop(dmat->maxsize), 1);
570 if (alloc_bounce_pages(dmat, pages) < pages)
573 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
575 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
584 __func__, dmat, dmat->flags, error);
594 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
599 __func__, dmat, EBUSY);
602 if (dmat->bounce_zone)
603 dmat->bounce_zone->map_count--;
605 CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
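
Driver side, the create/destroy fragments above pair up roughly as in this hypothetical snippet. Lines 517-575 show that the first map created on a tag also allocates dmat->segments and, for bounce-capable tags, a bounce zone with preallocated pages; line 599 shows that destroying a still-loaded map fails with EBUSY.

	bus_dmamap_t map;
	int error;

	error = bus_dmamap_create(sc->foo_dtag, 0, &map);
	if (error != 0)
		return (error);
	/* ... bus_dmamap_load() / bus_dmamap_unload() cycles ... */
	bus_dmamap_destroy(sc->foo_dtag, map);	/* EBUSY if still loaded */
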
615 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
630 if (dmat->segments == NULL) {
631 dmat->segments = (bus_dma_segment_t *)malloc(
632 sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
634 if (dmat->segments == NULL) {
636 __func__, dmat, dmat->flags, ENOMEM);
641 newmap = _busdma_alloc_dmamap(dmat);
644 __func__, dmat, dmat->flags, ENOMEM);
652 if (dmat->flags & BUS_DMA_COHERENT)
674 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
691 if (bufzone != NULL && dmat->alignment <= bufzone->size &&
692 !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
694 } else if (dmat->nsegments >=
695 howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
696 dmat->alignment <= PAGE_SIZE &&
697 (dmat->boundary % PAGE_SIZE) == 0) {
698 vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
699 mflags, 0, dmat->lowaddr, memattr);
701 vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
702 mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
722 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
735 bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
737 if (bufzone != NULL && dmat->alignment <= bufzone->size &&
738 !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
741 kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
742 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
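
The allocation fragments above (lines 674-702) fall back from the per-size uma bufzone to kmem_alloc_attr() and finally kmem_alloc_contig() as size, alignment, and boundary constraints tighten. A hypothetical driver-side use of the resulting pair of calls:

	void *ring;
	bus_dmamap_t ring_map;
	int error;

	/* Ask for a zeroed, cache-coherent buffer where the platform supports it. */
	error = bus_dmamem_alloc(sc->foo_dtag, &ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &ring_map);
	if (error != 0)
		return (error);
	/* ... load, use, sync, unload ... */
	bus_dmamem_free(sc->foo_dtag, ring, ring_map);
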
746 _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
754 dmat->lowaddr, dmat->boundary, dmat->alignment);
763 sgsize = MIN(buflen, dmat->maxsegsz);
764 if (run_filter(dmat, curaddr) != 0) {
776 _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
785 dmat->lowaddr, dmat->boundary, dmat->alignment);
801 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
802 run_filter(dmat, paddr) != 0) {
803 sg_len = roundup2(sg_len, dmat->alignment);
813 _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
819 if (reserve_bounce_pages(dmat, map, 0) != 0) {
824 if (reserve_bounce_pages(dmat, map, 1) != 0) {
841 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
850 bmask = ~(dmat->boundary - 1);
851 if (dmat->boundary > 0) {
852 baddr = (curaddr + dmat->boundary) & bmask;
863 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
864 (dmat->boundary == 0 ||
868 if (++seg >= dmat->nsegments)
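
The _bus_dmamap_addseg() fragments above (lines 841-868) amount to the boundary clamp and the coalescing test sketched below. The sgsize truncation, the else branch, and the handling of the very first segment are partly inferred, so this is an approximation of the quoted logic rather than a copy of it.

	/* Clamp sgsize so a segment never crosses a boundary line. */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Extend the previous segment when the new range is physically
	 * contiguous, the combined length still fits maxsegsz, and both
	 * ranges share a boundary window; otherwise start a new segment,
	 * giving up once nsegments is exhausted.
	 */
	if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
	    (dmat->boundary == 0 ||
	    (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;
	} else {
		if (++seg >= dmat->nsegments)
			return (0);	/* out of segments */
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
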
882 _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
891 segs = dmat->segments;
893 if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
894 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
896 error = _bus_dmamap_reserve_pages(dmat, map, flags);
904 sgsize = MIN(buflen, dmat->maxsegsz);
905 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
906 map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
908 curaddr = add_bounce_page(dmat, map, 0, curaddr,
911 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
923 _bus_dmamap_unload(dmat, map);
930 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
935 return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
945 _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
957 segs = dmat->segments;
961 if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
962 _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
964 error = _bus_dmamap_reserve_pages(dmat, map, flags);
970 "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
986 if (sgsize > dmat->maxsegsz)
987 sgsize = dmat->maxsegsz;
991 if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
992 map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
993 curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
999 if (++map->sync_count > dmat->nsegments)
1008 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1021 _bus_dmamap_unload(dmat, map);
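
From the driver's point of view, the load path above is reached through bus_dmamap_load() and a completion callback. The snippet below is hypothetical (foo_ring_cb and sc->foo_ring_busaddr are invented names): with BUS_DMA_NOWAIT a bounce-page shortage fails the load outright, while a waitable load can be deferred and finished later from busdma_swi() (lines 1482-1493).

static void
foo_ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	/* One segment is all this hypothetical descriptor ring needs. */
	if (error == 0 && nseg == 1)
		*busaddrp = segs[0].ds_addr;
}

	error = bus_dmamap_load(sc->foo_dtag, ring_map, ring, MAXBSIZE,
	    foo_ring_cb, &sc->foo_ring_busaddr, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
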
1028 __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
1032 KASSERT(dmat != NULL, ("dmatag is NULL"));
1040 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
1045 segs = dmat->segments;
1053 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
1059 free_bounce_page(dmat, bpage);
1174 _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1196 dmat->bounce_zone->total_bounced++;
1211 dmat->bounce_zone->total_bounced++;
1217 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1225 _bus_dmamap_sync_bp(dmat, map, op);
1227 if ((dmat->flags & BUS_DMA_COHERENT) ||
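
Around each transfer the sync entry point above is used in pairs; a minimal sketch, assuming the ring and map from the earlier snippets. The bounce copies are performed by _bus_dmamap_sync_bp() (lines 1174-1211), and the cache maintenance that follows is skipped for coherent mappings (the flag test at line 1227).

	/* Make CPU writes visible to the device before starting DMA. */
	bus_dmamap_sync(sc->foo_dtag, ring_map, BUS_DMASYNC_PREWRITE);
	/* ... program the device, wait for the transfer to complete ... */
	/* Make device writes visible to the CPU afterwards. */
	bus_dmamap_sync(sc->foo_dtag, ring_map, BUS_DMASYNC_POSTREAD);
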
1270 alloc_bounce_zone(bus_dma_tag_t dmat)
1276 if ((dmat->alignment <= bz->alignment)
1277 && (dmat->lowaddr >= bz->lowaddr)) {
1278 dmat->bounce_zone = bz;
1291 bz->lowaddr = dmat->lowaddr;
1292 bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
1298 dmat->bounce_zone = bz;
1344 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1349 bz = dmat->bounce_zone;
1384 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1390 bz = dmat->bounce_zone;
1403 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1409 KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1412 bz = dmat->bounce_zone;
1431 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1444 free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1449 bz = dmat->bounce_zone;
1452 if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1467 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1482 bus_dma_tag_t dmat;
1489 dmat = map->dmat;
1490 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
1491 bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
1493 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
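
The closing fragments (lines 1482-1493) are the deferred-load path: busdma_swi() re-issues bus_dmamap_load_mem() for maps that had to wait for bounce pages, bracketing the call with the tag's lockfunc. A driver that supplies its own mutex instead of &Giant would pass a bus_dma_lock_t along these lines (hypothetical name):

static void
foo_dma_lockfunc(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *mtxp = arg;

	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(mtxp);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(mtxp);
		break;
	default:
		panic("foo_dma_lockfunc: unknown operation %d", op);
	}
}
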