--- busdma_machdep-v6.c	(r269213)
+++ busdma_machdep-v6.c	(r269214)
@@ -1,42 +1,42 @@
 /*-
  * Copyright (c) 2012 Ian Lepore
  * Copyright (c) 2010 Mark Tinguely
  * Copyright (c) 2004 Olivier Houchard
  * Copyright (c) 2002 Peter Grehan
  * Copyright (c) 1997, 1998 Justin T. Gibbs.
  * All rights reserved.
  *
--- 17 unchanged lines hidden ---
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269213 2014-07-29 02:36:50Z ian $");
+__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269214 2014-07-29 02:37:24Z ian $");

 #define _ARM32_BUS_DMA_PRIVATE
 #include <sys/param.h>
 #include <sys/kdb.h>
 #include <ddb/ddb.h>
 #include <ddb/db_output.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
--- 602 unchanged lines hidden ---

@@ -645,47 +645,48 @@

 /*
  * Allocate a handle for mapping from kva/uva/physical
  * address space into bus device space.
  */
 int
 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 {
+	bus_dmamap_t map;
 	int mapsize;
 	int error = 0;

-	mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
-	*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
-	if (*mapp == NULL) {
+	mapsize = sizeof(*map) + (sizeof(struct sync_list) * dmat->nsegments);
+	*mapp = map = malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (map == NULL) {
 		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
 		return (ENOMEM);
 	}
-	(*mapp)->sync_count = 0;
+	map->sync_count = 0;

 	if (dmat->segments == NULL) {
 		dmat->segments = (bus_dma_segment_t *)malloc(
 		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
 		    M_NOWAIT);
 		if (dmat->segments == NULL) {
 			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
 			    __func__, dmat, ENOMEM);
-			free(*mapp, M_DEVBUF);
+			free(map, M_DEVBUF);
 			*mapp = NULL;
 			return (ENOMEM);
 		}
 	}
 	/*
 	 * Bouncing might be required if the driver asks for an active
 	 * exclusion region, a data alignment that is stricter than 1, and/or
 	 * an active address boundary.
 	 */
-	error = allocate_bz_and_pages(dmat, *mapp);
+	error = allocate_bz_and_pages(dmat, map);
 	if (error != 0) {
-		free(*mapp, M_DEVBUF);
+		free(map, M_DEVBUF);
 		*mapp = NULL;
 		return (error);
 	}
 	return (error);
 }

 /*
  * Destroy a handle for mapping from kva/uva/physical
--- 22 unchanged lines hidden ---
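As the comment before allocate_bz_and_pages() notes, bus_dmamap_create() only sets up a bounce zone when the tag carries real constraints. The sketch below shows the driver-side path that reaches this function; it is a minimal illustration, not code from this file, and the mydev_softc structure, the mydev_dma_setup() helper, and the constraint values are all hypothetical.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/bus.h>
    #include <machine/bus.h>

    struct mydev_softc {                    /* hypothetical driver softc */
            device_t        dev;
            bus_dma_tag_t   dma_tag;
            bus_dmamap_t    dma_map;
    };

    static int
    mydev_dma_setup(struct mydev_softc *sc)
    {
            int error;

            /*
             * An alignment stricter than 1, a non-zero boundary, or a
             * lowaddr exclusion region are the constraints that make
             * bus_dmamap_create() reserve bounce pages via
             * allocate_bz_and_pages().
             */
            error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
                64,                         /* alignment, stricter than 1 */
                0,                          /* boundary, none */
                BUS_SPACE_MAXADDR_32BIT,    /* lowaddr, exclude above 4GB */
                BUS_SPACE_MAXADDR,          /* highaddr */
                NULL, NULL,                 /* filter, filterarg */
                MAXPHYS,                    /* maxsize */
                1,                          /* nsegments */
                MAXPHYS,                    /* maxsegsz */
                0,                          /* flags */
                NULL, NULL,                 /* lockfunc, lockfuncarg */
                &sc->dma_tag);
            if (error != 0)
                    return (error);

            /* Allocates the map plus one sync_list entry per segment. */
            return (bus_dmamap_create(sc->dma_tag, 0, &sc->dma_map));
    }

With nsegments = 1, the mapsize computation above yields sizeof(*map) plus a single struct sync_list.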

@@ -714,109 +715,109 @@
  * A dmamap to for use with dmamap_load is also allocated.
  */
 int
 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
     bus_dmamap_t *mapp)
 {
 	busdma_bufalloc_t ba;
 	struct busdma_bufzone *bufzone;
+	bus_dmamap_t map;
 	vm_memattr_t memattr;
 	int mflags;
 	int mapsize;
 	int error;

 	if (flags & BUS_DMA_NOWAIT)
 		mflags = M_NOWAIT;
 	else
 		mflags = M_WAITOK;

 	/* ARM non-snooping caches need a map for the VA cache sync structure */

-	mapsize = sizeof(**mapp) + (sizeof(struct sync_list) * dmat->nsegments);
-	*mapp = (bus_dmamap_t)malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
-	if (*mapp == NULL) {
+	mapsize = sizeof(*map) + (sizeof(struct sync_list) * dmat->nsegments);
+	*mapp = map = malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (map == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 		    __func__, dmat, dmat->flags, ENOMEM);
 		return (ENOMEM);
 	}
-
-	(*mapp)->flags = DMAMAP_DMAMEM_ALLOC;
-	(*mapp)->sync_count = 0;
+	map->flags = DMAMAP_DMAMEM_ALLOC;
+	map->sync_count = 0;

 	/* We may need bounce pages, even for allocated memory */
-	error = allocate_bz_and_pages(dmat, *mapp);
+	error = allocate_bz_and_pages(dmat, map);
 	if (error != 0) {
-		free(*mapp, M_DEVBUF);
+		free(map, M_DEVBUF);
 		*mapp = NULL;
 		return (error);
 	}

 	if (dmat->segments == NULL) {
 		dmat->segments = (bus_dma_segment_t *)malloc(
 		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
 		    mflags);
 		if (dmat->segments == NULL) {
 			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 			    __func__, dmat, dmat->flags, ENOMEM);
-			free(*mapp, M_DEVBUF);
+			free(map, M_DEVBUF);
 			*mapp = NULL;
 			return (ENOMEM);
 		}
 	}

 	if (flags & BUS_DMA_ZERO)
 		mflags |= M_ZERO;
 	if (flags & BUS_DMA_COHERENT) {
 		memattr = VM_MEMATTR_UNCACHEABLE;
 		ba = coherent_allocator;
-		(*mapp)->flags |= DMAMAP_COHERENT;
+		map->flags |= DMAMAP_COHERENT;
 	} else {
 		memattr = VM_MEMATTR_DEFAULT;
 		ba = standard_allocator;
 	}

 	/*
 	 * Try to find a bufzone in the allocator that holds a cache of buffers
 	 * of the right size for this request. If the buffer is too big to be
--- 26 unchanged lines hidden ---
 		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
 		    memattr);
 	}


 	if (*vaddr == NULL) {
 		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 		    __func__, dmat, dmat->flags, ENOMEM);
-		free(*mapp, M_DEVBUF);
+		free(map, M_DEVBUF);
 		*mapp = NULL;
 		return (ENOMEM);
 	}
 	dmat->map_count++;

 	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
 	    __func__, dmat, dmat->flags, 0);
 	return (0);
--- 830 unchanged lines hidden ---
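bus_dmamem_alloc() above pairs the buffer with a map in a single call; passing BUS_DMA_COHERENT routes the request through coherent_allocator with VM_MEMATTR_UNCACHEABLE and records DMAMAP_COHERENT in the map flags. The following is a minimal sketch of a caller on this path, reusing the hypothetical mydev_softc from the earlier sketch with added ring and ring_map members; none of these names come from this file.

    static int
    mydev_ring_alloc(struct mydev_softc *sc)
    {
            int error;

            /*
             * BUS_DMA_COHERENT selects the coherent (uncacheable)
             * allocator and sets DMAMAP_COHERENT in the new map, so the
             * sync path can skip cache maintenance; BUS_DMA_ZERO folds
             * M_ZERO into mflags as shown in the diff above.
             */
            error = bus_dmamem_alloc(sc->dma_tag, &sc->ring,
                BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
                &sc->ring_map);
            if (error != 0)
                    return (error);

            /* Paired teardown at detach time:
             * bus_dmamem_free(sc->dma_tag, sc->ring, sc->ring_map);
             */
            return (0);
    }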