sys/arm/arm/busdma_machdep-v6.c: r269210 (deleted/old side) vs. r269211 (added/new side)
1/*-
2 * Copyright (c) 2012 Ian Lepore
3 * Copyright (c) 2010 Mark Tinguely
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 2002 Peter Grehan
6 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * All rights reserved.
8 *

--- 17 unchanged lines hidden ---

26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
31 */
32
33#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2012 Ian Lepore
3 * Copyright (c) 2010 Mark Tinguely
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 2002 Peter Grehan
6 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * All rights reserved.
8 *

--- 17 unchanged lines hidden ---

26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269210 2014-07-29 02:36:09Z ian $");
34__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269211 2014-07-29 02:36:27Z ian $");
35
36#define _ARM32_BUS_DMA_PRIVATE
37#include <sys/param.h>
38#include <sys/kdb.h>
39#include <ddb/ddb.h>
40#include <ddb/db_output.h>
41#include <sys/systm.h>
42#include <sys/malloc.h>

--- 131 unchanged lines hidden ---

174static int alloc_bounce_zone(bus_dma_tag_t dmat);
175static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
176static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
177 int commit);
178static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
179 vm_offset_t vaddr, bus_addr_t addr,
180 bus_size_t size);
181static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
35
36#define _ARM32_BUS_DMA_PRIVATE
37#include <sys/param.h>
38#include <sys/kdb.h>
39#include <ddb/ddb.h>
40#include <ddb/db_output.h>
41#include <sys/systm.h>
42#include <sys/malloc.h>

--- 131 unchanged lines hidden ---

174static int alloc_bounce_zone(bus_dma_tag_t dmat);
175static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
176static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
177 int commit);
178static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
179 vm_offset_t vaddr, bus_addr_t addr,
180 bus_size_t size);
181static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
182int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t size, int coherent);
183static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
184 void *buf, bus_size_t buflen, int flags);
185static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
186 vm_paddr_t buf, bus_size_t buflen, int flags);
187static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
188 int flags);
189
190static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */

--- 93 unchanged lines hidden ---

284 */
285static __inline int
286cacheline_bounce(bus_addr_t addr, bus_size_t size)
287{
288
289 return ((addr | size) & arm_dcache_align_mask);
290}
291
182static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
183 void *buf, bus_size_t buflen, int flags);
184static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
185 vm_paddr_t buf, bus_size_t buflen, int flags);
186static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
187 int flags);
188
189static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */

--- 93 unchanged lines hidden ---

283 */
284static __inline int
285cacheline_bounce(bus_addr_t addr, bus_size_t size)
286{
287
288 return ((addr | size) & arm_dcache_align_mask);
289}
290
291/*
292 * Return true if we might need to bounce the DMA described by addr and size.
293 *
294 * This is used to quick-check whether we need to do the more expensive work of
295 * checking the DMA page-by-page looking for alignment and exclusion bounces.
296 *
297 * Note that the addr argument might be either virtual or physical. It doesn't
298 * matter because we only look at the low-order bits, which are the same in both
299 * address spaces.
300 */
301static __inline int
302might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
303 bus_size_t size)
304{
305 return ((dmat->flags & BUS_DMA_COULD_BOUNCE) ||
306 !((map->flags & DMAMAP_COHERENT) && cacheline_bounce(addr, size)));
307}
308
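For a concrete feel for the quick check above (an editorial sketch, not part of the change): cacheline_bounce() looks only at the low-order bits of the address and length, which is why it does not matter whether the address is virtual or physical. A minimal userland demonstration, assuming 64-byte cache lines (arm_dcache_align_mask == 0x3f; the real mask is CPU-dependent):

#include <stdint.h>
#include <stdio.h>

#define DCACHE_ALIGN_MASK	0x3f	/* assumed 64-byte lines; CPU-dependent */

/* Nonzero when a transfer does not start and end on cache line boundaries. */
static int
cacheline_bounce_demo(uint32_t addr, uint32_t size)
{
	return ((addr | size) & DCACHE_ALIGN_MASK);
}

int
main(void)
{
	/* 0x1000 + 512 bytes: line-aligned start and length, no bounce. */
	printf("%d\n", cacheline_bounce_demo(0x1000, 512) != 0);	/* 0 */
	/*
	 * 0x1010 + 100 bytes: partial cache lines at both ends, so this
	 * transfer bounces unless the map is coherent.
	 */
	printf("%d\n", cacheline_bounce_demo(0x1010, 100) != 0);	/* 1 */
	return (0);
}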
309/*
310 * Return true if we must bounce the DMA described by paddr and size.
311 *
312 * Bouncing can be triggered by DMA that doesn't begin and end on cacheline
313 * boundaries, or doesn't begin on an alignment boundary, or falls within the
314 * exclusion zone of any tag in the ancestry chain.
315 *
316 * For exclusions, walk the chain of tags comparing paddr to the exclusion zone
317 * within each tag. If the tag has a filter function, use it to decide whether
318 * the DMA needs to bounce, otherwise any DMA within the zone bounces.
319 */
320static int
321must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
322 bus_size_t size)
323{
324
325 /* Coherent memory doesn't need to bounce due to cache alignment. */
326 if (!(map->flags & DMAMAP_COHERENT) && cacheline_bounce(paddr, size))
327 return (1);
328
329 /*
330 * The tag already contains ancestors' alignment restrictions so this
331 * check doesn't need to be inside the loop.
332 */
333 if (alignment_bounce(dmat, paddr))
334 return (1);
335
336 /*
337 * Even though each tag has an exclusion zone that is a superset of its
338 * own and all its ancestors' exclusions, the exclusion zone of each tag
339 * up the chain must be checked within the loop, because the busdma
340 * rules say the filter function is called only when the address lies
341 * within the low-highaddr range of the tag that filterfunc belongs to.
342 */
343 while (dmat != NULL && exclusion_bounce(dmat)) {
344 if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) &&
345 (dmat->filter == NULL ||
346 dmat->filter(dmat->filterarg, paddr) != 0))
347 return (1);
348 dmat = dmat->parent;
349 }
350
351 return (0);
352}
353
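To make the exclusion-zone walk in must_bounce() easier to follow, here is a stripped-down, compilable sketch (an editorial illustration; the struct below is invented, and it omits the real code's exclusion_bounce() flag test as well as the alignment and cacheline checks shown above):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t bus_addr_t;

/* Simplified stand-in for the tag fields the exclusion walk consults. */
struct dma_tag {
	struct dma_tag	*parent;
	bus_addr_t	 lowaddr, highaddr;	/* exclusion window */
	int		(*filter)(void *arg, bus_addr_t paddr);
	void		*filterarg;
};

/*
 * Walk the tag ancestry: an address inside a tag's window bounces unless
 * that tag has a filter and the filter returns zero for the address.
 */
static int
exclusion_must_bounce(struct dma_tag *dmat, bus_addr_t paddr)
{
	while (dmat != NULL) {
		if (paddr >= dmat->lowaddr && paddr <= dmat->highaddr &&
		    (dmat->filter == NULL ||
		    dmat->filter(dmat->filterarg, paddr) != 0))
			return (1);
		dmat = dmat->parent;
	}
	return (0);
}

/* Hypothetical filter: only "odd" 4 KB pages inside the window bounce. */
static int
odd_page_filter(void *arg, bus_addr_t paddr)
{
	(void)arg;
	return ((paddr & 0x1000) != 0);
}

int
main(void)
{
	struct dma_tag parent = { NULL, 0xf0000000, 0xffffffff,
	    odd_page_filter, NULL };
	struct dma_tag child = { &parent, 1, 0, NULL, NULL };	/* empty window */

	printf("%d\n", exclusion_must_bounce(&child, 0x10000000));	/* 0 */
	printf("%d\n", exclusion_must_bounce(&child, 0xf0001000));	/* 1 */
	printf("%d\n", exclusion_must_bounce(&child, 0xf0000000));	/* 0 */
	return (0);
}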
292static __inline struct arm32_dma_range *
293_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
294 bus_addr_t curaddr)
295{
296 struct arm32_dma_range *dr;
297 int i;
298
299 for (i = 0, dr = ranges; i < nranges; i++, dr++) {
300 if (curaddr >= dr->dr_sysbase &&
301 round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
302 return (dr);
303 }
304
305 return (NULL);
306}
307
308/*
354static __inline struct arm32_dma_range *
355_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
356 bus_addr_t curaddr)
357{
358 struct arm32_dma_range *dr;
359 int i;
360
361 for (i = 0, dr = ranges; i < nranges; i++, dr++) {
362 if (curaddr >= dr->dr_sysbase &&
363 round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
364 return (dr);
365 }
366
367 return (NULL);
368}
369
370/*
309 * Return true if a match is made.
310 *
311 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
312 *
313 * If paddr is within the bounds of the dma tag then call the filter callback
314 * to check for a match, if there is no filter callback then assume a match.
315 */
316int
317run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t size, int coherent)
318{
319 int retval;
320
321 retval = 0;
322
323 do {
324 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
325 alignment_bounce(dmat, paddr) ||
326 (!coherent && cacheline_bounce(paddr, size))) &&
327 (dmat->filter == NULL ||
328 dmat->filter(dmat->filterarg, paddr) != 0))
329 retval = 1;
330 dmat = dmat->parent;
331 } while (retval == 0 && dmat != NULL);
332 return (retval);
333}
334
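One behavioural difference worth noting between this old run_filter() and its replacement above: here the driver-supplied filter gates every bounce cause, whereas must_bounce() consults the filter only for the exclusion window. A condensed single-tag sketch of the old decision (editorial only; alignment_bounce() is assumed to be the usual power-of-two mask test, which is not visible in this hunk, and a 64-byte cache line is assumed):

#include <stdint.h>

typedef uint64_t bus_addr_t;
typedef uint64_t bus_size_t;

#define CACHE_LINE_MASK	0x3f		/* assumed */

struct old_tag {
	bus_addr_t	 lowaddr, highaddr;	/* exclusion window */
	bus_size_t	 alignment;		/* power of two */
	int		(*filter)(void *, bus_addr_t);
	void		*filterarg;
};

/*
 * Old behaviour for a single tag: signal a bounce only when
 *  (a) paddr is in the window, or misaligned, or (for non-coherent maps)
 *      the transfer is not cacheline-contained, AND
 *  (b) there is no filter, or the filter returns non-zero.
 * Because of (b), a filter could veto even alignment and cacheline
 * bounces; the new must_bounce() checks those before any filter runs.
 */
static int
old_run_filter_one_tag(const struct old_tag *t, bus_addr_t paddr,
    bus_size_t size, int coherent)
{
	int cause;

	cause = (paddr > t->lowaddr && paddr <= t->highaddr) ||
	    (paddr & (t->alignment - 1)) != 0 ||
	    (!coherent && ((paddr | size) & CACHE_LINE_MASK) != 0);
	return (cause && (t->filter == NULL ||
	    t->filter(t->filterarg, paddr) != 0));
}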
335/*
336 * Convenience function for manipulating driver locks from busdma (during
337 * busdma_swi, for example). Drivers that don't provide their own locks
338 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
339 * non-mutex locking scheme don't have to use this at all.
340 */
341void
342busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
343{

--- 474 unchanged lines hidden ---
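As a usage note for the busdma_lock_mutex() convention described just above (an editorial sketch of a hypothetical driver, not code from this file; bus_dma_tag_create() is the public interface documented in bus_dma(9)): a driver that has no private lock passes busdma_lock_mutex and &Giant so that deferred load callbacks are serialized on Giant.

/*
 * Hypothetical driver attach helper (needs <sys/param.h>, <sys/bus.h>,
 * <sys/lock.h>, <sys/mutex.h>, <machine/bus.h>); error handling omitted.
 */
static int
mydrv_create_dma_tag(device_t dev, bus_dma_tag_t *tagp)
{
	return (bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* inherit parent tag restrictions */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* no exclusion filter */
	    DFLTPHYS, 1, DFLTPHYS,	/* maxsize, nsegments, maxsegsz */
	    0,				/* flags */
	    busdma_lock_mutex, &Giant,	/* lockfunc, lockfuncarg */
	    tagp));
}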

818 map, map->pagesneeded);
819 /*
820 * Count the number of bounce pages
821 * needed in order to complete this transfer
822 */
823 curaddr = buf;
824 while (buflen != 0) {
825 sgsize = MIN(buflen, dmat->maxsegsz);
371 * Convenience function for manipulating driver locks from busdma (during
372 * busdma_swi, for example). Drivers that don't provide their own locks
373 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
374 * non-mutex locking scheme don't have to use this at all.
375 */
376void
377busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
378{

--- 474 unchanged lines hidden ---

853 map, map->pagesneeded);
854 /*
855 * Count the number of bounce pages
856 * needed in order to complete this transfer
857 */
858 curaddr = buf;
859 while (buflen != 0) {
860 sgsize = MIN(buflen, dmat->maxsegsz);
826 if (run_filter(dmat, curaddr, sgsize,
827 map->flags & DMAMAP_COHERENT) != 0) {
861 if (must_bounce(dmat, map, curaddr, sgsize) != 0) {
828 sgsize = MIN(sgsize, PAGE_SIZE);
829 map->pagesneeded++;
830 }
831 curaddr += sgsize;
832 buflen -= sgsize;
833 }
834 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
835 }

--- 19 unchanged lines hidden ---

855 vaddr = (vm_offset_t)buf;
856 vendaddr = (vm_offset_t)buf + buflen;
857
858 while (vaddr < vendaddr) {
859 if (__predict_true(map->pmap == kernel_pmap))
860 paddr = pmap_kextract(vaddr);
861 else
862 paddr = pmap_extract(map->pmap, vaddr);
862 sgsize = MIN(sgsize, PAGE_SIZE);
863 map->pagesneeded++;
864 }
865 curaddr += sgsize;
866 buflen -= sgsize;
867 }
868 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
869 }

--- 19 unchanged lines hidden ---

889 vaddr = (vm_offset_t)buf;
890 vendaddr = (vm_offset_t)buf + buflen;
891
892 while (vaddr < vendaddr) {
893 if (__predict_true(map->pmap == kernel_pmap))
894 paddr = pmap_kextract(vaddr);
895 else
896 paddr = pmap_extract(map->pmap, vaddr);
863 if (run_filter(dmat, paddr,
864 min(vendaddr - vaddr,
865 (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK))),
866 map->flags & DMAMAP_COHERENT) != 0) {
897 if (must_bounce(dmat, map, paddr,
898 min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr &
899 PAGE_MASK)))) != 0) {
867 map->pagesneeded++;
868 }
869 vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
870
871 }
872 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
873 }
874}

--- 99 unchanged lines hidden ---

974{
975 bus_addr_t curaddr;
976 bus_size_t sgsize;
977 int error;
978
979 if (segs == NULL)
980 segs = dmat->segments;
981
900 map->pagesneeded++;
901 }
902 vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
903
904 }
905 CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
906 }
907}

--- 99 unchanged lines hidden ---

1007{
1008 bus_addr_t curaddr;
1009 bus_size_t sgsize;
1010 int error;
1011
1012 if (segs == NULL)
1013 segs = dmat->segments;
1014
982 if (((map->flags & DMAMAP_COHERENT) == 0) ||
983 (dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
1015 if (might_bounce(dmat, map, buflen, buflen)) {
984 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
985 if (map->pagesneeded != 0) {
986 error = _bus_dmamap_reserve_pages(dmat, map, flags);
987 if (error)
988 return (error);
989 }
990 }
991
992 while (buflen > 0) {
993 curaddr = buf;
994 sgsize = MIN(buflen, dmat->maxsegsz);
1016 _bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
1017 if (map->pagesneeded != 0) {
1018 error = _bus_dmamap_reserve_pages(dmat, map, flags);
1019 if (error)
1020 return (error);
1021 }
1022 }
1023
1024 while (buflen > 0) {
1025 curaddr = buf;
1026 sgsize = MIN(buflen, dmat->maxsegsz);
995 if ((((map->flags & DMAMAP_COHERENT) == 0) ||
996 ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) &&
997 map->pagesneeded != 0 && run_filter(dmat, curaddr,
998 sgsize, map->flags & DMAMAP_COHERENT)) {
1027 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
1028 sgsize)) {
999 sgsize = MIN(sgsize, PAGE_SIZE);
1000 curaddr = add_bounce_page(dmat, map, 0, curaddr,
1001 sgsize);
1002 }
1003 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1004 segp);
1005 if (sgsize == 0)
1006 break;

--- 40 unchanged lines hidden ---

1047 struct sync_list *sl;
1048 int error;
1049
1050 if (segs == NULL)
1051 segs = dmat->segments;
1052
1053 map->pmap = pmap;
1054
1029 sgsize = MIN(sgsize, PAGE_SIZE);
1030 curaddr = add_bounce_page(dmat, map, 0, curaddr,
1031 sgsize);
1032 }
1033 sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1034 segp);
1035 if (sgsize == 0)
1036 break;

--- 40 unchanged lines hidden ---

1077 struct sync_list *sl;
1078 int error;
1079
1080 if (segs == NULL)
1081 segs = dmat->segments;
1082
1083 map->pmap = pmap;
1084
1055 if (!(map->flags & DMAMAP_COHERENT) ||
1056 (dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
1085 if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
1057 _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
1058 if (map->pagesneeded != 0) {
1059 error = _bus_dmamap_reserve_pages(dmat, map, flags);
1060 if (error)
1061 return (error);
1062 }
1063 }
1064

--- 13 unchanged lines hidden ---

1078 * Compute the segment size, and adjust counts.
1079 */
1080 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
1081 if (sgsize > dmat->maxsegsz)
1082 sgsize = dmat->maxsegsz;
1083 if (buflen < sgsize)
1084 sgsize = buflen;
1085
1086 _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
1087 if (map->pagesneeded != 0) {
1088 error = _bus_dmamap_reserve_pages(dmat, map, flags);
1089 if (error)
1090 return (error);
1091 }
1092 }
1093

--- 13 unchanged lines hidden ---

1107 * Compute the segment size, and adjust counts.
1108 */
1109 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
1110 if (sgsize > dmat->maxsegsz)
1111 sgsize = dmat->maxsegsz;
1112 if (buflen < sgsize)
1113 sgsize = buflen;
1114
1086 if ((((map->flags & DMAMAP_COHERENT) == 0) ||
1087 ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) &&
1088 map->pagesneeded != 0 && run_filter(dmat, curaddr,
1089 sgsize, map->flags & DMAMAP_COHERENT)) {
1115 if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
1116 sgsize)) {
1090 curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
1091 sgsize);
1092 } else {
1093 sl = &map->slist[map->sync_count - 1];
1094 if (map->sync_count == 0 ||
1095#ifdef ARM_L2_PIPT
1096 curaddr != sl->busaddr + sl->datacount ||
1097#endif

--- 511 unchanged lines hidden ---
1117 curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
1118 sgsize);
1119 } else {
1120 sl = &map->slist[map->sync_count - 1];
1121 if (map->sync_count == 0 ||
1122#ifdef ARM_L2_PIPT
1123 curaddr != sl->busaddr + sl->datacount ||
1124#endif

--- 511 unchanged lines hidden ---