Deleted Added
busdma_machdep-v6.c (269208) busdma_machdep-v6.c (269209)
1/*-
2 * Copyright (c) 2012 Ian Lepore
3 * Copyright (c) 2010 Mark Tinguely
4 * Copyright (c) 2004 Olivier Houchard
5 * Copyright (c) 2002 Peter Grehan
6 * Copyright (c) 1997, 1998 Justin T. Gibbs.
7 * All rights reserved.
8 *

--- 17 unchanged lines hidden ---

26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269208 2014-07-29 02:35:44Z ian $");
34__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269209 2014-07-29 02:36:02Z ian $");
35
36#define _ARM32_BUS_DMA_PRIVATE
37#include <sys/param.h>
38#include <sys/kdb.h>
39#include <ddb/ddb.h>
40#include <ddb/db_output.h>
41#include <sys/systm.h>
42#include <sys/malloc.h>

--- 204 unchanged lines hidden ---

247exclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr)
248{
249 int i;
250
251 if (lowaddr >= BUS_SPACE_MAXADDR)
252 return (0);
253
254 for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
255 if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1])
256 || (lowaddr < phys_avail[i] &&
257 highaddr >= phys_avail[i]))
255 if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) ||
256 (lowaddr < phys_avail[i] && highaddr >= phys_avail[i]))
258 return (1);
259 }
260 return (0);
261}
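
For context (not part of this change): the lowaddr/highaddr window that exclusion_bounce_check() compares against phys_avail[] comes from the values a driver hands to bus_dma_tag_create(9). A minimal sketch of how such a window is established, assuming a hypothetical device limited to the low 16 MB; the function name, the 4 KB sizes, and the literal 0x00ffffff limit are illustrative and not taken from this file:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

/*
 * Hypothetical attach-time helper.  The lowaddr argument below defines the
 * exclusion window; exclusion_bounce_check() above decides whether that
 * window actually overlaps RAM and can therefore force bouncing.
 */
static int
mydev_create_dma_tag(device_t dev, bus_dma_tag_t *tagp)
{

	return (bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* inherit parent restrictions */
	    4, 0,			/* alignment, boundary */
	    0x00ffffff,			/* lowaddr: assumed 16 MB limit */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    4096, 1, 4096,		/* maxsize, nsegments, maxsegsz */
	    0, NULL, NULL,		/* flags, lockfunc, lockfuncarg */
	    tagp));
}
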
262
263/*
264 * Return true if the tag has an exclusion zone that could lead to bouncing.
265 */

--- 51 unchanged lines hidden ---

317int
318run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t size, int coherent)
319{
320 int retval;
321
322 retval = 0;
323
324 do {
325 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
326 || alignment_bounce(dmat, paddr) ||
327 (!coherent && cacheline_bounce(paddr, size)))
328 && (dmat->filter == NULL
329 || (*dmat->filter)(dmat->filterarg, paddr) != 0))
324 if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
325 alignment_bounce(dmat, paddr) ||
326 (!coherent && cacheline_bounce(paddr, size))) &&
327 (dmat->filter == NULL ||
328 dmat->filter(dmat->filterarg, paddr) != 0))
330 retval = 1;
329 retval = 1;
331
332 dmat = dmat->parent;
333 } while (retval == 0 && dmat != NULL);
334 return (retval);
335}
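
The chained test above can be read as one predicate per tag in the parent chain. A stand-alone sketch of that per-tag decision, with stand-in parameter names for the dmat fields; the real code also consults the tag's optional filter callback and the exact cache-line test lives in cacheline_bounce(), which is in the hidden lines:

#include <stdint.h>

/*
 * Illustrative restatement of one run_filter() iteration: bounce if the
 * page falls in the exclusion window, violates the tag's alignment, or
 * (for non-coherent memory) the buffer start or length is not a multiple
 * of a cache line.  Parent-tag walking and the filter hook are omitted.
 */
static int
would_bounce(uint64_t paddr, uint64_t size, int coherent,
    uint64_t lowaddr, uint64_t highaddr,
    uint64_t align_mask, uint64_t cline_mask)
{
	int excluded = (paddr > lowaddr && paddr <= highaddr);
	int misaligned = ((paddr & align_mask) != 0);
	int partial_line = !coherent && (((paddr | size) & cline_mask) != 0);

	return (excluded || misaligned || partial_line);
}
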
336
337/*
338 * Convenience function for manipulating driver locks from busdma (during
339 * busdma_swi, for example). Drivers that don't provide their own locks

--- 235 unchanged lines hidden ---

575 * limit. Even if the tag isn't flagged as COULD_BOUNCE due to
576 * alignment and boundary constraints, it could still auto-bounce due to
577 * cacheline alignment, which requires at most two bounce pages.
578 */
579 if (dmat->flags & BUS_DMA_COULD_BOUNCE)
580 maxpages = MAX_BPAGES;
581 else
582 maxpages = 2 * bz->map_count;
583 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
584 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
581 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
582 (bz->map_count > 0 && bz->total_bpages < maxpages)) {
585 int pages;
586
587 pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
588 pages = MIN(maxpages - bz->total_bpages, pages);
589 pages = MAX(pages, 2);
590 if (alloc_bounce_pages(dmat, pages) < pages)
591 return (ENOMEM);
592
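
A worked example of the reservation arithmetic above, as a stand-alone program; the 64 KB maxsize, map_count of 4, empty zone, and 4 KB page size are assumed values, and the macros mirror the kernel's roundup2()/atop() for power-of-two page sizes:

#include <stdio.h>

#define PAGE_SIZE	4096			/* assumed page size */
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))
#define atop(x)		((x) / PAGE_SIZE)	/* bytes to pages */
#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	unsigned long maxsize = 64 * 1024;	/* assumed dmat->maxsize */
	int map_count = 4;			/* assumed bz->map_count */
	int total_bpages = 0;			/* zone starts empty */
	int maxpages, pages;

	maxpages = 2 * map_count;			/* tag not COULD_BOUNCE */
	pages = atop(roundup2(maxsize, PAGE_SIZE)) + 1;	/* 16 + 1 = 17 */
	pages = MIN(maxpages - total_bpages, pages);	/* clipped to 8 */
	pages = MAX(pages, 2);				/* never fewer than 2 */
	printf("reserve %d bounce pages\n", pages);	/* prints 8 */
	return (0);
}
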

--- 650 unchanged lines hidden ---

1243 /* Handle data bouncing. */
1244 CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
1245 "performing bounce", __func__, dmat, dmat->flags, op);
1246
1247 if (op & BUS_DMASYNC_PREWRITE) {
1248 while (bpage != NULL) {
1249 if (bpage->datavaddr != 0)
1250 bcopy((void *)bpage->datavaddr,
1251 (void *)bpage->vaddr,
1252 bpage->datacount);
1249 (void *)bpage->vaddr,
1250 bpage->datacount);
1253 else
1254 physcopyout(bpage->dataaddr,
1251 else
1252 physcopyout(bpage->dataaddr,
1255 (void *)bpage->vaddr,
1256 bpage->datacount);
1253 (void *)bpage->vaddr,
1254 bpage->datacount);
1257 cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
1258 bpage->datacount);
1259 l2cache_wb_range((vm_offset_t)bpage->vaddr,
1260 (vm_offset_t)bpage->busaddr,
1261 bpage->datacount);
1262 bpage = STAILQ_NEXT(bpage, links);
1263 }
1264 dmat->bounce_zone->total_bounced++;

--- 25 unchanged lines hidden ---

1290 if (len & arm_dcache_align_mask)
1291 len = (len -
1292 (len & arm_dcache_align_mask)) +
1293 arm_dcache_align;
1294 cpu_dcache_inv_range(startv, len);
1295 l2cache_inv_range(startv, startp, len);
1296 if (bpage->datavaddr != 0)
1297 bcopy((void *)bpage->vaddr,
1298 (void *)bpage->datavaddr,
1299 bpage->datacount);
1296 (void *)bpage->datavaddr,
1297 bpage->datacount);
1300 else
1301 physcopyin((void *)bpage->vaddr,
1298 else
1299 physcopyin((void *)bpage->vaddr,
1302 bpage->dataaddr,
1303 bpage->datacount);
1300 bpage->dataaddr,
1301 bpage->datacount);
1304 bpage = STAILQ_NEXT(bpage, links);
1305 }
1306 dmat->bounce_zone->total_bounced++;
1307 }
1308 }
1309 if (map->flags & DMAMAP_COHERENT)
1310 return;
1311

--- 72 unchanged lines hidden ---

1384
1385static int
1386alloc_bounce_zone(bus_dma_tag_t dmat)
1387{
1388 struct bounce_zone *bz;
1389
1390 /* Check to see if we already have a suitable zone */
1391 STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1392 if ((dmat->alignment <= bz->alignment)
1393 && (dmat->lowaddr >= bz->lowaddr)) {
1390 if ((dmat->alignment <= bz->alignment) &&
1391 (dmat->lowaddr >= bz->lowaddr)) {
1394 dmat->bounce_zone = bz;
1395 return (0);
1396 }
1397 }
1398
1399 if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
1400 M_NOWAIT | M_ZERO)) == NULL)
1401 return (ENOMEM);

--- 61 unchanged lines hidden ---

1463 int count;
1464
1465 bz = dmat->bounce_zone;
1466 count = 0;
1467 while (numpages > 0) {
1468 struct bounce_page *bpage;
1469
1470 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
1471 M_NOWAIT | M_ZERO);
1469 M_NOWAIT | M_ZERO);
1472
1473 if (bpage == NULL)
1474 break;
1475 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
1470
1471 if (bpage == NULL)
1472 break;
1473 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
1476 M_NOWAIT, 0ul,
1477 bz->lowaddr,
1478 PAGE_SIZE,
1479 0);
1474 M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
1480 if (bpage->vaddr == 0) {
1481 free(bpage, M_DEVBUF);
1482 break;
1483 }
1484 bpage->busaddr = pmap_kextract(bpage->vaddr);
1485 mtx_lock(&bounce_lock);
1486 STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1487 total_bpages++;

--- 89 unchanged lines hidden ---

1577 mtx_lock(&bounce_lock);
1578 STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1579 bz->free_bpages++;
1580 bz->active_bpages--;
1581 if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1582 if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1583 STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1584 STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1585 map, links);
1580 map, links);
1586 busdma_swi_pending = 1;
1587 bz->total_deferred++;
1588 swi_sched(vm_ih, 0);
1589 }
1590 }
1591 mtx_unlock(&bounce_lock);
1592}
1593
1594void
1595busdma_swi(void)
1596{
1597 bus_dma_tag_t dmat;
1598 struct bus_dmamap *map;
1599
1600 mtx_lock(&bounce_lock);
1601 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1602 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1603 mtx_unlock(&bounce_lock);
1604 dmat = map->dmat;
1605 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
1600 dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
1606 bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
1601 bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
1607 map->callback_arg, BUS_DMA_WAITOK);
1608 (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
1602 map->callback_arg, BUS_DMA_WAITOK);
1603 dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
1609 mtx_lock(&bounce_lock);
1610 }
1611 mtx_unlock(&bounce_lock);
1612}
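
The lockfunc calls in busdma_swi() above follow the bus_dma_lock_t contract. A hypothetical driver-supplied callback is sketched below for illustration; in practice most drivers simply pass the stock busdma_lock_mutex together with their own mutex as lockfuncarg to bus_dma_tag_create():

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>

/*
 * Hypothetical lock callback.  busdma_swi() calls it with BUS_DMA_LOCK
 * before re-running a deferred bus_dmamap_load_mem() and with
 * BUS_DMA_UNLOCK afterwards; "arg" is whatever the driver registered as
 * lockfuncarg when creating the tag.
 */
static void
mydev_dma_lock(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *m = arg;

	if (op == BUS_DMA_LOCK)
		mtx_lock(m);
	else
		mtx_unlock(m);
}
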