1/* $NetBSD: pxa2x0_dmac.c,v 1.14 2018/09/03 16:29:24 riastradh Exp $ */ 2 3/* 4 * Copyright (c) 2003, 2005 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38#include "opt_pxa2x0_dmac.h" 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/device.h> 43#include <sys/kernel.h> 44#include <sys/kmem.h> 45#include <sys/queue.h> 46 47#include <uvm/uvm_param.h> /* For PAGE_SIZE */ 48 49#include <machine/intr.h> 50#include <sys/bus.h> 51 52#include <dev/dmover/dmovervar.h> 53 54#include <arm/xscale/pxa2x0reg.h> 55#include <arm/xscale/pxa2x0var.h> 56#include <arm/xscale/pxa2x0cpu.h> 57 58#include <arm/xscale/pxa2x0_dmac.h> 59 60#include "locators.h" 61 62#undef DMAC_N_PRIORITIES 63#ifndef PXA2X0_DMAC_FIXED_PRIORITY 64#define DMAC_N_PRIORITIES 3 65#define DMAC_PRI(p) (p) 66#else 67#define DMAC_N_PRIORITIES 1 68#define DMAC_PRI(p) (0) 69#endif 70 71struct dmac_desc { 72 SLIST_ENTRY(dmac_desc) d_link; 73 struct pxa2x0_dma_desc *d_desc; 74 paddr_t d_desc_pa; 75}; 76 77/* 78 * This is used to maintain state for an in-progress transfer. 79 * It tracks the current DMA segment, and offset within the segment 80 * in the case where we had to split a request into several DMA 81 * operations due to a shortage of DMAC descriptors. 82 */ 83struct dmac_desc_segs { 84 bus_dma_segment_t *ds_curseg; /* Current segment */ 85 u_int ds_nsegs; /* Remaining segments */ 86 bus_size_t ds_offset; /* Offset within current seg */ 87}; 88 89SIMPLEQ_HEAD(dmac_xfer_state_head, dmac_xfer_state); 90 91struct dmac_xfer_state { 92 struct dmac_xfer dxs_xfer; 93#define dxs_cookie dxs_xfer.dx_cookie 94#define dxs_done dxs_xfer.dx_done 95#define dxs_priority dxs_xfer.dx_priority 96#define dxs_peripheral dxs_xfer.dx_peripheral 97#define dxs_flow dxs_xfer.dx_flow 98#define dxs_dev_width dxs_xfer.dx_dev_width 99#define dxs_burst_size dxs_xfer.dx_burst_size 100#define dxs_loop_notify dxs_xfer.dx_loop_notify 101#define dxs_desc dxs_xfer.dx_desc 102 SIMPLEQ_ENTRY(dmac_xfer_state) dxs_link; 103 SLIST_HEAD(, dmac_desc) dxs_descs; 104 struct dmac_xfer_state_head *dxs_queue; 105 u_int dxs_channel; 106#define DMAC_NO_CHANNEL (~0) 107 uint32_t dxs_dcmd; 108 struct dmac_desc_segs dxs_segs[2]; 109 bool dxs_misaligned_flag; 110}; 111 112 113#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 114/* 115 * This structure is used to maintain state for the dmover(9) backend 116 * part of the driver. We can have a number of concurrent dmover 117 * requests in progress at any given time. The exact number is given 118 * by the PXA2X0_DMAC_DMOVER_CONCURRENCY compile-time constant. One of 119 * these structures is allocated for each concurrent request. 120 */ 121struct dmac_dmover_state { 122 LIST_ENTRY(dmac_dmover_state) ds_link; /* List of idle dmover chans */ 123 struct pxadmac_softc *ds_sc; /* Uplink to pxadmac softc */ 124 struct dmover_request *ds_current; /* Current dmover request */ 125 struct dmac_xfer_state ds_xfer; 126 bus_dmamap_t ds_src_dmap; 127 bus_dmamap_t ds_dst_dmap; 128/* 129 * There is no inherent size limit in the DMA engine. 130 * The following limit is somewhat arbitrary. 131 */ 132#define DMAC_DMOVER_MAX_XFER (8*1024*1024) 133#if 0 134/* This would require 16KB * 2 just for segments... */ 135#define DMAC_DMOVER_NSEGS ((DMAC_DMOVER_MAX_XFER / PAGE_SIZE) + 1) 136#else 137#define DMAC_DMOVER_NSEGS 512 /* XXX: Only enough for 2MB */ 138#endif 139 bus_dma_segment_t ds_zero_seg; /* Used for zero-fill ops */ 140 void *ds_zero_va; 141 bus_dma_segment_t ds_fill_seg; /* Used for fill8 ops */ 142 void *ds_fill_va; 143 144#define ds_src_addr_hold ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_addr_hold 145#define ds_dst_addr_hold ds_xfer.dxs_desc[DMAC_DESC_DST].xd_addr_hold 146#define ds_src_burst ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_burst_size 147#define ds_dst_burst ds_xfer.dxs_desc[DMAC_DESC_DST].xd_burst_size 148#define ds_src_dma_segs ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_dma_segs 149#define ds_dst_dma_segs ds_xfer.dxs_desc[DMAC_DESC_DST].xd_dma_segs 150#define ds_src_nsegs ds_xfer.dxs_desc[DMAC_DESC_SRC].xd_nsegs 151#define ds_dst_nsegs ds_xfer.dxs_desc[DMAC_DESC_DST].xd_nsegs 152}; 153 154/* 155 * Overall dmover(9) backend state 156 */ 157struct dmac_dmover { 158 struct dmover_backend dd_backend; 159 int dd_busy; 160 LIST_HEAD(, dmac_dmover_state) dd_free; 161 struct dmac_dmover_state dd_state[PXA2X0_DMAC_DMOVER_CONCURRENCY]; 162}; 163#endif 164 165struct pxadmac_softc { 166 device_t sc_dev; 167 bus_space_tag_t sc_bust; 168 bus_dma_tag_t sc_dmat; 169 bus_space_handle_t sc_bush; 170 void *sc_irqcookie; 171 172 /* 173 * Queue of pending requests, per priority 174 */ 175 struct dmac_xfer_state_head sc_queue[DMAC_N_PRIORITIES]; 176 177 /* 178 * Queue of pending requests, per peripheral 179 */ 180 struct { 181 struct dmac_xfer_state_head sp_queue; 182 u_int sp_busy; 183 } sc_periph[DMAC_N_PERIPH]; 184 185 /* 186 * Active requests, per channel. 187 */ 188 struct dmac_xfer_state *sc_active[DMAC_N_CHANNELS]; 189 190 /* 191 * Channel Priority Allocation 192 */ 193 struct { 194 uint8_t p_first; 195 uint8_t p_pri[DMAC_N_CHANNELS]; 196 } sc_prio[DMAC_N_PRIORITIES]; 197#define DMAC_PRIO_END (~0) 198 uint8_t sc_channel_priority[DMAC_N_CHANNELS]; 199 200 /* 201 * DMA descriptor management 202 */ 203 bus_dmamap_t sc_desc_map; 204 bus_dma_segment_t sc_segs; 205#define DMAC_N_DESCS ((PAGE_SIZE * 2) / sizeof(struct pxa2x0_dma_desc)) 206#define DMAC_DESCS_SIZE (DMAC_N_DESCS * sizeof(struct pxa2x0_dma_desc)) 207 struct dmac_desc sc_all_descs[DMAC_N_DESCS]; 208 u_int sc_free_descs; 209 SLIST_HEAD(, dmac_desc) sc_descs; 210 211#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 212 /* 213 * dmover(9) backend state 214 */ 215 struct dmac_dmover sc_dmover; 216#endif 217}; 218 219static int pxadmac_match(device_t, cfdata_t, void *); 220static void pxadmac_attach(device_t, device_t, void *); 221 222CFATTACH_DECL_NEW(pxadmac, sizeof(struct pxadmac_softc), 223 pxadmac_match, pxadmac_attach, NULL, NULL); 224 225static struct pxadmac_softc *pxadmac_sc; 226 227static void dmac_start(struct pxadmac_softc *, dmac_priority_t); 228static int dmac_continue_xfer(struct pxadmac_softc *, struct dmac_xfer_state *); 229static u_int dmac_channel_intr(struct pxadmac_softc *, u_int); 230static int dmac_intr(void *); 231 232#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 233static void dmac_dmover_attach(struct pxadmac_softc *); 234static void dmac_dmover_process(struct dmover_backend *); 235static void dmac_dmover_run(struct dmover_backend *); 236static void dmac_dmover_done(struct dmac_xfer *, int); 237#endif 238 239static inline uint32_t 240dmac_reg_read(struct pxadmac_softc *sc, int reg) 241{ 242 243 return (bus_space_read_4(sc->sc_bust, sc->sc_bush, reg)); 244} 245 246static inline void 247dmac_reg_write(struct pxadmac_softc *sc, int reg, uint32_t val) 248{ 249 250 bus_space_write_4(sc->sc_bust, sc->sc_bush, reg, val); 251} 252 253static inline int 254dmac_allocate_channel(struct pxadmac_softc *sc, dmac_priority_t priority, 255 u_int *chanp) 256{ 257 u_int channel; 258 259 KDASSERT((u_int)priority < DMAC_N_PRIORITIES); 260 261 if ((channel = sc->sc_prio[priority].p_first) == DMAC_PRIO_END) 262 return (-1); 263 sc->sc_prio[priority].p_first = sc->sc_prio[priority].p_pri[channel]; 264 265 *chanp = channel; 266 return (0); 267} 268 269static inline void 270dmac_free_channel(struct pxadmac_softc *sc, dmac_priority_t priority, 271 u_int channel) 272{ 273 274 KDASSERT((u_int)priority < DMAC_N_PRIORITIES); 275 276 sc->sc_prio[priority].p_pri[channel] = sc->sc_prio[priority].p_first; 277 sc->sc_prio[priority].p_first = channel; 278} 279 280static int 281pxadmac_match(device_t parent, cfdata_t cf, void *aux) 282{ 283 struct pxaip_attach_args *pxa = aux; 284 285 if (pxadmac_sc || pxa->pxa_addr != PXA2X0_DMAC_BASE || 286 pxa->pxa_intr != PXA2X0_INT_DMA) 287 return (0); 288 289 pxa->pxa_size = PXA2X0_DMAC_SIZE; 290 291 return (1); 292} 293 294static void 295pxadmac_attach(device_t parent, device_t self, void *aux) 296{ 297 struct pxadmac_softc *sc = device_private(self); 298 struct pxaip_attach_args *pxa = aux; 299 struct pxa2x0_dma_desc *dd; 300 int i, nsegs; 301 302 sc->sc_dev = self; 303 sc->sc_bust = pxa->pxa_iot; 304 sc->sc_dmat = pxa->pxa_dmat; 305 306 aprint_normal(": DMA Controller\n"); 307 308 if (bus_space_map(sc->sc_bust, pxa->pxa_addr, pxa->pxa_size, 0, 309 &sc->sc_bush)) { 310 aprint_error_dev(self, "Can't map registers!\n"); 311 return; 312 } 313 314 pxadmac_sc = sc; 315 316 /* 317 * Make sure the DMAC is quiescent 318 */ 319 for (i = 0; i < DMAC_N_CHANNELS; i++) { 320 dmac_reg_write(sc, DMAC_DCSR(i), 0); 321 dmac_reg_write(sc, DMAC_DRCMR(i), 0); 322 sc->sc_active[i] = NULL; 323 } 324 if (!CPU_IS_PXA270) 325 dmac_reg_write(sc, DMAC_DINT, 326 dmac_reg_read(sc, DMAC_DINT) & DMAC_DINT_MASK); 327 328 /* 329 * Initialise the request queues 330 */ 331 for (i = 0; i < DMAC_N_PRIORITIES; i++) 332 SIMPLEQ_INIT(&sc->sc_queue[i]); 333 334 /* 335 * Initialise the request queues 336 */ 337 for (i = 0; i < DMAC_N_PERIPH; i++) { 338 sc->sc_periph[i].sp_busy = 0; 339 SIMPLEQ_INIT(&sc->sc_periph[i].sp_queue); 340 } 341 342 /* 343 * Initialise the channel priority metadata 344 */ 345 memset(sc->sc_prio, DMAC_PRIO_END, sizeof(sc->sc_prio)); 346 for (i = 0; i < DMAC_N_CHANNELS; i++) { 347#if (DMAC_N_PRIORITIES > 1) 348 if (i <= 3) 349 dmac_free_channel(sc, DMAC_PRIORITY_HIGH, i); 350 else 351 if (i <= 7) 352 dmac_free_channel(sc, DMAC_PRIORITY_MED, i); 353 else 354 dmac_free_channel(sc, DMAC_PRIORITY_LOW, i); 355#else 356 dmac_free_channel(sc, DMAC_PRIORITY_NORMAL, i); 357#endif 358 } 359 360 /* 361 * Initialise DMA descriptors and associated metadata 362 */ 363 if (bus_dmamem_alloc(sc->sc_dmat, DMAC_DESCS_SIZE, DMAC_DESCS_SIZE, 0, 364 &sc->sc_segs, 1, &nsegs, BUS_DMA_NOWAIT)) 365 panic("dmac_pxaip_attach: bus_dmamem_alloc failed"); 366 367 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_segs, 1, DMAC_DESCS_SIZE, 368 (void *)&dd, BUS_DMA_COHERENT|BUS_DMA_NOCACHE)) 369 panic("dmac_pxaip_attach: bus_dmamem_map failed"); 370 371 if (bus_dmamap_create(sc->sc_dmat, DMAC_DESCS_SIZE, 1, 372 DMAC_DESCS_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_desc_map)) 373 panic("dmac_pxaip_attach: bus_dmamap_create failed"); 374 375 if (bus_dmamap_load(sc->sc_dmat, sc->sc_desc_map, (void *)dd, 376 DMAC_DESCS_SIZE, NULL, BUS_DMA_NOWAIT)) 377 panic("dmac_pxaip_attach: bus_dmamap_load failed"); 378 379 SLIST_INIT(&sc->sc_descs); 380 sc->sc_free_descs = DMAC_N_DESCS; 381 for (i = 0; i < DMAC_N_DESCS; i++, dd++) { 382 SLIST_INSERT_HEAD(&sc->sc_descs, &sc->sc_all_descs[i], d_link); 383 sc->sc_all_descs[i].d_desc = dd; 384 sc->sc_all_descs[i].d_desc_pa = 385 sc->sc_segs.ds_addr + (sizeof(struct pxa2x0_dma_desc) * i); 386 } 387 388 sc->sc_irqcookie = pxa2x0_intr_establish(pxa->pxa_intr, IPL_BIO, 389 dmac_intr, sc); 390 KASSERT(sc->sc_irqcookie != NULL); 391 392#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 393 dmac_dmover_attach(sc); 394#endif 395} 396 397#if (PXA2X0_DMAC_DMOVER_CONCURRENCY > 0) 398/* 399 * We support the following dmover(9) operations 400 */ 401static const struct dmover_algdesc dmac_dmover_algdescs[] = { 402 {DMOVER_FUNC_ZERO, NULL, 0}, /* Zero-fill */ 403 {DMOVER_FUNC_FILL8, NULL, 0}, /* Fill with 8-bit immediate value */ 404 {DMOVER_FUNC_COPY, NULL, 1} /* Copy */ 405}; 406#define DMAC_DMOVER_ALGDESC_COUNT \ 407 (sizeof(dmac_dmover_algdescs) / sizeof(dmac_dmover_algdescs[0])) 408 409static void 410dmac_dmover_attach(struct pxadmac_softc *sc) 411{ 412 struct dmac_dmover *dd = &sc->sc_dmover; 413 struct dmac_dmover_state *ds; 414 int i, dummy; 415 416 /* 417 * Describe ourselves to the dmover(9) code 418 */ 419 dd->dd_backend.dmb_name = "pxadmac"; 420 dd->dd_backend.dmb_speed = 100*1024*1024; /* XXX */ 421 dd->dd_backend.dmb_cookie = sc; 422 dd->dd_backend.dmb_algdescs = dmac_dmover_algdescs; 423 dd->dd_backend.dmb_nalgdescs = DMAC_DMOVER_ALGDESC_COUNT; 424 dd->dd_backend.dmb_process = dmac_dmover_process; 425 dd->dd_busy = 0; 426 LIST_INIT(&dd->dd_free); 427 428 for (i = 0; i < PXA2X0_DMAC_DMOVER_CONCURRENCY; i++) { 429 ds = &dd->dd_state[i]; 430 ds->ds_sc = sc; 431 ds->ds_current = NULL; 432 ds->ds_xfer.dxs_cookie = ds; 433 ds->ds_xfer.dxs_done = dmac_dmover_done; 434 ds->ds_xfer.dxs_priority = DMAC_PRIORITY_NORMAL; 435 ds->ds_xfer.dxs_peripheral = DMAC_PERIPH_NONE; 436 ds->ds_xfer.dxs_flow = DMAC_FLOW_CTRL_NONE; 437 ds->ds_xfer.dxs_dev_width = DMAC_DEV_WIDTH_DEFAULT; 438 ds->ds_xfer.dxs_burst_size = DMAC_BURST_SIZE_8; /* XXX */ 439 ds->ds_xfer.dxs_loop_notify = DMAC_DONT_LOOP; 440 ds->ds_src_addr_hold = false; 441 ds->ds_dst_addr_hold = false; 442 ds->ds_src_nsegs = 0; 443 ds->ds_dst_nsegs = 0; 444 LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link); 445 446 /* 447 * Create dma maps for both source and destination buffers. 448 */ 449 if (bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER, 450 DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER, 451 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 452 &ds->ds_src_dmap) || 453 bus_dmamap_create(sc->sc_dmat, DMAC_DMOVER_MAX_XFER, 454 DMAC_DMOVER_NSEGS, DMAC_DMOVER_MAX_XFER, 455 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 456 &ds->ds_dst_dmap)) { 457 panic("dmac_dmover_attach: bus_dmamap_create failed"); 458 } 459 460 /* 461 * Allocate some dma memory to be used as source buffers 462 * for the zero-fill and fill-8 operations. We only need 463 * small buffers here, since we set up the DMAC source 464 * descriptor with 'ds_addr_hold' set to true. 465 */ 466 if (bus_dmamem_alloc(sc->sc_dmat, arm_pcache.dcache_line_size, 467 arm_pcache.dcache_line_size, 0, 468 &ds->ds_zero_seg, 1, &dummy, BUS_DMA_NOWAIT) || 469 bus_dmamem_alloc(sc->sc_dmat, arm_pcache.dcache_line_size, 470 arm_pcache.dcache_line_size, 0, 471 &ds->ds_fill_seg, 1, &dummy, BUS_DMA_NOWAIT)) { 472 panic("dmac_dmover_attach: bus_dmamem_alloc failed"); 473 } 474 475 if (bus_dmamem_map(sc->sc_dmat, &ds->ds_zero_seg, 1, 476 arm_pcache.dcache_line_size, &ds->ds_zero_va, 477 BUS_DMA_NOWAIT) || 478 bus_dmamem_map(sc->sc_dmat, &ds->ds_fill_seg, 1, 479 arm_pcache.dcache_line_size, &ds->ds_fill_va, 480 BUS_DMA_NOWAIT)) { 481 panic("dmac_dmover_attach: bus_dmamem_map failed"); 482 } 483 484 /* 485 * Make sure the zero-fill source buffer really is zero filled 486 */ 487 memset(ds->ds_zero_va, 0, arm_pcache.dcache_line_size); 488 } 489 490 dmover_backend_register(&sc->sc_dmover.dd_backend); 491} 492 493static void 494dmac_dmover_process(struct dmover_backend *dmb) 495{ 496 struct pxadmac_softc *sc = dmb->dmb_cookie; 497 int s = splbio(); 498 499 /* 500 * If the backend is currently idle, go process the queue. 501 */ 502 if (sc->sc_dmover.dd_busy == 0) 503 dmac_dmover_run(&sc->sc_dmover.dd_backend); 504 splx(s); 505} 506 507static void 508dmac_dmover_run(struct dmover_backend *dmb) 509{ 510 struct dmover_request *dreq; 511 struct pxadmac_softc *sc; 512 struct dmac_dmover *dd; 513 struct dmac_dmover_state *ds; 514 size_t len_src, len_dst; 515 int rv; 516 517 sc = dmb->dmb_cookie; 518 dd = &sc->sc_dmover; 519 sc->sc_dmover.dd_busy = 1; 520 521 /* 522 * As long as we can queue up dmover requests... 523 */ 524 while ((dreq = TAILQ_FIRST(&dmb->dmb_pendreqs)) != NULL && 525 (ds = LIST_FIRST(&dd->dd_free)) != NULL) { 526 /* 527 * Pull the request off the queue, mark it 'running', 528 * and make it 'current'. 529 */ 530 dmover_backend_remque(dmb, dreq); 531 dreq->dreq_flags |= DMOVER_REQ_RUNNING; 532 LIST_REMOVE(ds, ds_link); 533 ds->ds_current = dreq; 534 535 switch (dreq->dreq_outbuf_type) { 536 case DMOVER_BUF_LINEAR: 537 len_dst = dreq->dreq_outbuf.dmbuf_linear.l_len; 538 break; 539 case DMOVER_BUF_UIO: 540 len_dst = dreq->dreq_outbuf.dmbuf_uio->uio_resid; 541 break; 542 default: 543 goto error; 544 } 545 546 /* 547 * Fix up the appropriate DMA 'source' buffer 548 */ 549 if (dreq->dreq_assignment->das_algdesc->dad_ninputs) { 550 struct uio *uio; 551 /* 552 * This is a 'copy' operation. 553 * Load up the specified source buffer 554 */ 555 switch (dreq->dreq_inbuf_type) { 556 case DMOVER_BUF_LINEAR: 557 len_src= dreq->dreq_inbuf[0].dmbuf_linear.l_len; 558 if (len_src != len_dst) 559 goto error; 560 if (bus_dmamap_load(sc->sc_dmat,ds->ds_src_dmap, 561 dreq->dreq_inbuf[0].dmbuf_linear.l_addr, 562 len_src, NULL, 563 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 564 BUS_DMA_READ)) 565 goto error; 566 break; 567 568 case DMOVER_BUF_UIO: 569 uio = dreq->dreq_inbuf[0].dmbuf_uio; 570 len_src = uio->uio_resid; 571 if (uio->uio_rw != UIO_WRITE || 572 len_src != len_dst) 573 goto error; 574 if (bus_dmamap_load_uio(sc->sc_dmat, 575 ds->ds_src_dmap, uio, 576 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 577 BUS_DMA_READ)) 578 goto error; 579 break; 580 581 default: 582 goto error; 583 } 584 585 ds->ds_src_addr_hold = false; 586 } else 587 if (dreq->dreq_assignment->das_algdesc->dad_name == 588 DMOVER_FUNC_ZERO) { 589 /* 590 * Zero-fill operation. 591 * Simply load up the pre-zeroed source buffer 592 */ 593 if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap, 594 ds->ds_zero_va, arm_pcache.dcache_line_size, NULL, 595 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ)) 596 goto error; 597 598 ds->ds_src_addr_hold = true; 599 } else 600 if (dreq->dreq_assignment->das_algdesc->dad_name == 601 DMOVER_FUNC_FILL8) { 602 /* 603 * Fill-8 operation. 604 * Initialise our fill-8 buffer, and load it up. 605 * 606 * XXX: Experiment with exactly how much of the 607 * source buffer needs to be filled. Particularly WRT 608 * burst size (which is hardcoded to 8 for dmover). 609 */ 610 memset(ds->ds_fill_va, dreq->dreq_immediate[0], 611 arm_pcache.dcache_line_size); 612 613 if (bus_dmamap_load(sc->sc_dmat, ds->ds_src_dmap, 614 ds->ds_fill_va, arm_pcache.dcache_line_size, NULL, 615 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_READ)) 616 goto error; 617 618 ds->ds_src_addr_hold = true; 619 } else { 620 goto error; 621 } 622 623 /* 624 * Now do the same for the destination buffer 625 */ 626 switch (dreq->dreq_outbuf_type) { 627 case DMOVER_BUF_LINEAR: 628 if (bus_dmamap_load(sc->sc_dmat, ds->ds_dst_dmap, 629 dreq->dreq_outbuf.dmbuf_linear.l_addr, 630 len_dst, NULL, 631 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE)) 632 goto error_unload_src; 633 break; 634 635 case DMOVER_BUF_UIO: 636 if (dreq->dreq_outbuf.dmbuf_uio->uio_rw != UIO_READ) 637 goto error_unload_src; 638 if (bus_dmamap_load_uio(sc->sc_dmat, ds->ds_dst_dmap, 639 dreq->dreq_outbuf.dmbuf_uio, 640 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE)) 641 goto error_unload_src; 642 break; 643 644 default: 645 error_unload_src: 646 bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap); 647 error: 648 dreq->dreq_error = EINVAL; 649 dreq->dreq_flags |= DMOVER_REQ_ERROR; 650 ds->ds_current = NULL; 651 LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link); 652 dmover_done(dreq); 653 continue; 654 } 655 656 /* 657 * The last step before shipping the request off to the 658 * DMAC driver is to sync the dma maps. 659 */ 660 bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0, 661 ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE); 662 ds->ds_src_dma_segs = ds->ds_src_dmap->dm_segs; 663 ds->ds_src_nsegs = ds->ds_src_dmap->dm_nsegs; 664 665 bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0, 666 ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_PREREAD); 667 ds->ds_dst_dma_segs = ds->ds_dst_dmap->dm_segs; 668 ds->ds_dst_nsegs = ds->ds_dst_dmap->dm_nsegs; 669 670 /* 671 * Hand the request over to the dmac section of the driver. 672 */ 673 if ((rv = pxa2x0_dmac_start_xfer(&ds->ds_xfer.dxs_xfer)) != 0) { 674 bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap); 675 bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap); 676 dreq->dreq_error = rv; 677 dreq->dreq_flags |= DMOVER_REQ_ERROR; 678 ds->ds_current = NULL; 679 LIST_INSERT_HEAD(&dd->dd_free, ds, ds_link); 680 dmover_done(dreq); 681 } 682 } 683 684 /* All done */ 685 sc->sc_dmover.dd_busy = 0; 686} 687 688static void 689dmac_dmover_done(struct dmac_xfer *dx, int error) 690{ 691 struct dmac_dmover_state *ds = dx->dx_cookie; 692 struct pxadmac_softc *sc = ds->ds_sc; 693 struct dmover_request *dreq = ds->ds_current; 694 695 /* 696 * A dmover(9) request has just completed. 697 */ 698 699 KDASSERT(dreq != NULL); 700 701 /* 702 * Sync and unload the DMA maps 703 */ 704 bus_dmamap_sync(sc->sc_dmat, ds->ds_src_dmap, 0, 705 ds->ds_src_dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 706 bus_dmamap_sync(sc->sc_dmat, ds->ds_dst_dmap, 0, 707 ds->ds_dst_dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 708 709 bus_dmamap_unload(sc->sc_dmat, ds->ds_src_dmap); 710 bus_dmamap_unload(sc->sc_dmat, ds->ds_dst_dmap); 711 712 ds->ds_current = NULL; 713 LIST_INSERT_HEAD(&sc->sc_dmover.dd_free, ds, ds_link); 714 715 /* 716 * Record the completion status of the transfer 717 */ 718 if (error) { 719 dreq->dreq_error = error; 720 dreq->dreq_flags |= DMOVER_REQ_ERROR; 721 } else { 722 if (dreq->dreq_outbuf_type == DMOVER_BUF_UIO) 723 dreq->dreq_outbuf.dmbuf_uio->uio_resid = 0; 724 if (dreq->dreq_assignment->das_algdesc->dad_ninputs && 725 dreq->dreq_inbuf_type == DMOVER_BUF_UIO) 726 dreq->dreq_inbuf[0].dmbuf_uio->uio_resid = 0; 727 } 728 729 /* 730 * Done! 731 */ 732 dmover_done(dreq); 733 734 /* 735 * See if we can start some more dmover(9) requests. 736 * 737 * Note: We're already at splbio() here. 738 */ 739 if (sc->sc_dmover.dd_busy == 0) 740 dmac_dmover_run(&sc->sc_dmover.dd_backend); 741} 742#endif 743 744struct dmac_xfer * 745pxa2x0_dmac_allocate_xfer(void) 746{ 747 struct dmac_xfer_state *dxs; 748 749 dxs = kmem_alloc(sizeof(*dxs), KM_SLEEP); 750 751 return ((struct dmac_xfer *)dxs); 752} 753 754void 755pxa2x0_dmac_free_xfer(struct dmac_xfer *dx) 756{ 757 struct dmac_xfer_state *dxs = (struct dmac_xfer_state *)dx; 758 759 /* 760 * XXX: Should verify the DMAC is not actively using this 761 * structure before freeing... 762 */ 763 kmem_free(dxs, sizeof(*dxs)); 764} 765 766static inline int 767dmac_validate_desc(struct dmac_xfer_desc *xd, size_t *psize, 768 bool *misaligned_flag) 769{ 770 bus_dma_segment_t *dma_segs = xd->xd_dma_segs; 771 bus_addr_t periph_end; 772 bus_size_t align; 773 size_t size; 774 int i, nsegs = xd->xd_nsegs; 775 776 /* 777 * Make sure the transfer parameters are acceptable. 778 */ 779 780 if (xd->xd_addr_hold && 781 (nsegs != 1 || dma_segs[0].ds_len == 0)) 782 return (EINVAL); 783 784 periph_end = CPU_IS_PXA270 ? PXA270_PERIPH_END : PXA250_PERIPH_END; 785 for (i = 0, size = 0; i < nsegs; i++) { 786 if (dma_segs[i].ds_addr >= PXA2X0_PERIPH_START && 787 dma_segs[i].ds_addr + dma_segs[i].ds_len < periph_end) 788 /* Internal Peripherals. */ 789 align = 0x03; 790 else /* Companion-Chip/External Peripherals/External Memory. */ 791 align = 0x07; 792 /* 793 * XXXX: 794 * Also PXA27x has more constraints by pairs Source/Target. 795 */ 796 797 if (dma_segs[i].ds_addr & align) { 798 if (!CPU_IS_PXA270) 799 return (EFAULT); 800 *misaligned_flag = true; 801 } 802 size += dma_segs[i].ds_len; 803 } 804 805 *psize = size; 806 return (0); 807} 808 809static inline int 810dmac_init_desc(struct dmac_desc_segs *ds, struct dmac_xfer_desc *xd, 811 size_t *psize, bool *misaligned_flag) 812{ 813 int err; 814 815 if ((err = dmac_validate_desc(xd, psize, misaligned_flag))) 816 return (err); 817 818 ds->ds_curseg = xd->xd_dma_segs; 819 ds->ds_nsegs = xd->xd_nsegs; 820 ds->ds_offset = 0; 821 return (0); 822} 823 824int 825pxa2x0_dmac_start_xfer(struct dmac_xfer *dx) 826{ 827 struct pxadmac_softc *sc = pxadmac_sc; 828 struct dmac_xfer_state *dxs = (struct dmac_xfer_state *)dx; 829 struct dmac_xfer_desc *src, *dst; 830 size_t size; 831 int err, s; 832 833 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE && 834 dxs->dxs_peripheral >= DMAC_N_PERIPH) 835 return (EINVAL); 836 837 src = &dxs->dxs_desc[DMAC_DESC_SRC]; 838 dst = &dxs->dxs_desc[DMAC_DESC_DST]; 839 840 dxs->dxs_misaligned_flag = false; 841 842 if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_SRC], src, &size, 843 &dxs->dxs_misaligned_flag))) 844 return (err); 845 if (src->xd_addr_hold == false && 846 dxs->dxs_loop_notify != DMAC_DONT_LOOP && 847 (size % dxs->dxs_loop_notify) != 0) 848 return (EINVAL); 849 850 if ((err = dmac_init_desc(&dxs->dxs_segs[DMAC_DESC_DST], dst, &size, 851 &dxs->dxs_misaligned_flag))) 852 return (err); 853 if (dst->xd_addr_hold == false && 854 dxs->dxs_loop_notify != DMAC_DONT_LOOP && 855 (size % dxs->dxs_loop_notify) != 0) 856 return (EINVAL); 857 858 SLIST_INIT(&dxs->dxs_descs); 859 dxs->dxs_channel = DMAC_NO_CHANNEL; 860 dxs->dxs_dcmd = (((uint32_t)dxs->dxs_dev_width) << DCMD_WIDTH_SHIFT) | 861 (((uint32_t)dxs->dxs_burst_size) << DCMD_SIZE_SHIFT); 862 863 switch (dxs->dxs_flow) { 864 case DMAC_FLOW_CTRL_NONE: 865 break; 866 case DMAC_FLOW_CTRL_SRC: 867 dxs->dxs_dcmd |= DCMD_FLOWSRC; 868 break; 869 case DMAC_FLOW_CTRL_DEST: 870 dxs->dxs_dcmd |= DCMD_FLOWTRG; 871 break; 872 } 873 874 if (src->xd_addr_hold == false) 875 dxs->dxs_dcmd |= DCMD_INCSRCADDR; 876 if (dst->xd_addr_hold == false) 877 dxs->dxs_dcmd |= DCMD_INCTRGADDR; 878 879 s = splbio(); 880 if (dxs->dxs_peripheral == DMAC_PERIPH_NONE || 881 sc->sc_periph[dxs->dxs_peripheral].sp_busy == 0) { 882 dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)]; 883 SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link); 884 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) 885 sc->sc_periph[dxs->dxs_peripheral].sp_busy++; 886 dmac_start(sc, DMAC_PRI(dxs->dxs_priority)); 887 } else { 888 dxs->dxs_queue = &sc->sc_periph[dxs->dxs_peripheral].sp_queue; 889 SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link); 890 sc->sc_periph[dxs->dxs_peripheral].sp_busy++; 891 } 892 splx(s); 893 894 return (0); 895} 896 897void 898pxa2x0_dmac_abort_xfer(struct dmac_xfer *dx) 899{ 900 struct pxadmac_softc *sc = pxadmac_sc; 901 struct dmac_xfer_state *ndxs, *dxs = (struct dmac_xfer_state *)dx; 902 struct dmac_desc *desc, *ndesc; 903 struct dmac_xfer_state_head *queue; 904 uint32_t rv; 905 int s, timeout, need_start = 0; 906 907 s = splbio(); 908 909 queue = dxs->dxs_queue; 910 911 if (dxs->dxs_channel == DMAC_NO_CHANNEL) { 912 /* 913 * The request has not yet started, or it has already 914 * completed. If the request is not on a queue, just 915 * return. 916 */ 917 if (queue == NULL) { 918 splx(s); 919 return; 920 } 921 922 dxs->dxs_queue = NULL; 923 SIMPLEQ_REMOVE(queue, dxs, dmac_xfer_state, dxs_link); 924 } else { 925 /* 926 * The request is in progress. This is a bit trickier. 927 */ 928 dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel), 0); 929 930 for (timeout = 5000; timeout; timeout--) { 931 rv = dmac_reg_read(sc, DMAC_DCSR(dxs->dxs_channel)); 932 if (rv & DCSR_STOPSTATE) 933 break; 934 delay(1); 935 } 936 937 if ((rv & DCSR_STOPSTATE) == 0) 938 panic( 939 "pxa2x0_dmac_abort_xfer: channel %d failed to abort", 940 dxs->dxs_channel); 941 942 /* 943 * Free resources allocated to the request 944 */ 945 for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) { 946 ndesc = SLIST_NEXT(desc, d_link); 947 SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link); 948 sc->sc_free_descs++; 949 } 950 951 sc->sc_active[dxs->dxs_channel] = NULL; 952 dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority), 953 dxs->dxs_channel); 954 955 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) 956 dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0); 957 958 need_start = 1; 959 dxs->dxs_queue = NULL; 960 } 961 962 if (dxs->dxs_peripheral == DMAC_PERIPH_NONE || 963 sc->sc_periph[dxs->dxs_peripheral].sp_busy-- == 1 || 964 queue == &sc->sc_periph[dxs->dxs_peripheral].sp_queue) 965 goto out; 966 967 /* 968 * We've just removed the current item for this 969 * peripheral, and there is at least one more 970 * pending item waiting. Make it current. 971 */ 972 ndxs = SIMPLEQ_FIRST(&sc->sc_periph[dxs->dxs_peripheral].sp_queue); 973 dxs = ndxs; 974 KDASSERT(dxs != NULL); 975 SIMPLEQ_REMOVE_HEAD(&sc->sc_periph[dxs->dxs_peripheral].sp_queue, 976 dxs_link); 977 978 dxs->dxs_queue = &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)]; 979 SIMPLEQ_INSERT_TAIL(dxs->dxs_queue, dxs, dxs_link); 980 need_start = 1; 981 982 /* 983 * Try to start any pending requests with the same 984 * priority. 985 */ 986out: 987 if (need_start) 988 dmac_start(sc, DMAC_PRI(dxs->dxs_priority)); 989 splx(s); 990} 991 992static void 993dmac_start(struct pxadmac_softc *sc, dmac_priority_t priority) 994{ 995 struct dmac_xfer_state *dxs; 996 u_int channel; 997 998 while (sc->sc_free_descs && 999 (dxs = SIMPLEQ_FIRST(&sc->sc_queue[priority])) != NULL && 1000 dmac_allocate_channel(sc, priority, &channel) == 0) { 1001 /* 1002 * Yay, got some descriptors, a transfer request, and 1003 * an available DMA channel. 1004 */ 1005 KDASSERT(sc->sc_active[channel] == NULL); 1006 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue[priority], dxs_link); 1007 1008 /* set DMA alignment register */ 1009 if (CPU_IS_PXA270) { 1010 uint32_t dalgn; 1011 1012 dalgn = dmac_reg_read(sc, DMAC_DALGN); 1013 dalgn &= ~(1U << channel); 1014 if (dxs->dxs_misaligned_flag) 1015 dalgn |= (1U << channel); 1016 dmac_reg_write(sc, DMAC_DALGN, dalgn); 1017 } 1018 1019 dxs->dxs_channel = channel; 1020 sc->sc_active[channel] = dxs; 1021 (void) dmac_continue_xfer(sc, dxs); 1022 /* 1023 * XXX: Deal with descriptor allocation failure for loops 1024 */ 1025 } 1026} 1027 1028static int 1029dmac_continue_xfer(struct pxadmac_softc *sc, struct dmac_xfer_state *dxs) 1030{ 1031 struct dmac_desc *desc, *prev_desc; 1032 struct pxa2x0_dma_desc *dd; 1033 struct dmac_desc_segs *src_ds, *dst_ds; 1034 struct dmac_xfer_desc *src_xd, *dst_xd; 1035 bus_dma_segment_t *src_seg, *dst_seg; 1036 bus_addr_t src_mem_addr, dst_mem_addr; 1037 bus_size_t src_size, dst_size, this_size; 1038 1039 desc = NULL; 1040 prev_desc = NULL; 1041 dd = NULL; 1042 src_ds = &dxs->dxs_segs[DMAC_DESC_SRC]; 1043 dst_ds = &dxs->dxs_segs[DMAC_DESC_DST]; 1044 src_xd = &dxs->dxs_desc[DMAC_DESC_SRC]; 1045 dst_xd = &dxs->dxs_desc[DMAC_DESC_DST]; 1046 SLIST_INIT(&dxs->dxs_descs); 1047 1048 /* 1049 * As long as the source/destination buffers have DMA segments, 1050 * and we have free descriptors, build a DMA chain. 1051 */ 1052 while (src_ds->ds_nsegs && dst_ds->ds_nsegs && sc->sc_free_descs) { 1053 src_seg = src_ds->ds_curseg; 1054 src_mem_addr = src_seg->ds_addr + src_ds->ds_offset; 1055 if (src_xd->xd_addr_hold == false && 1056 dxs->dxs_loop_notify != DMAC_DONT_LOOP) 1057 src_size = dxs->dxs_loop_notify; 1058 else 1059 src_size = src_seg->ds_len - src_ds->ds_offset; 1060 1061 dst_seg = dst_ds->ds_curseg; 1062 dst_mem_addr = dst_seg->ds_addr + dst_ds->ds_offset; 1063 if (dst_xd->xd_addr_hold == false && 1064 dxs->dxs_loop_notify != DMAC_DONT_LOOP) 1065 dst_size = dxs->dxs_loop_notify; 1066 else 1067 dst_size = dst_seg->ds_len - dst_ds->ds_offset; 1068 1069 /* 1070 * We may need to split a source or destination segment 1071 * across two or more DMAC descriptors. 1072 */ 1073 while (src_size && dst_size && 1074 (desc = SLIST_FIRST(&sc->sc_descs)) != NULL) { 1075 SLIST_REMOVE_HEAD(&sc->sc_descs, d_link); 1076 sc->sc_free_descs--; 1077 1078 /* 1079 * Decide how much data we're going to transfer 1080 * using this DMAC descriptor. 1081 */ 1082 if (src_xd->xd_addr_hold) 1083 this_size = dst_size; 1084 else 1085 if (dst_xd->xd_addr_hold) 1086 this_size = src_size; 1087 else 1088 this_size = uimin(dst_size, src_size); 1089 1090 /* 1091 * But clamp the transfer size to the DMAC 1092 * descriptor's maximum. 1093 */ 1094 this_size = uimin(this_size, DCMD_LENGTH_MASK & ~0x1f); 1095 1096 /* 1097 * Fill in the DMAC descriptor 1098 */ 1099 dd = desc->d_desc; 1100 dd->dd_dsadr = src_mem_addr; 1101 dd->dd_dtadr = dst_mem_addr; 1102 dd->dd_dcmd = dxs->dxs_dcmd | this_size; 1103 1104 /* 1105 * Link it into the chain 1106 */ 1107 if (prev_desc) { 1108 SLIST_INSERT_AFTER(prev_desc, desc, d_link); 1109 prev_desc->d_desc->dd_ddadr = desc->d_desc_pa; 1110 } else { 1111 SLIST_INSERT_HEAD(&dxs->dxs_descs, desc, 1112 d_link); 1113 } 1114 prev_desc = desc; 1115 1116 /* 1117 * Update the source/destination pointers 1118 */ 1119 if (src_xd->xd_addr_hold == false) { 1120 src_size -= this_size; 1121 src_ds->ds_offset += this_size; 1122 if (src_ds->ds_offset == src_seg->ds_len) { 1123 KDASSERT(src_size == 0); 1124 src_ds->ds_curseg = ++src_seg; 1125 src_ds->ds_offset = 0; 1126 src_ds->ds_nsegs--; 1127 } else 1128 src_mem_addr += this_size; 1129 } 1130 1131 if (dst_xd->xd_addr_hold == false) { 1132 dst_size -= this_size; 1133 dst_ds->ds_offset += this_size; 1134 if (dst_ds->ds_offset == dst_seg->ds_len) { 1135 KDASSERT(dst_size == 0); 1136 dst_ds->ds_curseg = ++dst_seg; 1137 dst_ds->ds_offset = 0; 1138 dst_ds->ds_nsegs--; 1139 } else 1140 dst_mem_addr += this_size; 1141 } 1142 } 1143 1144 if (dxs->dxs_loop_notify != DMAC_DONT_LOOP) { 1145 /* 1146 * We must be able to allocate descriptors for the 1147 * entire loop. Otherwise, return them to the pool 1148 * and bail. 1149 */ 1150 if (desc == NULL) { 1151 struct dmac_desc *ndesc; 1152 for (desc = SLIST_FIRST(&dxs->dxs_descs); 1153 desc; desc = ndesc) { 1154 ndesc = SLIST_NEXT(desc, d_link); 1155 SLIST_INSERT_HEAD(&sc->sc_descs, desc, 1156 d_link); 1157 sc->sc_free_descs++; 1158 } 1159 1160 return (0); 1161 } 1162 1163 KASSERT(dd != NULL); 1164 dd->dd_dcmd |= DCMD_ENDIRQEN; 1165 } 1166 } 1167 1168 /* 1169 * Did we manage to build a chain? 1170 * If not, just return. 1171 */ 1172 if (dd == NULL) 1173 return (0); 1174 1175 if (dxs->dxs_loop_notify == DMAC_DONT_LOOP) { 1176 dd->dd_dcmd |= DCMD_ENDIRQEN; 1177 dd->dd_ddadr = DMAC_DESC_LAST; 1178 } else 1179 dd->dd_ddadr = SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa; 1180 1181 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) { 1182 dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 1183 dxs->dxs_channel | DRCMR_MAPVLD); 1184 } 1185 dmac_reg_write(sc, DMAC_DDADR(dxs->dxs_channel), 1186 SLIST_FIRST(&dxs->dxs_descs)->d_desc_pa); 1187 dmac_reg_write(sc, DMAC_DCSR(dxs->dxs_channel), 1188 DCSR_ENDINTR | DCSR_RUN); 1189 1190 return (1); 1191} 1192 1193static u_int 1194dmac_channel_intr(struct pxadmac_softc *sc, u_int channel) 1195{ 1196 struct dmac_xfer_state *dxs; 1197 struct dmac_desc *desc, *ndesc; 1198 uint32_t dcsr; 1199 u_int rv = 0; 1200 1201 dcsr = dmac_reg_read(sc, DMAC_DCSR(channel)); 1202 dmac_reg_write(sc, DMAC_DCSR(channel), dcsr); 1203 if (dmac_reg_read(sc, DMAC_DCSR(channel)) & DCSR_STOPSTATE) 1204 dmac_reg_write(sc, DMAC_DCSR(channel), dcsr & ~DCSR_RUN); 1205 1206 if ((dxs = sc->sc_active[channel]) == NULL) { 1207 aprint_error_dev(sc->sc_dev, 1208 "Stray DMAC interrupt for unallocated channel %d\n", 1209 channel); 1210 return (0); 1211 } 1212 1213 /* 1214 * Clear down the interrupt in the DMA Interrupt Register 1215 */ 1216 if (!CPU_IS_PXA270) 1217 dmac_reg_write(sc, DMAC_DINT, (1u << channel)); 1218 1219 /* 1220 * If this is a looping request, invoke the 'done' callback and 1221 * return immediately. 1222 */ 1223 if (dxs->dxs_loop_notify != DMAC_DONT_LOOP && 1224 (dcsr & DCSR_BUSERRINTR) == 0) { 1225 (dxs->dxs_done)(&dxs->dxs_xfer, 0); 1226 return (0); 1227 } 1228 1229 /* 1230 * Free the descriptors allocated to the completed transfer 1231 * 1232 * XXX: If there is more data to transfer in this request, 1233 * we could simply reuse some or all of the descriptors 1234 * already allocated for the transfer which just completed. 1235 */ 1236 for (desc = SLIST_FIRST(&dxs->dxs_descs); desc; desc = ndesc) { 1237 ndesc = SLIST_NEXT(desc, d_link); 1238 SLIST_INSERT_HEAD(&sc->sc_descs, desc, d_link); 1239 sc->sc_free_descs++; 1240 } 1241 1242 if ((dcsr & DCSR_BUSERRINTR) || dmac_continue_xfer(sc, dxs) == 0) { 1243 /* 1244 * The transfer completed (possibly due to an error), 1245 * -OR- we were unable to continue any remaining 1246 * segment of the transfer due to a lack of descriptors. 1247 * 1248 * In either case, we have to free up DMAC resources 1249 * allocated to the request. 1250 */ 1251 sc->sc_active[channel] = NULL; 1252 dmac_free_channel(sc, DMAC_PRI(dxs->dxs_priority), channel); 1253 dxs->dxs_channel = DMAC_NO_CHANNEL; 1254 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE) 1255 dmac_reg_write(sc, DMAC_DRCMR(dxs->dxs_peripheral), 0); 1256 1257 if (dxs->dxs_segs[DMAC_DESC_SRC].ds_nsegs == 0 || 1258 dxs->dxs_segs[DMAC_DESC_DST].ds_nsegs == 0 || 1259 (dcsr & DCSR_BUSERRINTR)) { 1260 1261 /* 1262 * The transfer is complete. 1263 */ 1264 dxs->dxs_queue = NULL; 1265 rv = 1u << DMAC_PRI(dxs->dxs_priority); 1266 1267 if (dxs->dxs_peripheral != DMAC_PERIPH_NONE && 1268 --sc->sc_periph[dxs->dxs_peripheral].sp_busy != 0) { 1269 struct dmac_xfer_state *ndxs; 1270 /* 1271 * We've just removed the current item for this 1272 * peripheral, and there is at least one more 1273 * pending item waiting. Make it current. 1274 */ 1275 ndxs = SIMPLEQ_FIRST( 1276 &sc->sc_periph[dxs->dxs_peripheral].sp_queue); 1277 KDASSERT(ndxs != NULL); 1278 SIMPLEQ_REMOVE_HEAD( 1279 &sc->sc_periph[dxs->dxs_peripheral].sp_queue, 1280 dxs_link); 1281 1282 ndxs->dxs_queue = 1283 &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)]; 1284 SIMPLEQ_INSERT_TAIL(ndxs->dxs_queue, ndxs, 1285 dxs_link); 1286 } 1287 1288 (dxs->dxs_done)(&dxs->dxs_xfer, 1289 (dcsr & DCSR_BUSERRINTR) ? EFAULT : 0); 1290 } else { 1291 /* 1292 * The request is not yet complete, but we were unable 1293 * to make any headway at this time because there are 1294 * no free descriptors. Put the request back at the 1295 * head of the appropriate priority queue. It'll be 1296 * dealt with as other in-progress transfers complete. 1297 */ 1298 SIMPLEQ_INSERT_HEAD( 1299 &sc->sc_queue[DMAC_PRI(dxs->dxs_priority)], dxs, 1300 dxs_link); 1301 } 1302 } 1303 1304 return (rv); 1305} 1306 1307static int 1308dmac_intr(void *arg) 1309{ 1310 struct pxadmac_softc *sc = arg; 1311 uint32_t rv, mask; 1312 u_int chan, pri; 1313 1314 rv = dmac_reg_read(sc, DMAC_DINT); 1315 if ((rv & DMAC_DINT_MASK) == 0) 1316 return (0); 1317 1318 /* 1319 * Deal with completed transfers 1320 */ 1321 for (chan = 0, mask = 1u, pri = 0; 1322 chan < DMAC_N_CHANNELS; chan++, mask <<= 1) { 1323 if (rv & mask) 1324 pri |= dmac_channel_intr(sc, chan); 1325 } 1326 1327 /* 1328 * Now try to start any queued transfers 1329 */ 1330#if (DMAC_N_PRIORITIES > 1) 1331 if (pri & (1u << DMAC_PRIORITY_HIGH)) 1332 dmac_start(sc, DMAC_PRIORITY_HIGH); 1333 if (pri & (1u << DMAC_PRIORITY_MED)) 1334 dmac_start(sc, DMAC_PRIORITY_MED); 1335 if (pri & (1u << DMAC_PRIORITY_LOW)) 1336 dmac_start(sc, DMAC_PRIORITY_LOW); 1337#else 1338 if (pri) 1339 dmac_start(sc, DMAC_PRIORITY_NORMAL); 1340#endif 1341 1342 return (1); 1343} 1344