busdma_machdep-v4.c (r134934 -> r135644)
1/*
2 * Copyright (c) 2004 Olivier Houchard
3 * Copyright (c) 2002 Peter Grehan
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 15 unchanged lines hidden ---

24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 134934 2004-09-08 04:54:19Z scottl $");
32__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 135644 2004-09-23 21:57:47Z cognet $");
33
34/*
35 * MacPPC bus dma support routines
36 */
37
38#define _ARM32_BUS_DMA_PRIVATE
39#include <sys/param.h>
40#include <sys/systm.h>

--- 33 unchanged lines hidden ---

74 /*
75 * DMA range for this tag. If the page doesn't fall within
76 * one of these ranges, an error is returned. The caller
77 * may then decide what to do with the transfer. If the
78 * range pointer is NULL, it is ignored.
79 */
80 struct arm32_dma_range *ranges;
81 int _nranges;
82
83};
84
82};
83
85struct arm_seglist {
86 bus_dma_segment_t seg;
87 SLIST_ENTRY(arm_seglist) next;
88};
89
90#define MAX_SEGS 512
84#define DMAMAP_LINEAR 0x1
85#define DMAMAP_MBUF 0x2
86#define DMAMAP_UIO 0x4
87#define DMAMAP_TYPE_MASK (DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
88#define DMAMAP_COHERENT 0x8
91struct bus_dmamap {
92 bus_dma_tag_t dmat;
93 int flags;
94 SLIST_HEAD(, arm_seglist) seglist;
90 bus_dma_tag_t dmat;
91 int flags;
92 void *buffer;
93 int len;
95};
96
97/*
98 * Check to see if the specified page is in an allowed DMA range.
99 */
100
101static int
102bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
103 bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
104 int flags, vm_offset_t *lastaddrp, int *segp,
105 int first);
106static __inline struct arm32_dma_range *
107_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
108 bus_addr_t curaddr)
109{
110 struct arm32_dma_range *dr;
111 int i;
112
113 for (i = 0, dr = ranges; i < nranges; i++, dr++) {

--- 42 unchanged lines hidden ---

156#else
157 printf("DRIVER_ERROR: busdma dflt_lock called\n");
158#endif
159}
160
161/*
162 * Allocate a device specific dma_tag.
163 */
164#define SEG_NB 1024
165
166int
167bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
168 bus_size_t boundary, bus_addr_t lowaddr,
169 bus_addr_t highaddr, bus_dma_filter_t *filter,
170 void *filterarg, bus_size_t maxsize, int nsegments,
171 bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
172 void *lockfuncarg, bus_dma_tag_t *dmat)
173{
174 bus_dma_tag_t newtag;
175 int error = 0;
176 /* Return a NULL tag on failure */
177 *dmat = NULL;
178
179 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
180 if (newtag == NULL)
181 return (ENOMEM);
182
183 newtag->parent = parent;

--- 5 unchanged lines hidden ---

189 newtag->filterarg = filterarg;
190 newtag->maxsize = maxsize;
191 newtag->nsegments = nsegments;
192 newtag->maxsegsz = maxsegsz;
193 newtag->flags = flags;
194 newtag->ref_count = 1; /* Count ourself */
195 newtag->map_count = 0;
196 newtag->ranges = bus_dma_get_range();
197 newtag->_nranges = bus_dma_get_range_nb();
198 if (lockfunc != NULL) {
199 newtag->lockfunc = lockfunc;
200 newtag->lockfuncarg = lockfuncarg;
201 } else {
202 newtag->lockfunc = dflt_lock;
203 newtag->lockfuncarg = NULL;
204 }
205 /*
206 * Take into account any restrictions imposed by our parent tag
207 */
208 if (parent != NULL) {
209 newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
210 newtag->highaddr = max(parent->highaddr, newtag->highaddr);
211 if (newtag->boundary == 0)
212 newtag->boundary = parent->boundary;

--- 40 unchanged lines hidden ---

253 dmat = parent;
254 } else
255 dmat = NULL;
256 }
257 }
258 return (0);
259}
260
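A typical consumer builds a tag once at device attach. A minimal sketch, assuming a hypothetical softc and transfer sizes (only bus_dma_tag_create() itself and the BUS_SPACE_* limits come from the KPI shown above):

	/* Assumes <sys/param.h>, <sys/systm.h>, <machine/bus.h>. */
	struct example_softc {				/* hypothetical */
		bus_dma_tag_t	sc_dtag;
	};

	static int
	example_create_tag(struct example_softc *sc)
	{
		/* NULL lockfunc/lockfuncarg selects dflt_lock() above. */
		return (bus_dma_tag_create(NULL,	/* parent */
		    1, 0,				/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
		    BUS_SPACE_MAXADDR,			/* highaddr */
		    NULL, NULL,				/* filter, filterarg */
		    MAXBSIZE, 1, MAXBSIZE,		/* maxsize, nsegments, maxsegsz */
		    0, NULL, NULL, &sc->sc_dtag));
	}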
260static void
261arm_dmamap_freesegs(bus_dmamap_t map)
262{
263 struct arm_seglist *seg = SLIST_FIRST(&map->seglist);
264
265 while (seg) {
266 struct arm_seglist *next;
267
268 next = SLIST_NEXT(seg, next);
269 SLIST_REMOVE_HEAD(&map->seglist, next);
270 free(seg, M_DEVBUF);
271 seg = next;
272 }
273}
274
275static int
276arm_dmamap_addseg(bus_dmamap_t map, vm_offset_t addr, vm_size_t size)
277{
278 struct arm_seglist *seg = malloc(sizeof(*seg), M_DEVBUF, M_NOWAIT);
279
280 if (!seg)
281 return (ENOMEM);
282 seg->seg.ds_addr = addr;
283 seg->seg.ds_len = size;
284 SLIST_INSERT_HEAD(&map->seglist, seg, next);
285 return (0);
286}
261/*
262 * Allocate a handle for mapping from kva/uva/physical
263 * address space into bus device space.
264 */
265int
266bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
267{
268 bus_dmamap_t newmap;
269
270 newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
271 if (newmap == NULL)
272 return (ENOMEM);
300 SLIST_INIT(&newmap->seglist);
301 *mapp = newmap;
273 *mapp = newmap;
274 newmap->dmat = dmat;
275 newmap->flags = 0;
276 dmat->map_count++;
277
278 return (0);
279}
280
281/*
282 * Destroy a handle for mapping from kva/uva/physical
283 * address space into bus device space.
284 */
285int
286bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
287{
314 arm_dmamap_freesegs(map);
288
289 free(map, M_DEVBUF);
290 dmat->map_count--;
291 return (0);
292}
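With this revision a map from bus_dmamap_create() is only a small malloc'd record (the tag, the flags, and the buffer/length captured at load time), and bus_dmamap_destroy() just frees it. A lifecycle sketch, reusing the hypothetical tag from above:

	bus_dmamap_t map;
	int error;

	error = bus_dmamap_create(sc->sc_dtag, 0, &map);
	if (error)
		return (error);
	/* ... repeated bus_dmamap_load() / bus_dmamap_unload() cycles ... */
	bus_dmamap_destroy(sc->sc_dtag, map);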
293
294/*
295 * Allocate a piece of memory that can be efficiently mapped into
296 * bus device space based on the constraints listed in the dma tag.
297 * A dmamap for use with dmamap_load is also allocated.
298 */
299int
300bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
301 bus_dmamap_t *mapp)
302{
329 bus_dmamap_t newmap;
303 bus_dmamap_t newmap = NULL;
304
305 int mflags;
306
307 if (flags & BUS_DMA_NOWAIT)
308 mflags = M_NOWAIT;
309 else
310 mflags = M_WAITOK;
311 if (flags & BUS_DMA_ZERO)
312 mflags |= M_ZERO;
313
340 newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
341 if (newmap == NULL)
342 return (ENOMEM);
343 SLIST_INIT(&newmap->seglist);
344 *mapp = newmap;
314 if (!*mapp) {
315 newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
316 if (newmap == NULL)
317 return (ENOMEM);
318 dmat->map_count++;
319 newmap->flags = 0;
320 *mapp = newmap;
321 newmap->dmat = dmat;
322 }
323
324 if (dmat->maxsize <= PAGE_SIZE) {
325 *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
326 } else {
327 /*
328 * XXX Use Contigmalloc until it is merged into this facility
329 * and handles multi-seg allocations. Nobody is doing
330 * multi-seg allocations yet though.
331 */
332 *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
333 0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
334 dmat->boundary);
335 }
357
358 if (*vaddr == NULL) {
336 if (*vaddr == NULL && newmap != NULL) {
337 free(newmap, M_DEVBUF);
338 dmat->map_count--;
339 *mapp = NULL;
340 return (ENOMEM);
341 }
342 return (0);
343}
344
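Note the new if (!*mapp) test above: the revised bus_dmamem_alloc() creates a map only when the caller passes a NULL one in, so the pointer must be initialized. A hedged usage sketch (buffer names hypothetical):

	void		*ring;
	bus_dmamap_t	ringmap = NULL;	/* NULL: let bus_dmamem_alloc() make one */
	int		error;

	error = bus_dmamem_alloc(sc->sc_dtag, &ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ringmap);
	if (error)
		return (error);
	/* ... load and use the memory ... */
	bus_dmamem_free(sc->sc_dtag, ring, ringmap);	/* not bus_dmamap_destroy() */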
345/*
346 * Free a piece of memory and it's allocated dmamap, that was allocated
347 * via bus_dmamem_alloc. Make the same choice for free/contigfree.
348 */
349void
350bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
351{
374 if (map != NULL)
375 panic("bus_dmamem_free: Invalid map freed\n");
352 if (dmat->maxsize <= PAGE_SIZE)
353 free(vaddr, M_DEVBUF);
354 else {
355 contigfree(vaddr, dmat->maxsize, M_DEVBUF);
356 }
381 arm_dmamap_freesegs(map);
357 dmat->map_count--;
358 free(map, M_DEVBUF);
359}
360
361/*
362 * Map the buffer buf into bus space using the dmamap map.
363 */
364int
365bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
366 bus_size_t buflen, bus_dmamap_callback_t *callback,
367 void *callback_arg, int flags)
368{
369 vm_offset_t lastaddr = 0;
370 int error, nsegs = 0;
371#ifdef __GNUC__
372 bus_dma_segment_t dm_segments[dmat->nsegments];
373#else
374 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
375#endif
376
377 map->flags &= ~DMAMAP_TYPE_MASK;
378 map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
379 map->buffer = buf;
380 map->len = buflen;
381 error = bus_dmamap_load_buffer(dmat,
382 dm_segments, map, buf, buflen, NULL,
383 flags, &lastaddr, &nsegs, 1);
404 (*callback)(callback_arg, dm_segments, nsegs, error);
384 if (error)
385 (*callback)(callback_arg, NULL, 0, error);
386 else
387 (*callback)(callback_arg, dm_segments, nsegs + 1, error);
388
389 return (0);
390}
391
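This implementation never defers a load: the callback always fires before bus_dmamap_load() returns (there is no EINPROGRESS path), so the usual synchronous idiom of capturing the segment address through the callback argument is safe here. A sketch against the single-segment tag above (buf/buflen hypothetical):

	static void
	example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	{
		bus_addr_t *addrp = arg;

		if (error == 0)
			*addrp = segs[0].ds_addr;
	}

	/* In the caller: */
	bus_addr_t physaddr = 0;
	bus_dmamap_load(sc->sc_dtag, map, buf, buflen,
	    example_load_cb, &physaddr, BUS_DMA_NOWAIT);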
392/*
393 * Utility function to load a linear buffer. lastaddrp holds state
394 * between invocations (for multiple-buffer loads). segp contains
395 * the starting segment on entrance, and the ending segment on exit.

--- 10 unchanged lines hidden ---

406 vm_offset_t vaddr = (vm_offset_t)buf;
407 int seg;
408 int error = 0;
409 pmap_t pmap;
410 pd_entry_t *pde;
411 pt_entry_t pte;
412 pt_entry_t *ptep;
413
414 if (td != NULL)
415 pmap = vmspace_pmap(td->td_proc->p_vmspace);
416 else
417 pmap = pmap_kernel();
418
419 lastaddr = *lastaddrp;
420 bmask = ~(dmat->boundary - 1);
421

--- 6 unchanged lines hidden (view full) ---

428 */
429 if (__predict_true(pmap == pmap_kernel())) {
430 (void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
431 if (__predict_false(pmap_pde_section(pde))) {
432 curaddr = (*pde & L1_S_FRAME) |
433 (vaddr & L1_S_OFFSET);
434 if (*pde & L1_S_CACHE_MASK) {
435 map->flags &=
454 ~ARM32_DMAMAP_COHERENT;
436 ~DMAMAP_COHERENT;
437 }
438 } else {
439 pte = *ptep;
440 KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
441 ("INV type"));
442 if (__predict_false((pte & L2_TYPE_MASK)
443 == L2_TYPE_L)) {
444 curaddr = (pte & L2_L_FRAME) |
445 (vaddr & L2_L_OFFSET);
446 if (pte & L2_L_CACHE_MASK) {
447 map->flags &=
466 ~ARM32_DMAMAP_COHERENT;
448 ~DMAMAP_COHERENT;
449
450 }
451 } else {
452 curaddr = (pte & L2_S_FRAME) |
453 (vaddr & L2_S_OFFSET);
454 if (pte & L2_S_CACHE_MASK) {
455 map->flags &=
473 ~ARM32_DMAMAP_COHERENT;
456 ~DMAMAP_COHERENT;
457 }
458 }
459 }
460 } else {
461 curaddr = pmap_extract(pmap, vaddr);
479 map->flags &= ~ARM32_DMAMAP_COHERENT;
462 map->flags &= ~DMAMAP_COHERENT;
463 }
464
465 if (dmat->ranges) {
466 struct arm32_dma_range *dr;
467
468 dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
469 curaddr);
470 if (dr == NULL)
471 return (EINVAL);
472 /*
473 * In a valid DMA range. Translate the physical
474 * memory address to an address in the DMA window.
475 */
476 curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
477
478 }
479 /*
480 * Compute the segment size, and adjust counts.
481 */
482 sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
483 if (buflen < sgsize)
484 sgsize = buflen;
485
486 /*

--- 4 unchanged lines hidden ---

491 if (sgsize > (baddr - curaddr))
492 sgsize = (baddr - curaddr);
493 }
494
495 /*
496 * Insert chunk into a segment, coalescing with
497 * the previous segment if possible.
498 */
502 error = arm_dmamap_addseg(map,
503 (vm_offset_t)curaddr, sgsize);
504 if (error)
505 break;
506
499 if (first) {
500 segs[seg].ds_addr = curaddr;
501 segs[seg].ds_len = sgsize;
502 first = 0;
503 } else {
504 if (curaddr == lastaddr &&
505 (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
506 (dmat->boundary == 0 ||
515 (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
507 (segs[seg].ds_addr & bmask) ==
508 (curaddr & bmask))) {
509 segs[seg].ds_len += sgsize;
510 goto segdone;
511 }
512 else {
513 if (++seg >= dmat->nsegments)
514 break;
515 segs[seg].ds_addr = curaddr;
516 segs[seg].ds_len = sgsize;
517 }
518 }
519
520 if (error)
521 break;
522segdone:
523 lastaddr = curaddr + sgsize;
524 vaddr += sgsize;
525 buflen -= sgsize;
526 }
527
528 *segp = seg;
529 *lastaddrp = lastaddr;
530

--- 17 unchanged lines hidden ---

548 bus_dma_segment_t dm_segments[dmat->nsegments];
549#else
550 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
551#endif
552 int nsegs = 0, error = 0;
553
554 M_ASSERTPKTHDR(m0);
555
556 map->flags &= ~DMAMAP_TYPE_MASK;
557 map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
558 map->buffer = m0;
559 if (m0->m_pkthdr.len <= dmat->maxsize) {
560 int first = 1;
561 vm_offset_t lastaddr = 0;
562 struct mbuf *m;
563
564 for (m = m0; m != NULL && error == 0; m = m->m_next) {
565 if (m->m_len > 0) {
566 error = bus_dmamap_load_buffer(dmat,

--- 7 unchanged lines hidden ---

574 }
575
576 if (error) {
577 /*
578 * force "no valid mappings" on error in callback.
579 */
580 (*callback)(callback_arg, dm_segments, 0, 0, error);
581 } else {
581 (*callback)(callback_arg, dm_segments, nsegs+1,
582 (*callback)(callback_arg, dm_segments, nsegs + 1,
583 m0->m_pkthdr.len, error);
584 }
585 return (error);
586}
587
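The mbuf variant walks the chain with one bus_dmamap_load_buffer() call per non-empty mbuf and reports the total packet length through the callback2 form (bus_dmamap_callback2_t, which adds a mapped-size argument). A transmit-path sketch, names hypothetical:

	static void
	example_tx_cb(void *arg, bus_dma_segment_t *segs, int nseg,
	    bus_size_t mapsize, int error)
	{
		if (error != 0)
			return;
		/* ... program nseg descriptors covering mapsize bytes ... */
	}

	error = bus_dmamap_load_mbuf(sc->sc_dtag, txmap, m_head,
	    example_tx_cb, sc, BUS_DMA_NOWAIT);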
588/*
589 * Like bus_dmamap_load(), but for uios.
590 */

--- 10 unchanged lines hidden ---

601#endif
602 int nsegs, i, error, first;
603 bus_size_t resid;
604 struct iovec *iov;
605 struct thread *td = NULL;
606
607 resid = uio->uio_resid;
608 iov = uio->uio_iov;
609 map->flags &= ~DMAMAP_TYPE_MASK;
610 map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
611 map->buffer = uio;
612
613 if (uio->uio_segflg == UIO_USERSPACE) {
614 td = uio->uio_td;
615 KASSERT(td != NULL,
616 ("bus_dmamap_load_uio: USERSPACE but no proc"));
617 }
618
619 first = 1;

--- 26 unchanged lines hidden ---

646 (*callback)(callback_arg, dm_segments, nsegs+1,
647 uio->uio_resid, error);
648 }
649
650 return (error);
651}
652
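bus_dmamap_load_uio() uses the same bus_dmamap_callback2_t form, passing uio->uio_resid as the mapped size, and for UIO_USERSPACE requests it requires uio->uio_td to be set (see the KASSERT above). A one-line sketch, reusing the hypothetical callback from the mbuf example:

	error = bus_dmamap_load_uio(sc->sc_dtag, map, uio,
	    example_tx_cb, sc, BUS_DMA_NOWAIT);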
653/*
650 * Release the mapping held by map. A no-op on PowerPC.
654 * Release the mapping held by map.
655 */
656void
657bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
658{
655 arm_dmamap_freesegs(map);
659 map->flags &= ~DMAMAP_TYPE_MASK;
660 return;
661}
662
663static void
664bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
665{
666
667 if (op & BUS_DMASYNC_POSTREAD ||
668 op == (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
669 cpu_dcache_wbinv_range((vm_offset_t)buf, len);
670 return;
671 }
672 if (op & BUS_DMASYNC_PREWRITE)
673 cpu_dcache_wb_range((vm_offset_t)buf, len);
674 if (op & BUS_DMASYNC_PREREAD) {
675 if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
676 cpu_dcache_inv_range((vm_offset_t)buf, len);
677 else
678 cpu_dcache_wbinv_range((vm_offset_t)buf, len);
679 }
680}
681
682void
683bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
684{
662 struct arm_seglist *seg = SLIST_FIRST(&map->seglist);
663
664 if (op != BUS_DMASYNC_PREREAD && op != BUS_DMASYNC_PREWRITE)
685 struct mbuf *m;
686 struct uio *uio;
687 int resid;
688 struct iovec *iov;
689
690 if (op == BUS_DMASYNC_POSTREAD)
691 return;
666 /* Skip cache frobbing if mapping was COHERENT. */
667 if (map->flags & ARM32_DMAMAP_COHERENT) {
668 /* Drain the write buffer. */
669 cpu_drain_writebuf();
692 if (map->flags & DMAMAP_COHERENT)
693 return;
694 switch(map->flags & DMAMAP_TYPE_MASK) {
695 case DMAMAP_LINEAR:
696 bus_dmamap_sync_buf(map->buffer, map->len, op);
697 break;
698 case DMAMAP_MBUF:
699 m = map->buffer;
700 while (m) {
701 bus_dmamap_sync_buf(m->m_data, m->m_len, op);
702 m = m->m_next;
703 }
704 break;
705 case DMAMAP_UIO:
706 uio = map->buffer;
707 iov = uio->uio_iov;
708 resid = uio->uio_resid;
709 for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
710 bus_size_t minlen = resid < iov[i].iov_len ? resid :
711 iov[i].iov_len;
712 if (minlen > 0) {
713 bus_dmamap_sync_buf(iov[i].iov_base, minlen,
714 op);
715 resid -= minlen;
716 }
717 }
718 break;
719 default:
720 break;
721 }
672 while (seg) {
673 cpu_dcache_wbinv_range(seg->seg.ds_addr, seg->seg.ds_len);
674 seg = SLIST_NEXT(seg, next);
675 }
722 cpu_drain_writebuf();
723}
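Taken together, one receive (device-to-memory) cycle against this code looks like the sketch below: BUS_DMASYNC_PREREAD invalidates the buffer's cache lines (write-back-invalidate when the buffer or length is not cache-line aligned, per bus_dmamap_sync_buf() above), and BUS_DMASYNC_POSTREAD returns immediately, so the CPU may inspect the buffer as soon as the device finishes. Names hypothetical, continuing the earlier sketches:

	bus_dmamap_load(sc->sc_dtag, map, rxbuf, rxlen,
	    example_load_cb, &physaddr, BUS_DMA_NOWAIT);
	bus_dmamap_sync(sc->sc_dtag, map, BUS_DMASYNC_PREREAD);
	/* ... hand physaddr to the device, wait for its interrupt ... */
	bus_dmamap_sync(sc->sc_dtag, map, BUS_DMASYNC_POSTREAD);	/* no-op here */
	bus_dmamap_unload(sc->sc_dtag, map);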