/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#define MAPTYPE(type)		((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)		(MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)	(MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)	(MAPTYPE(type) == kNonCoherent)


static bool gIOEnableCopyMapper  = true;

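// Flags passed to walkAll()/segmentOp() selecting which phase of a walk is
// being performed and in which direction any bounce-buffer copies run.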
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};


#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)					    \
	((kIODirectionNone == (direction))				    \
	    || (kWalkSyncAlways & (op))					    \
	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
						    & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...)  	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  	{}
#endif


/**************************** class IODMACommand ***************************/
#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUnused(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
				UInt8           numAddressBits,
				UInt64          maxSegmentSize,
				MappingOptions  mappingOptions,
				UInt64          maxTransferSize,
				UInt32          alignment,
				IOMapper       *mapper,
				void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
					 numAddressBits, maxSegmentSize,
					 mappingOptions, maxTransferSize,
					 alignment,      mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
	    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

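/*
 * Validate and record the DMA specification: the segment output function
 * must match the requested address width, zero-valued limits default to
 * "no limit", and a NULL mapper falls back to the system mapper if one
 * exists.
 */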
bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
				    UInt8           numAddressBits,
				    UInt64          maxSegmentSize,
				    MappingOptions  mappingOptions,
				    UInt64          maxTransferSize,
				    UInt32          alignment,
				    IOMapper       *mapper,
				    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
        return false;

    bool is32Bit = (OutputHost32   == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
	if (!numAddressBits)
	    numAddressBits = 32;
	else if (numAddressBits > 32)
	    return false;		// Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
	return false;

    if (!maxSegmentSize)
	maxSegmentSize--;	// Set Max segment to -1
    if (!maxTransferSize)
	maxTransferSize--;	// Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
	mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg	     = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
	alignment = 1;
    fAlignMask	     = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
	if (mapper && !mapper->getBypassMask(&fBypassMask))
	    return false;
	break;
    default:
	return false;
    }

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
	return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}

void
IODMACommand::free()
{
    if (reserved)
	IODelete(reserved, IODMACommandInternal, 1);

    super::free();
}

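/*
 * Attach a new memory descriptor.  Fetches the descriptor's DMA
 * characteristics and decides up front whether generated segment addresses
 * will ever need checking against this command's addressing limit.
 */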
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
	if (!autoPrepare)
	{
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory) {
	// As we are almost certainly being called from a work loop thread,
	// if fActive is true it is probably not a good time to potentially
	// block.  Just test for it and return an error.
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    }

    if (mem) {
	bzero(&fMDSummary, sizeof(fMDSummary));
	IOReturn rtn = mem->dmaCommandOperation(
		kIOMDGetCharacteristics,
		&fMDSummary, sizeof(fMDSummary));
	if (rtn)
	    return rtn;

	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	if ((kMapped == MAPTYPE(fMappingOptions))
	    && fMapper
	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
	    // assuming mapped space is 2G
	    fInternalState->fCheckAddressing = false;
	else
	    fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	fInternalState->fNewMD = true;
	mem->retain();
	fMemory = mem;

	if (autoPrepare)
	    return prepare();
    }

    return kIOReturnSuccess;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
	return (kIOReturnNotReady);

    if (fMemory) {
	while (fActive)
	    complete();
	fMemory->release();
	fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}


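/*
 * Per-segment worker for walkAll().  During preflight it records
 * misalignment and counts the pages that will need bounce buffering (those
 * beyond the addressing limit, or every page when double buffering).
 * During prepare it enters copy-mapper mappings for those pages, and during
 * sync it copies data between the original pages and the bounce pages with
 * copypv().
 */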
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IOOptionBits op = (IOOptionBits) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
	state->fMisaligned |= (0 != (target->fAlignMask & address));
	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
	if ((address + length - 1) <= maxPhys)
	{
	    length = 0;
	}
	else if (address <= maxPhys)
	{
	    DEBG("tail %qx, %qx", address, length);
	    length = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
	state->fCopyPageCount += numPages;
    }
    else
    {
	if (kWalkPrepare & op)
	{
	    for (IOItemCount idx = 0; idx < numPages; idx++)
		gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
	}
	if (state->fDoubleBuffer)
	    state->fCopyNext += length;
	else
	{
	    state->fCopyNext += round_page(length);
	    remapAddr += (address & PAGE_MASK);
	}

	if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
			(kWalkSyncIn & op) ? "->" : "<-",
			address, length, op);
	    if (kWalkSyncIn & op)
	    { // cppvNoModSnk
		copypv(remapAddr, address, length,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	    else
	    {
		copypv(address, remapAddr, length,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	}
    }

    return kIOReturnSuccess;
}

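/*
 * Drive a full walk over the prepared range.  Preflight sizes the bounce
 * requirement, then either allocates copy-mapper pages or, failing that, a
 * double-buffering IOBufferMemoryDescriptor; sync phases copy data between
 * the descriptor and the bounce buffer, and the complete phase releases
 * whatever was allocated.
 */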
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (gIOEnableCopyMapper && (kWalkPreflight & op))
    {
	state->fCopyContig     = false;
	state->fMisaligned     = false;
	state->fDoubleBuffer   = false;
	state->fPrepared       = false;
	state->fCopyNext       = 0;
	state->fCopyPageAlloc  = 0;
	state->fCopyPageCount  = 0;
	state->fNextRemapIndex = 0;
	state->fCopyMD         = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    offset = 0;
	    numSegments = 0-1;
	    ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
	if (state->fDoubleBuffer)
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    IOMapper * mapper;
	    ppnum_t    mapBase = 0;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    mapper = gIOCopyMapper;
	    if (mapper)
		mapBase = mapper->iovmAlloc(state->fCopyPageCount);
	    if (mapBase)
	    {
		state->fCopyPageAlloc = mapBase;
		if (state->fCopyPageAlloc && state->fDoubleBuffer)
		{
		    DEBG("contig copy map\n");
		    state->fCopyContig = true;
		}

		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
		state->fPrepared = true;
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		DEBG("alloc IOBMD\n");
		state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
				    fMDSummary.fDirection, state->fPreparedLength, page_size);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !iovmAlloc");
		    return (kIOReturnNoResources);
		}
	    }
	}
    }

    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyPageAlloc)
	    {
		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    if (kWalkComplete & op)
    {
	if (state->fCopyPageAlloc)
	{
	    gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
	    state->fCopyPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}

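/*
 * Respecify the DMA constraints and prepare in one call.  Performs the same
 * validation as initWithSpecification(), but may only be used while the
 * command is inactive.
 */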
IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction	outSegFunc,
				       UInt8		numAddressBits,
				       UInt64		maxSegmentSize,
				       MappingOptions	mappingOptions,
				       UInt64		maxTransferSize,
				       UInt32		alignment,
				       IOMapper		*mapper,
				       UInt64		offset,
				       UInt64		length,
				       bool		flushCache,
				       bool		synchronize)
{
    if (fActive)
        return kIOReturnNotPermitted;

    if (!outSegFunc || !numAddressBits)
        return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32   == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
	if (!numAddressBits)
	    numAddressBits = 32;
	else if (numAddressBits > 32)
	    return kIOReturnBadArgument;		// Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
	return kIOReturnBadArgument;

    if (!maxSegmentSize)
	maxSegmentSize--;	// Set Max segment to -1
    if (!maxTransferSize)
	maxTransferSize--;	// Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
	mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg	     = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
	alignment = 1;
    fAlignMask	     = alignment - 1;
    fMapper          = mapper;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
	if (mapper && !mapper->getBypassMask(&fBypassMask))
	    return kIOReturnBadArgument;
	break;
    default:
	return kIOReturnBadArgument;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}


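/*
 * Activate the command over [offset, offset + length) of the attached
 * descriptor.  The first prepare decides between the fast "cursor" path
 * (the descriptor already satisfies the addressing and alignment
 * constraints) and a preflighted walk that sets up bounce buffering;
 * nested prepares of the same range just bump the activation count.
 */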
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions         mappingOptions = fMappingOptions;

    if (!length)
	length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
	return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
    }
    if (fActive++)
    {
	if ((state->fPreparedOffset != offset)
	  || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	state->fCopyContig     = false;
	state->fMisaligned     = false;
	state->fDoubleBuffer   = false;
	state->fPrepared       = false;
	state->fCopyNext       = 0;
	state->fCopyPageAlloc  = 0;
	state->fCopyPageCount  = 0;
	state->fNextRemapIndex = 0;
	state->fCopyMD         = 0;

	state->fCursor = state->fIterateOnly
			|| (!state->fCheckAddressing
			    && (!fAlignMask
				|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}
	if (kIOReturnSuccess == ret)
	    state->fPrepared = true;
    }
    return ret;
}

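/*
 * Balance a prepare().  When the last activation drops, finish any
 * outstanding walk (optionally syncing bounce data back into the
 * descriptor) and invalidate caches for noncoherent mappings.
 */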
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (!--fActive)
    {
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkComplete;
	    if (synchronize)
		op |= kWalkSyncIn;
	    ret = walkAll(op);
	}
	state->fPrepared = false;

	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
	{
	    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	    poMD->performOperation(kIOMemoryIncoherentIOFlush, 0, fMDSummary.fLength);
	}
    }

    return ret;
}

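/*
 * Explicitly sync bounce buffers with the descriptor in one direction,
 * optionally forcing the command onto the double-buffered path first
 * (kForceDoubleBuffer).
 */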
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn		   ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer)
	    return kIOReturnSuccess;
	if (state->fCursor)
	    state->fCursor = false;
	else
	    ret = walkAll(kWalkComplete);

	op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
	return kIOReturnSuccess;

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}

struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

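/*
 * Segment callback for transfer().  Copies each generated segment between
 * the caller's buffer and physical memory with copypv(), translating mapped
 * IOVM addresses back to physical page by page.  Returning kIOReturnOverrun
 * once the request is satisfied stops the segment walk early.
 */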
IOReturn
IODMACommand::transferSegment(void   *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) segments;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
	UInt64 copyLen = length;
	if ((kMapped == MAPTYPE(target->fMappingOptions))
	    && target->fMapper)
	{
	    cpuAddr = target->fMapper->mapAddr(ioAddr);
	    copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
	    ioAddr += copyLen;
	}

	switch (context->op)
	{
	    case kIODMACommandTransferOpReadBytes:
		copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
				    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
		break;
	    case kIODMACommandTransferOpWriteBytes:
		copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
				cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
		break;
	}
	length                -= copyLen;
	context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}

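/*
 * Common implementation of readBytes()/writeBytes(): clamps the request to
 * the prepared range, walks the segments with transferSegment(), and
 * returns the number of bytes actually moved.
 */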
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(transferSegment, (void *) kWalkClient, &offset, &context, &numSegments);

    return (length - context.remaining);
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
}

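/*
 * Core segment generator.  Walks the attached (or bounce) memory descriptor
 * from the current offset, coalescing physically contiguous ranges, then
 * splits the running segment at the addressing limit and fMaxSegmentSize,
 * substituting copy-mapper pages on the client path where needed, and emits
 * each finished segment through outSegFunc until the range or the caller's
 * segment array is exhausted.
 */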
IOReturn
IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IOOptionBits           op = (IOOptionBits) reference;
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
	state->fOffset                 = 0;
	state->fIOVMAddr               = 0;
	internalState->fNextRemapIndex = 0;
	internalState->fNewMD	       = false;
	state->fMapped                 = (IS_MAPPED(fMappingOptions) && fMapper);
	mdOp                           = kIOMDFirstSegment;
    }

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
        if (!state->fIOVMAddr) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fCopyContig && (kWalkClient & op))
	    {
		state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
					    + offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
	    }
	    else
	    {
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess) {
		assert(state->fIOVMAddr);
		assert(state->fLength);
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
        }

        if (!curSeg.fIOVMAddr) {
	    UInt64 length = state->fLength;

            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }
        else if (curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr) {
	    UInt64 length = state->fLength;
            offset          += length;
            curSeg.fLength  += length;
            state->fIOVMAddr = 0;
        }


        if (!state->fIOVMAddr)
	{
	    if (kWalkClient & op)
	    {
		if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
		{
		    if (internalState->fCursor)
		    {
			curSeg.fIOVMAddr = 0;
			ret = kIOReturnMessageTooLarge;
			break;
		    }
		    else if (curSeg.fIOVMAddr <= maxPhys)
		    {
			UInt64 remain, newLength;

			newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
			DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
			remain = curSeg.fLength - newLength;
			state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
			curSeg.fLength   = newLength;
			state->fLength   = remain;
			offset          -= remain;
		    }
		    else if (gIOCopyMapper)
		    {
			DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
			if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
							    ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)))
			{

			    curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)
						+ (curSeg.fIOVMAddr & PAGE_MASK);
			    internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
			}
			else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
			{
			    if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
							    ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
			    {
				curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)
						    + (curSeg.fIOVMAddr & PAGE_MASK);
				internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
				break;
			    }
			}
			DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    }
		}
	    }

	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		UInt64 remain = curSeg.fLength - fMaxSegmentSize;

		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
		curSeg.fLength   = fMaxSegmentSize;

		state->fLength   = remain;
		offset          -= remain;
	    }

	    if (internalState->fCursor
		&& (0 != (fAlignMask & curSeg.fIOVMAddr)))
	    {
		curSeg.fIOVMAddr = 0;
		ret = kIOReturnNotAligned;
		break;
	    }

	    if (offset >= memLength)
	    {
		curSeg.fLength   -= (offset - memLength);
		offset = memLength;
		state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

        if (state->fIOVMAddr) {
            if (segIndex + 1 == numSegments)
                break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
        }
    }

    if (curSeg.fIOVMAddr) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP       = offset - internalState->fPreparedOffset;
	*numSegmentsP  = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
	void *reference, IODMACommand *target,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
	&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
    {
	DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
    {
	DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
	ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

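/*
 * Canned segment output functions.  Each stores one Segment32 or Segment64
 * into the caller's array in host, big-endian, or little-endian byte order;
 * drivers pass one of these to withSpecification() to match their
 * hardware's descriptor layout.
 */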
bool
IODMACommand::OutputHost32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}