1/*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36#include <sys/cdefs.h>
37
38#include <IOKit/assert.h>
39#include <IOKit/system.h>
40#include <IOKit/IOLib.h>
41#include <IOKit/IOMemoryDescriptor.h>
42#include <IOKit/IOMapper.h>
43#include <IOKit/IODMACommand.h>
44#include <IOKit/IOKitKeysPrivate.h>
45
46#ifndef __LP64__
47#include <IOKit/IOSubMemoryDescriptor.h>
48#endif /* !__LP64__ */
49
50#include <IOKit/IOKitDebug.h>
51#include <libkern/OSDebug.h>
52
53#include "IOKitKernelInternal.h"
54
55#include <libkern/c++/OSContainers.h>
56#include <libkern/c++/OSDictionary.h>
57#include <libkern/c++/OSArray.h>
58#include <libkern/c++/OSSymbol.h>
59#include <libkern/c++/OSNumber.h>
60
61#include <sys/uio.h>
62
63__BEGIN_DECLS
64#include <vm/pmap.h>
65#include <vm/vm_pageout.h>
66#include <mach/memory_object_types.h>
67#include <device/device_port.h>
68
69#include <mach/vm_prot.h>
70#include <mach/mach_vm.h>
71#include <vm/vm_fault.h>
72#include <vm/vm_protos.h>
73
74extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75extern void ipc_port_release_send(ipc_port_t port);
76
77kern_return_t
78memory_object_iopl_request(
79	ipc_port_t		port,
80	memory_object_offset_t	offset,
81	vm_size_t		*upl_size,
82	upl_t			*upl_ptr,
83	upl_page_info_array_t	user_page_list,
84	unsigned int		*page_list_count,
85	int			*flags);
86
87// osfmk/device/iokit_rpc.c
88unsigned int IODefaultCacheBits(addr64_t pa);
89unsigned int  IOTranslateCacheBits(struct phys_entry *pp);
90
91__END_DECLS
92
93#define kIOMapperWaitSystem	((IOMapper *) 1)
94
95static IOMapper * gIOSystemMapper = NULL;
96
97ppnum_t		  gIOLastPage;
98
99/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
100
101OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
102
103#define super IOMemoryDescriptor
104
105OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
106
107/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
108
109static IORecursiveLock * gIOMemoryLock;
110
111#define LOCK	IORecursiveLockLock( gIOMemoryLock)
112#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
113#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
114#define WAKEUP	\
115    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
116
117#if 0
118#define DEBG(fmt, args...)  	{ kprintf(fmt, ## args); }
119#else
120#define DEBG(fmt, args...)  	{}
121#endif
122
123#define IOMD_DEBUG_DMAACTIVE	1
124
125/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
126
127// Some data structures and accessor macros used by the initWithOptions
128// Function
129
130enum ioPLBlockFlags {
131    kIOPLOnDevice  = 0x00000001,
132    kIOPLExternUPL = 0x00000002,
133};
134
135struct IOMDPersistentInitData
136{
137    const IOGeneralMemoryDescriptor * fMD;
138    IOMemoryReference               * fMemRef;
139};
140
141struct ioPLBlock {
142    upl_t fIOPL;
143    vm_address_t fPageInfo;   // Pointer to page list or index into it
144    uint32_t fIOMDOffset;	    // The offset of this iopl in descriptor
145    ppnum_t fMappedPage;	    // Page number of first page in this iopl
146    unsigned int fPageOffset;	    // Offset within first page of iopl
147    unsigned int fFlags;	    // Flags
148};
149
150struct ioGMDData {
151    IOMapper *  fMapper;
152    uint8_t	fDMAMapNumAddressBits;
153    uint64_t    fDMAMapAlignment;
154    addr64_t    fMappedBase;
155    uint64_t fPreparationID;
156    unsigned int fPageCnt;
157    unsigned char fDiscontig:1;
158    unsigned char fCompletionError:1;
159    unsigned char _resv:6;
160#if __LP64__
161    // align arrays to 8 bytes so following macros work
162    unsigned char fPad[3];
163#endif
164    upl_page_info_t fPageList[1]; /* variable length */
165    ioPLBlock fBlocks[1]; /* variable length */
166};
167
168#define getDataP(osd)	((ioGMDData *) (osd)->getBytesNoCopy())
169#define getIOPLList(d)	((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
170#define getNumIOPL(osd, d)	\
171    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
172#define getPageList(d)	(&(d->fPageList[0]))
173#define computeDataSize(p, u) \
174    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
175
176/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
177
178#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
179
180extern "C" {
181
182kern_return_t device_data_action(
183               uintptr_t               device_handle,
184               ipc_port_t              device_pager,
185               vm_prot_t               protection,
186               vm_object_offset_t      offset,
187               vm_size_t               size)
188{
189    kern_return_t	 kr;
190    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
191    IOMemoryDescriptor * memDesc;
192
193    LOCK;
194    memDesc = ref->dp.memory;
195    if( memDesc)
196    {
197	memDesc->retain();
198	kr = memDesc->handleFault(device_pager, offset, size);
199	memDesc->release();
200    }
201    else
202	kr = KERN_ABORTED;
203    UNLOCK;
204
205    return( kr );
206}
207
208kern_return_t device_close(
209               uintptr_t     device_handle)
210{
211    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
212
213    IODelete( ref, IOMemoryDescriptorReserved, 1 );
214
215    return( kIOReturnSuccess );
216}
217};	// end extern "C"
218
219/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
220
221// Note this inline function uses C++ reference arguments to return values
222// This means that pointers are not passed and NULLs don't have to be
223// checked for as a NULL reference is illegal.
224static inline void
225getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
226     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
227{
228    assert(kIOMemoryTypeUIO       == type
229	|| kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64 == type
230	|| kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
231    if (kIOMemoryTypeUIO == type) {
232	user_size_t us;
233	user_addr_t ad;
234	uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
235    }
236#ifndef __LP64__
237    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
238	IOAddressRange cur = r.v64[ind];
239	addr = cur.address;
240	len  = cur.length;
241    }
242#endif /* !__LP64__ */
243    else {
244	IOVirtualRange cur = r.v[ind];
245	addr = cur.address;
246	len  = cur.length;
247    }
248}
249
250/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251
252static IOReturn
253purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
254{
255    IOReturn err = kIOReturnSuccess;
256
257    *control = VM_PURGABLE_SET_STATE;
258
259    enum { kIOMemoryPurgeableControlMask = 15 };
260
261    switch (kIOMemoryPurgeableControlMask & newState)
262    {
263	case kIOMemoryPurgeableKeepCurrent:
264	    *control = VM_PURGABLE_GET_STATE;
265	    break;
266
267	case kIOMemoryPurgeableNonVolatile:
268	    *state = VM_PURGABLE_NONVOLATILE;
269	    break;
270	case kIOMemoryPurgeableVolatile:
271	    *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
272	    break;
273	case kIOMemoryPurgeableEmpty:
274	    *state = VM_PURGABLE_EMPTY;
275	    break;
276	default:
277	    err = kIOReturnBadArgument;
278	    break;
279    }
280    return (err);
281}
282
283static IOReturn
284purgeableStateBits(int * state)
285{
286    IOReturn err = kIOReturnSuccess;
287
288    switch (VM_PURGABLE_STATE_MASK & *state)
289    {
290	case VM_PURGABLE_NONVOLATILE:
291	    *state = kIOMemoryPurgeableNonVolatile;
292	    break;
293	case VM_PURGABLE_VOLATILE:
294	    *state = kIOMemoryPurgeableVolatile;
295	    break;
296	case VM_PURGABLE_EMPTY:
297	    *state = kIOMemoryPurgeableEmpty;
298	    break;
299	default:
300	    *state = kIOMemoryPurgeableNonVolatile;
301	    err = kIOReturnNotReady;
302	    break;
303    }
304    return (err);
305}
306
307
308static vm_prot_t
309vmProtForCacheMode(IOOptionBits cacheMode)
310{
311    vm_prot_t prot = 0;
312    switch (cacheMode)
313    {
314	case kIOInhibitCache:
315	    SET_MAP_MEM(MAP_MEM_IO, prot);
316	    break;
317
318	case kIOWriteThruCache:
319	    SET_MAP_MEM(MAP_MEM_WTHRU, prot);
320	    break;
321
322	case kIOWriteCombineCache:
323	    SET_MAP_MEM(MAP_MEM_WCOMB, prot);
324	    break;
325
326	case kIOCopybackCache:
327	    SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
328	    break;
329
330	case kIOCopybackInnerCache:
331	    SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
332	    break;
333
334	case kIODefaultCache:
335	default:
336	    SET_MAP_MEM(MAP_MEM_NOOP, prot);
337	    break;
338    }
339
340    return (prot);
341}
342
343static unsigned int
344pagerFlagsForCacheMode(IOOptionBits cacheMode)
345{
346    unsigned int pagerFlags = 0;
347    switch (cacheMode)
348    {
349	case kIOInhibitCache:
350	    pagerFlags = DEVICE_PAGER_CACHE_INHIB |  DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
351	    break;
352
353	case kIOWriteThruCache:
354	    pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
355	    break;
356
357	case kIOWriteCombineCache:
358	    pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
359	    break;
360
361	case kIOCopybackCache:
362	    pagerFlags = DEVICE_PAGER_COHERENT;
363	    break;
364
365	case kIOCopybackInnerCache:
366	    pagerFlags = DEVICE_PAGER_COHERENT;
367	    break;
368
369	case kIODefaultCache:
370	default:
371	    pagerFlags = -1U;
372	    break;
373    }
374    return (pagerFlags);
375}
376
377/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
378/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
379
380struct IOMemoryEntry
381{
382    ipc_port_t entry;
383    int64_t    offset;
384    uint64_t   size;
385};
386
387struct IOMemoryReference
388{
389    volatile SInt32 refCount;
390    vm_prot_t       prot;
391    uint32_t        capacity;
392    uint32_t        count;
393    IOMemoryEntry   entries[0];
394};
395
396enum
397{
398    kIOMemoryReferenceReuse = 0x00000001,
399    kIOMemoryReferenceWrite = 0x00000002,
400};
401
402SInt32 gIOMemoryReferenceCount;
403
404IOMemoryReference *
405IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
406{
407    IOMemoryReference * ref;
408    size_t              newSize, oldSize, copySize;
409
410    newSize = (sizeof(IOMemoryReference)
411                 - sizeof(ref->entries)
412                 + capacity * sizeof(ref->entries[0]));
413    ref = (typeof(ref)) IOMalloc(newSize);
414    if (realloc)
415    {
416	oldSize = (sizeof(IOMemoryReference)
417		        - sizeof(realloc->entries)
418		        + realloc->capacity * sizeof(realloc->entries[0]));
419	copySize = oldSize;
420        if (copySize > newSize) copySize = newSize;
421	if (ref) bcopy(realloc, ref, copySize);
422	IOFree(realloc, oldSize);
423    }
424    else if (ref)
425    {
426	bzero(ref, sizeof(*ref));
427	ref->refCount = 1;
428	OSIncrementAtomic(&gIOMemoryReferenceCount);
429    }
430    if (!ref) return (0);
431    ref->capacity = capacity;
432    return (ref);
433}
434
435void
436IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
437{
438    IOMemoryEntry * entries;
439    size_t          size;
440
441    entries = ref->entries + ref->count;
442    while (entries > &ref->entries[0])
443    {
444        entries--;
445        ipc_port_release_send(entries->entry);
446    }
447    size = (sizeof(IOMemoryReference)
448                 - sizeof(ref->entries)
449                 + ref->capacity * sizeof(ref->entries[0]));
450    IOFree(ref, size);
451
452    OSDecrementAtomic(&gIOMemoryReferenceCount);
453}
454
455void
456IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
457{
458    if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
459}
460
461
462IOReturn
463IOGeneralMemoryDescriptor::memoryReferenceCreate(
464                        IOOptionBits         options,
465                        IOMemoryReference ** reference)
466{
467    enum { kCapacity = 4, kCapacityInc = 4 };
468
469    kern_return_t        err;
470    IOMemoryReference *  ref;
471    IOMemoryEntry *      entries;
472    IOMemoryEntry *      cloneEntries;
473    vm_map_t             map;
474    ipc_port_t           entry, cloneEntry;
475    vm_prot_t            prot;
476    memory_object_size_t actualSize;
477    uint32_t             rangeIdx;
478    uint32_t             count;
479    mach_vm_address_t    entryAddr, endAddr, entrySize;
480    mach_vm_size_t       srcAddr, srcLen;
481    mach_vm_size_t       nextAddr, nextLen;
482    mach_vm_size_t       offset, remain;
483    IOByteCount          physLen;
484    IOOptionBits         type = (_flags & kIOMemoryTypeMask);
485    IOOptionBits         cacheMode;
486    unsigned int    	 pagerFlags;
487
488    ref = memoryReferenceAlloc(kCapacity, NULL);
489    if (!ref) return (kIOReturnNoMemory);
490    entries = &ref->entries[0];
491    count = 0;
492
493    offset = 0;
494    rangeIdx = 0;
495    if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
496    else
497    {
498        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
499        nextLen = physLen;
500	// default cache mode for physical
501	if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
502	{
503	    IOOptionBits mode;
504	    pagerFlags = IODefaultCacheBits(nextAddr);
505	    if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
506	    {
507		if (DEVICE_PAGER_GUARDED & pagerFlags)
508		    mode = kIOInhibitCache;
509		else
510		    mode = kIOWriteCombineCache;
511	    }
512	    else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
513		mode = kIOWriteThruCache;
514	    else
515		mode = kIOCopybackCache;
516	    _flags |= (mode << kIOMemoryBufferCacheShift);
517	}
518    }
519
520    // cache mode & vm_prot
521    prot = VM_PROT_READ;
522    cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
523    prot |= vmProtForCacheMode(cacheMode);
524    // VM system requires write access to change cache mode
525    if (kIODefaultCache != cacheMode)                    prot |= VM_PROT_WRITE;
526    if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
527    if (kIOMemoryReferenceWrite & options)               prot |= VM_PROT_WRITE;
528
529    if ((kIOMemoryReferenceReuse & options) && _memRef)
530    {
531        cloneEntries = &_memRef->entries[0];
532	prot |= MAP_MEM_NAMED_REUSE;
533    }
534
535    if (_task)
536    {
537	// virtual ranges
538
539	if (kIOMemoryBufferPageable & _flags)
540	{
541	    // IOBufferMemoryDescriptor alloc - set flags for entry + object create
542	    prot |= MAP_MEM_NAMED_CREATE;
543	    if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
544	    prot |= VM_PROT_WRITE;
545	    map = NULL;
546	}
547	else map = get_task_map(_task);
548
549	remain = _length;
550	while (remain)
551	{
552	    srcAddr  = nextAddr;
553	    srcLen   = nextLen;
554	    nextAddr = 0;
555	    nextLen  = 0;
556	    // coalesce addr range
557	    for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
558	    {
559		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
560		if ((srcAddr + srcLen) != nextAddr) break;
561		srcLen += nextLen;
562	    }
563	    entryAddr = trunc_page_64(srcAddr);
564	    endAddr   = round_page_64(srcAddr + srcLen);
565	    do
566	    {
567		entrySize = (endAddr - entryAddr);
568		if (!entrySize) break;
569		actualSize = entrySize;
570
571		cloneEntry = MACH_PORT_NULL;
572		if (MAP_MEM_NAMED_REUSE & prot)
573		{
574		    if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
575		    else                                                  prot &= ~MAP_MEM_NAMED_REUSE;
576		}
577
578		err = mach_make_memory_entry_64(map,
579			&actualSize, entryAddr, prot, &entry, cloneEntry);
580
581		if (KERN_SUCCESS != err) break;
582		if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
583
584		if (count >= ref->capacity)
585		{
586		    ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
587		    entries = &ref->entries[count];
588		}
589		entries->entry  = entry;
590		entries->size   = actualSize;
591		entries->offset = offset + (entryAddr - srcAddr);
592		entryAddr += actualSize;
593		if (MAP_MEM_NAMED_REUSE & prot)
594		{
595		    if ((cloneEntries->entry  == entries->entry)
596		     && (cloneEntries->size   == entries->size)
597		     && (cloneEntries->offset == entries->offset))         cloneEntries++;
598		     else                                    prot &= ~MAP_MEM_NAMED_REUSE;
599		}
600		entries++;
601		count++;
602	    }
603	    while (true);
604	    offset += srcLen;
605	    remain -= srcLen;
606	}
607    }
608    else
609    {
610	// _task == 0, physical
611	memory_object_t pager;
612        vm_size_t       size = ptoa_32(_pages);
613
614	if (!getKernelReserved()) panic("getKernelReserved");
615
616	reserved->dp.pagerContig = (1 == _rangesCount);
617	reserved->dp.memory      = this;
618
619	pagerFlags = pagerFlagsForCacheMode(cacheMode);
620	if (-1U == pagerFlags) panic("phys is kIODefaultCache");
621	if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
622
623	pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
624							    size, pagerFlags);
625	assert (pager);
626	if (!pager) err = kIOReturnVMError;
627	else
628	{
629	    srcAddr  = nextAddr;
630	    entryAddr = trunc_page_64(srcAddr);
631	    err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
632			size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
633	    assert (KERN_SUCCESS == err);
634	    if (KERN_SUCCESS != err) device_pager_deallocate(pager);
635	    else
636	    {
637		reserved->dp.devicePager = pager;
638		entries->entry  = entry;
639		entries->size   = size;
640		entries->offset = offset + (entryAddr - srcAddr);
641		entries++;
642		count++;
643	    }
644	}
645    }
646
647    ref->count = count;
648    ref->prot  = prot;
649
650    if (KERN_SUCCESS == err)
651    {
652	if (MAP_MEM_NAMED_REUSE & prot)
653	{
654	    memoryReferenceFree(ref);
655	    OSIncrementAtomic(&_memRef->refCount);
656	    ref = _memRef;
657	}
658    }
659    else
660    {
661        memoryReferenceFree(ref);
662        ref = NULL;
663    }
664
665    *reference = ref;
666
667    return (err);
668}
669
670struct IOMemoryDescriptorMapAllocRef
671{
672    vm_map_t          map;
673    mach_vm_address_t mapped;
674    mach_vm_size_t    size;
675    vm_prot_t         prot;
676    IOOptionBits      options;
677};
678
679static kern_return_t
680IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
681{
682    IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
683    IOReturn			    err;
684    vm_map_offset_t		    addr;
685
686    addr = ref->mapped;
687    err = vm_map_enter_mem_object(map, &addr, ref->size,
688				  (vm_map_offset_t) 0,
689				  (((ref->options & kIOMapAnywhere)
690				    ? VM_FLAGS_ANYWHERE
691				    : VM_FLAGS_FIXED)
692				   | VM_MAKE_TAG(VM_MEMORY_IOKIT)
693				   | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
694				  IPC_PORT_NULL,
695				  (memory_object_offset_t) 0,
696				  false, /* copy */
697				  ref->prot,
698				  ref->prot,
699				  VM_INHERIT_NONE);
700    if (KERN_SUCCESS == err)
701    {
702	ref->mapped = (mach_vm_address_t) addr;
703	ref->map = map;
704    }
705
706    return( err );
707}
708
709IOReturn
710IOGeneralMemoryDescriptor::memoryReferenceMap(
711		     IOMemoryReference * ref,
712                     vm_map_t            map,
713                     mach_vm_size_t      inoffset,
714                     mach_vm_size_t      size,
715                     IOOptionBits        options,
716                     mach_vm_address_t * inaddr)
717{
718    IOReturn        err;
719    int64_t         offset = inoffset;
720    uint32_t        rangeIdx, entryIdx;
721    vm_map_offset_t addr, mapAddr;
722    vm_map_offset_t pageOffset, entryOffset, remain, chunk;
723
724    mach_vm_address_t srcAddr, nextAddr;
725    mach_vm_size_t    srcLen, nextLen;
726    IOByteCount       physLen;
727    IOMemoryEntry   * entry;
728    vm_prot_t         prot, memEntryCacheMode;
729    IOOptionBits      type;
730    IOOptionBits      cacheMode;
731
732    /*
733     * For the kIOMapPrefault option.
734     */
735    upl_page_info_t *pageList = NULL;
736    UInt currentPageIndex = 0;
737
738    type = _flags & kIOMemoryTypeMask;
739    prot = VM_PROT_READ;
740    if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
741    prot &= ref->prot;
742
743    cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
744    if (kIODefaultCache != cacheMode)
745    {
746	// VM system requires write access to change cache mode
747        prot |= VM_PROT_WRITE;
748        // update named entries cache mode
749	memEntryCacheMode = (MAP_MEM_ONLY | prot | vmProtForCacheMode(cacheMode));
750    }
751
752    if (_task)
753    {
754	// Find first range for offset
755	for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
756	{
757	    getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
758	    if (remain < nextLen) break;
759	    remain -= nextLen;
760	}
761    }
762    else
763    {
764        rangeIdx = 0;
765        remain   = 0;
766        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
767        nextLen  = size;
768    }
769
770    assert(remain < nextLen);
771    if (remain >= nextLen) return (kIOReturnBadArgument);
772
773    nextAddr  += remain;
774    nextLen   -= remain;
775    pageOffset = (page_mask & nextAddr);
776    addr = 0;
777    if (!(options & kIOMapAnywhere))
778    {
779        addr = *inaddr;
780        if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
781        addr -= pageOffset;
782    }
783
784    // find first entry for offset
785    for (entryIdx = 0;
786    	(entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
787    	entryIdx++) {}
788    entryIdx--;
789    entry = &ref->entries[entryIdx];
790
791    // allocate VM
792    size = round_page_64(size + pageOffset);
793    {
794	IOMemoryDescriptorMapAllocRef ref;
795	ref.map     = map;
796	ref.options = options;
797	ref.size    = size;
798	ref.prot    = prot;
799	if (options & kIOMapAnywhere)
800	    // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
801	    ref.mapped = 0;
802	else
803	    ref.mapped = addr;
804
805	if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
806	    err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
807	else
808	    err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
809	if (KERN_SUCCESS == err)
810	{
811	    addr = ref.mapped;
812	    map  = ref.map;
813	}
814    }
815
816    /*
817     * Prefaulting is only possible if we wired the memory earlier. Check the
818     * memory type, and the underlying data.
819     */
820    if (options & kIOMapPrefault) {
821        /*
822         * The memory must have been wired by calling ::prepare(), otherwise
823         * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
824         */
825        assert(map != kernel_map);
826        assert(_wireCount != 0);
827        assert(_memoryEntries != NULL);
828        if ((map == kernel_map) ||
829            (_wireCount == 0) ||
830            (_memoryEntries == NULL))
831        {
832            return kIOReturnBadArgument;
833        }
834
835        // Get the page list.
836        ioGMDData* dataP = getDataP(_memoryEntries);
837        ioPLBlock const* ioplList = getIOPLList(dataP);
838        pageList = getPageList(dataP);
839
840        // Get the number of IOPLs.
841        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
842
843        /*
844         * Scan through the IOPL Info Blocks, looking for the first block containing
845         * the offset. The research will go past it, so we'll need to go back to the
846         * right range at the end.
847         */
848        UInt ioplIndex = 0;
849        while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
850            ioplIndex++;
851        ioplIndex--;
852
853        // Retrieve the IOPL info block.
854        ioPLBlock ioplInfo = ioplList[ioplIndex];
855
856        /*
857         * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
858         * array.
859         */
860        if (ioplInfo.fFlags & kIOPLExternUPL)
861            pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
862        else
863            pageList = &pageList[ioplInfo.fPageInfo];
864
865        // Rebase [offset] into the IOPL in order to looks for the first page index.
866        mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
867
868        // Retrieve the index of the first page corresponding to the offset.
869        currentPageIndex = atop_32(offsetInIOPL);
870    }
871
872    // enter mappings
873    remain  = size;
874    mapAddr = addr;
875    addr    += pageOffset;
876    while (remain && nextLen && (KERN_SUCCESS == err))
877    {
878	srcAddr  = nextAddr;
879	srcLen   = nextLen;
880	nextAddr = 0;
881	nextLen  = 0;
882	// coalesce addr range
883	for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
884	{
885	    getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
886	    if ((srcAddr + srcLen) != nextAddr) break;
887	    srcLen += nextLen;
888	}
889
890        while (srcLen && (KERN_SUCCESS == err))
891        {
892            entryOffset = offset - entry->offset;
893            if ((page_mask & entryOffset) != pageOffset)
894            {
895                err = kIOReturnNotAligned;
896                break;
897            }
898
899	    if (kIODefaultCache != cacheMode)
900	    {
901		vm_size_t unused = 0;
902		err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
903					     memEntryCacheMode, NULL, entry->entry);
904		assert (KERN_SUCCESS == err);
905	    }
906
907            entryOffset -= pageOffset;
908            if (entryOffset >= entry->size) panic("entryOffset");
909            chunk = entry->size - entryOffset;
910            if (chunk)
911            {
912                if (chunk > remain) chunk = remain;
913
914                if (options & kIOMapPrefault) {
915                    UInt nb_pages = round_page(chunk) / PAGE_SIZE;
916                    err = vm_map_enter_mem_object_prefault(map,
917                                                           &mapAddr,
918                                                           chunk, 0 /* mask */,
919                                                            (VM_FLAGS_FIXED
920                                                           | VM_FLAGS_OVERWRITE
921                                                           | VM_MAKE_TAG(VM_MEMORY_IOKIT)
922                                                           | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
923                                                           entry->entry,
924                                                           entryOffset,
925                                                           prot, // cur
926                                                           prot, // max
927                                                           &pageList[currentPageIndex],
928						           nb_pages);
929
930                    // Compute the next index in the page list.
931                    currentPageIndex += nb_pages;
932                    assert(currentPageIndex <= _pages);
933                } else {
934                    err = vm_map_enter_mem_object(map,
935                                                  &mapAddr,
936                                                  chunk, 0 /* mask */,
937                                                   (VM_FLAGS_FIXED
938                                                  | VM_FLAGS_OVERWRITE
939                                                  | VM_MAKE_TAG(VM_MEMORY_IOKIT)
940                                                  | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
941                                                  entry->entry,
942                                                  entryOffset,
943                                                  false, // copy
944                                                  prot, // cur
945                                                  prot, // max
946                                                  VM_INHERIT_NONE);
947                }
948
949                if (KERN_SUCCESS != err) break;
950                remain -= chunk;
951                if (!remain) break;
952                mapAddr  += chunk;
953                offset   += chunk - pageOffset;
954            }
955            pageOffset = 0;
956            entry++;
957            entryIdx++;
958            if (entryIdx >= ref->count)
959            {
960                err = kIOReturnOverrun;
961                break;
962            }
963        }
964    }
965
966    if ((KERN_SUCCESS != err) && addr)
967    {
968        (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
969        addr = 0;
970    }
971    *inaddr = addr;
972
973    return (err);
974}
975
976IOReturn
977IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
978			       IOMemoryReference * ref,
979                               IOByteCount       * residentPageCount,
980                               IOByteCount       * dirtyPageCount)
981{
982    IOReturn        err;
983    IOMemoryEntry * entries;
984    unsigned int resident, dirty;
985    unsigned int totalResident, totalDirty;
986
987    totalResident = totalDirty = 0;
988    entries = ref->entries + ref->count;
989    while (entries > &ref->entries[0])
990    {
991        entries--;
992	err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
993	if (KERN_SUCCESS != err) break;
994	totalResident += resident;
995	totalDirty    += dirty;
996    }
997
998    if (residentPageCount) *residentPageCount = totalResident;
999    if (dirtyPageCount)    *dirtyPageCount    = totalDirty;
1000    return (err);
1001}
1002
1003IOReturn
1004IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1005				IOMemoryReference * ref,
1006				IOOptionBits        newState,
1007				IOOptionBits      * oldState)
1008{
1009    IOReturn        err;
1010    IOMemoryEntry * entries;
1011    vm_purgable_t   control;
1012    int             totalState, state;
1013
1014    entries = ref->entries + ref->count;
1015    totalState = kIOMemoryPurgeableNonVolatile;
1016    while (entries > &ref->entries[0])
1017    {
1018        entries--;
1019
1020	err = purgeableControlBits(newState, &control, &state);
1021	if (KERN_SUCCESS != err) break;
1022	err = mach_memory_entry_purgable_control(entries->entry, control, &state);
1023	if (KERN_SUCCESS != err) break;
1024	err = purgeableStateBits(&state);
1025	if (KERN_SUCCESS != err) break;
1026
1027	if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
1028	else if (kIOMemoryPurgeableEmpty == totalState)    continue;
1029	else if (kIOMemoryPurgeableVolatile == totalState) continue;
1030	else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
1031	else totalState = kIOMemoryPurgeableNonVolatile;
1032    }
1033
1034    if (oldState) *oldState = totalState;
1035    return (err);
1036}
1037
1038/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1039
1040IOMemoryDescriptor *
1041IOMemoryDescriptor::withAddress(void *      address,
1042                                IOByteCount   length,
1043                                IODirection direction)
1044{
1045    return IOMemoryDescriptor::
1046        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1047}
1048
1049#ifndef __LP64__
1050IOMemoryDescriptor *
1051IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1052                                IOByteCount  length,
1053                                IODirection  direction,
1054                                task_t       task)
1055{
1056    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1057    if (that)
1058    {
1059	if (that->initWithAddress(address, length, direction, task))
1060	    return that;
1061
1062        that->release();
1063    }
1064    return 0;
1065}
1066#endif /* !__LP64__ */
1067
1068IOMemoryDescriptor *
1069IOMemoryDescriptor::withPhysicalAddress(
1070				IOPhysicalAddress	address,
1071				IOByteCount		length,
1072				IODirection      	direction )
1073{
1074    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1075}
1076
1077#ifndef __LP64__
1078IOMemoryDescriptor *
1079IOMemoryDescriptor::withRanges(	IOVirtualRange * ranges,
1080				UInt32           withCount,
1081				IODirection      direction,
1082				task_t           task,
1083				bool             asReference)
1084{
1085    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1086    if (that)
1087    {
1088	if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1089	    return that;
1090
1091        that->release();
1092    }
1093    return 0;
1094}
1095#endif /* !__LP64__ */
1096
1097IOMemoryDescriptor *
1098IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1099					mach_vm_size_t length,
1100					IOOptionBits   options,
1101					task_t         task)
1102{
1103    IOAddressRange range = { address, length };
1104    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1105}
1106
1107IOMemoryDescriptor *
1108IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
1109					UInt32           rangeCount,
1110					IOOptionBits     options,
1111					task_t           task)
1112{
1113    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1114    if (that)
1115    {
1116	if (task)
1117	    options |= kIOMemoryTypeVirtual64;
1118	else
1119	    options |= kIOMemoryTypePhysical64;
1120
1121	if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1122	    return that;
1123
1124	that->release();
1125    }
1126
1127    return 0;
1128}
1129
1130
1131/*
1132 * withOptions:
1133 *
1134 * Create a new IOMemoryDescriptor. The buffer is made up of several
1135 * virtual address ranges, from a given task.
1136 *
1137 * Passing the ranges as a reference will avoid an extra allocation.
1138 */
1139IOMemoryDescriptor *
1140IOMemoryDescriptor::withOptions(void *		buffers,
1141                                UInt32		count,
1142                                UInt32		offset,
1143                                task_t		task,
1144                                IOOptionBits	opts,
1145                                IOMapper *	mapper)
1146{
1147    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1148
1149    if (self
1150    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1151    {
1152        self->release();
1153        return 0;
1154    }
1155
1156    return self;
1157}
1158
1159bool IOMemoryDescriptor::initWithOptions(void *		buffers,
1160                                         UInt32		count,
1161                                         UInt32		offset,
1162                                         task_t		task,
1163                                         IOOptionBits	options,
1164                                         IOMapper *	mapper)
1165{
1166    return( false );
1167}
1168
1169#ifndef __LP64__
1170IOMemoryDescriptor *
1171IOMemoryDescriptor::withPhysicalRanges(	IOPhysicalRange * ranges,
1172                                        UInt32          withCount,
1173                                        IODirection     direction,
1174                                        bool            asReference)
1175{
1176    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1177    if (that)
1178    {
1179	if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1180	    return that;
1181
1182        that->release();
1183    }
1184    return 0;
1185}
1186
1187IOMemoryDescriptor *
1188IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *	of,
1189				IOByteCount		offset,
1190				IOByteCount		length,
1191				IODirection		direction)
1192{
1193    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
1194}
1195#endif /* !__LP64__ */
1196
1197IOMemoryDescriptor *
1198IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1199{
1200    IOGeneralMemoryDescriptor *origGenMD =
1201	OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1202
1203    if (origGenMD)
1204	return IOGeneralMemoryDescriptor::
1205	    withPersistentMemoryDescriptor(origGenMD);
1206    else
1207	return 0;
1208}
1209
1210IOMemoryDescriptor *
1211IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1212{
1213    IOMemoryReference * memRef;
1214
1215    if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
1216
1217    if (memRef == originalMD->_memRef)
1218    {
1219	originalMD->retain();		    // Add a new reference to ourselves
1220        originalMD->memoryReferenceRelease(memRef);
1221	return originalMD;
1222    }
1223
1224    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1225    IOMDPersistentInitData initData = { originalMD, memRef };
1226
1227    if (self
1228    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1229        self->release();
1230	self = 0;
1231    }
1232    return self;
1233}
1234
1235#ifndef __LP64__
1236bool
1237IOGeneralMemoryDescriptor::initWithAddress(void *      address,
1238                                    IOByteCount   withLength,
1239                                    IODirection withDirection)
1240{
1241    _singleRange.v.address = (vm_offset_t) address;
1242    _singleRange.v.length  = withLength;
1243
1244    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1245}
1246
1247bool
1248IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1249                                    IOByteCount    withLength,
1250                                    IODirection  withDirection,
1251                                    task_t       withTask)
1252{
1253    _singleRange.v.address = address;
1254    _singleRange.v.length  = withLength;
1255
1256    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1257}
1258
1259bool
1260IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1261				 IOPhysicalAddress	address,
1262				 IOByteCount		withLength,
1263				 IODirection      	withDirection )
1264{
1265    _singleRange.p.address = address;
1266    _singleRange.p.length  = withLength;
1267
1268    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1269}
1270
1271bool
1272IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1273                                IOPhysicalRange * ranges,
1274                                UInt32            count,
1275                                IODirection       direction,
1276                                bool              reference)
1277{
1278    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1279
1280    if (reference)
1281        mdOpts |= kIOMemoryAsReference;
1282
1283    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1284}
1285
1286bool
1287IOGeneralMemoryDescriptor::initWithRanges(
1288                                   IOVirtualRange * ranges,
1289                                   UInt32           count,
1290                                   IODirection      direction,
1291                                   task_t           task,
1292                                   bool             reference)
1293{
1294    IOOptionBits mdOpts = direction;
1295
1296    if (reference)
1297        mdOpts |= kIOMemoryAsReference;
1298
1299    if (task) {
1300        mdOpts |= kIOMemoryTypeVirtual;
1301
1302	// Auto-prepare if this is a kernel memory descriptor as very few
1303	// clients bother to prepare() kernel memory.
1304	// But it was not enforced so what are you going to do?
1305        if (task == kernel_task)
1306            mdOpts |= kIOMemoryAutoPrepare;
1307    }
1308    else
1309        mdOpts |= kIOMemoryTypePhysical;
1310
1311    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1312}
1313#endif /* !__LP64__ */
1314
1315/*
1316 * initWithOptions:
1317 *
1318 *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1319 * from a given task, several physical ranges, an UPL from the ubc
1320 * system or a uio (may be 64bit) from the BSD subsystem.
1321 *
1322 * Passing the ranges as a reference will avoid an extra allocation.
1323 *
1324 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1325 * existing instance -- note this behavior is not commonly supported in other
1326 * I/O Kit classes, although it is supported here.
1327 */
1328
1329bool
1330IOGeneralMemoryDescriptor::initWithOptions(void *	buffers,
1331                                           UInt32	count,
1332                                           UInt32	offset,
1333                                           task_t	task,
1334                                           IOOptionBits	options,
1335                                           IOMapper *	mapper)
1336{
1337    IOOptionBits type = options & kIOMemoryTypeMask;
1338
1339#ifndef __LP64__
1340    if (task
1341        && (kIOMemoryTypeVirtual == type)
1342        && vm_map_is_64bit(get_task_map(task))
1343        && ((IOVirtualRange *) buffers)->address)
1344    {
1345        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1346        return false;
1347    }
1348#endif /* !__LP64__ */
1349
1350    // Grab the original MD's configuation data to initialse the
1351    // arguments to this function.
1352    if (kIOMemoryTypePersistentMD == type) {
1353
1354	IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1355	const IOGeneralMemoryDescriptor *orig = initData->fMD;
1356	ioGMDData *dataP = getDataP(orig->_memoryEntries);
1357
1358	// Only accept persistent memory descriptors with valid dataP data.
1359	assert(orig->_rangesCount == 1);
1360	if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1361	    return false;
1362
1363	_memRef = initData->fMemRef;	// Grab the new named entry
1364	options = orig->_flags & ~kIOMemoryAsReference;
1365        type = options & kIOMemoryTypeMask;
1366	buffers = orig->_ranges.v;
1367	count = orig->_rangesCount;
1368
1369	// Now grab the original task and whatever mapper was previously used
1370	task = orig->_task;
1371	mapper = dataP->fMapper;
1372
1373	// We are ready to go through the original initialisation now
1374    }
1375
1376    switch (type) {
1377    case kIOMemoryTypeUIO:
1378    case kIOMemoryTypeVirtual:
1379#ifndef __LP64__
1380    case kIOMemoryTypeVirtual64:
1381#endif /* !__LP64__ */
1382        assert(task);
1383        if (!task)
1384            return false;
1385	break;
1386
1387    case kIOMemoryTypePhysical:		// Neither Physical nor UPL should have a task
1388#ifndef __LP64__
1389    case kIOMemoryTypePhysical64:
1390#endif /* !__LP64__ */
1391    case kIOMemoryTypeUPL:
1392        assert(!task);
1393        break;
1394    default:
1395        return false;	/* bad argument */
1396    }
1397
1398    assert(buffers);
1399    assert(count);
1400
1401    /*
1402     * We can check the _initialized  instance variable before having ever set
1403     * it to an initial value because I/O Kit guarantees that all our instance
1404     * variables are zeroed on an object's allocation.
1405     */
1406
1407    if (_initialized) {
1408        /*
1409         * An existing memory descriptor is being retargeted to point to
1410         * somewhere else.  Clean up our present state.
1411         */
1412	IOOptionBits type = _flags & kIOMemoryTypeMask;
1413	if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1414	{
1415	    while (_wireCount)
1416		complete();
1417	}
1418        if (_ranges.v && !(kIOMemoryAsReference & _flags))
1419	{
1420	    if (kIOMemoryTypeUIO == type)
1421		uio_free((uio_t) _ranges.v);
1422#ifndef __LP64__
1423	    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1424		IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1425#endif /* !__LP64__ */
1426	    else
1427		IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1428	}
1429
1430	options |= (kIOMemoryRedirected & _flags);
1431	if (!(kIOMemoryRedirected & options))
1432	{
1433	    if (_memRef)
1434	    {
1435		memoryReferenceRelease(_memRef);
1436		_memRef = 0;
1437	    }
1438	    if (_mappings)
1439		_mappings->flushCollection();
1440	}
1441    }
1442    else {
1443        if (!super::init())
1444            return false;
1445        _initialized = true;
1446    }
1447
1448    // Grab the appropriate mapper
1449    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
1450    if (kIOMemoryMapperNone & options)
1451        mapper = 0;	// No Mapper
1452    else if (mapper == kIOMapperSystem) {
1453        IOMapper::checkForSystemMapper();
1454        gIOSystemMapper = mapper = IOMapper::gSystem;
1455    }
1456
1457    // Temp binary compatibility for kIOMemoryThreadSafe
1458    if (kIOMemoryReserved6156215 & options)
1459    {
1460	options &= ~kIOMemoryReserved6156215;
1461	options |= kIOMemoryThreadSafe;
1462    }
1463    // Remove the dynamic internal use flags from the initial setting
1464    options 		  &= ~(kIOMemoryPreparedReadOnly);
1465    _flags		   = options;
1466    _task                  = task;
1467
1468#ifndef __LP64__
1469    _direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
1470#endif /* !__LP64__ */
1471
1472    __iomd_reservedA = 0;
1473    __iomd_reservedB = 0;
1474    _highestPage = 0;
1475
1476    if (kIOMemoryThreadSafe & options)
1477    {
1478	if (!_prepareLock)
1479	    _prepareLock = IOLockAlloc();
1480    }
1481    else if (_prepareLock)
1482    {
1483	IOLockFree(_prepareLock);
1484	_prepareLock = NULL;
1485    }
1486
1487    if (kIOMemoryTypeUPL == type) {
1488
1489        ioGMDData *dataP;
1490        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1491
1492        if (!initMemoryEntries(dataSize, mapper)) return (false);
1493        dataP = getDataP(_memoryEntries);
1494        dataP->fPageCnt = 0;
1495
1496 //       _wireCount++;	// UPLs start out life wired
1497
1498        _length    = count;
1499        _pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1500
1501        ioPLBlock iopl;
1502        iopl.fIOPL = (upl_t) buffers;
1503        upl_set_referenced(iopl.fIOPL, true);
1504        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1505
1506	if (upl_get_size(iopl.fIOPL) < (count + offset))
1507	    panic("short external upl");
1508
1509        _highestPage = upl_get_highest_page(iopl.fIOPL);
1510
1511        // Set the flag kIOPLOnDevice convieniently equal to 1
1512        iopl.fFlags  = pageList->device | kIOPLExternUPL;
1513        if (!pageList->device) {
1514            // Pre-compute the offset into the UPL's page list
1515            pageList = &pageList[atop_32(offset)];
1516            offset &= PAGE_MASK;
1517        }
1518        iopl.fIOMDOffset = 0;
1519        iopl.fMappedPage = 0;
1520        iopl.fPageInfo = (vm_address_t) pageList;
1521        iopl.fPageOffset = offset;
1522        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1523    }
1524    else {
1525	// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1526	// kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1527
1528	// Initialize the memory descriptor
1529	if (options & kIOMemoryAsReference) {
1530#ifndef __LP64__
1531	    _rangesIsAllocated = false;
1532#endif /* !__LP64__ */
1533
1534	    // Hack assignment to get the buffer arg into _ranges.
1535	    // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1536	    // work, C++ sigh.
1537	    // This also initialises the uio & physical ranges.
1538	    _ranges.v = (IOVirtualRange *) buffers;
1539	}
1540	else {
1541#ifndef __LP64__
1542	    _rangesIsAllocated = true;
1543#endif /* !__LP64__ */
1544	    switch (type)
1545	    {
1546	      case kIOMemoryTypeUIO:
1547		_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1548		break;
1549
1550#ifndef __LP64__
1551	      case kIOMemoryTypeVirtual64:
1552	      case kIOMemoryTypePhysical64:
1553		if (count == 1
1554		    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1555		    ) {
1556		    if (kIOMemoryTypeVirtual64 == type)
1557			type = kIOMemoryTypeVirtual;
1558		    else
1559			type = kIOMemoryTypePhysical;
1560		    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1561		    _rangesIsAllocated = false;
1562		    _ranges.v = &_singleRange.v;
1563		    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1564		    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
1565		    break;
1566		}
1567		_ranges.v64 = IONew(IOAddressRange, count);
1568		if (!_ranges.v64)
1569		    return false;
1570		bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1571		break;
1572#endif /* !__LP64__ */
1573	      case kIOMemoryTypeVirtual:
1574	      case kIOMemoryTypePhysical:
1575		if (count == 1) {
1576		    _flags |= kIOMemoryAsReference;
1577#ifndef __LP64__
1578		    _rangesIsAllocated = false;
1579#endif /* !__LP64__ */
1580		    _ranges.v = &_singleRange.v;
1581		} else {
1582		    _ranges.v = IONew(IOVirtualRange, count);
1583		    if (!_ranges.v)
1584			return false;
1585		}
1586		bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1587		break;
1588	    }
1589	}
1590
1591	// Find starting address within the vector of ranges
1592	Ranges vec = _ranges;
1593	UInt32 length = 0;
1594	UInt32 pages = 0;
1595	for (unsigned ind = 0; ind < count;  ind++) {
1596	    mach_vm_address_t addr;
1597	    mach_vm_size_t len;
1598
1599	    // addr & len are returned by this function
1600	    getAddrLenForInd(addr, len, type, vec, ind);
1601	    pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
1602	    len += length;
1603	    assert(len >= length);	// Check for 32 bit wrap around
1604	    length = len;
1605
1606	    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1607	    {
1608		ppnum_t highPage = atop_64(addr + len - 1);
1609		if (highPage > _highestPage)
1610		    _highestPage = highPage;
1611	    }
1612	}
1613	_length      = length;
1614	_pages       = pages;
1615	_rangesCount = count;
1616
1617        // Auto-prepare memory at creation time.
1618        // Implied completion when descriptor is free-ed
1619        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1620            _wireCount++;	// Physical MDs are, by definition, wired
1621        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1622            ioGMDData *dataP;
1623            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
1624
1625            if (!initMemoryEntries(dataSize, mapper)) return false;
1626            dataP = getDataP(_memoryEntries);
1627            dataP->fPageCnt = _pages;
1628
1629	    if ( (kIOMemoryPersistent & _flags) && !_memRef)
1630	    {
1631		IOReturn
1632		err = memoryReferenceCreate(0, &_memRef);
1633		if (kIOReturnSuccess != err) return false;
1634	    }
1635
1636            if ((_flags & kIOMemoryAutoPrepare)
1637             && prepare() != kIOReturnSuccess)
1638                return false;
1639        }
1640    }
1641
1642    return true;
1643}
1644
1645/*
1646 * free
1647 *
1648 * Free resources.
1649 */
1650void IOGeneralMemoryDescriptor::free()
1651{
1652    IOOptionBits type = _flags & kIOMemoryTypeMask;
1653
1654    if( reserved)
1655    {
1656	LOCK;
1657	reserved->dp.memory = 0;
1658	UNLOCK;
1659    }
1660    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1661    {
1662	ioGMDData * dataP;
1663	if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1664	{
1665	    dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
1666	    dataP->fMappedBase = 0;
1667	}
1668    }
1669    else
1670    {
1671	while (_wireCount) complete();
1672    }
1673
1674    if (_memoryEntries) _memoryEntries->release();
1675
1676    if (_ranges.v && !(kIOMemoryAsReference & _flags))
1677    {
1678	if (kIOMemoryTypeUIO == type)
1679	    uio_free((uio_t) _ranges.v);
1680#ifndef __LP64__
1681	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1682	    IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1683#endif /* !__LP64__ */
1684	else
1685	    IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1686
1687	_ranges.v = NULL;
1688    }
1689
1690    if (reserved)
1691    {
1692        if (reserved->dp.devicePager)
1693        {
1694            // memEntry holds a ref on the device pager which owns reserved
1695            // (IOMemoryDescriptorReserved) so no reserved access after this point
1696            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1697        }
1698        else
1699            IODelete(reserved, IOMemoryDescriptorReserved, 1);
1700        reserved = NULL;
1701    }
1702
1703    if (_memRef)      memoryReferenceRelease(_memRef);
1704    if (_prepareLock) IOLockFree(_prepareLock);
1705
1706    super::free();
1707}
1708
1709#ifndef __LP64__
1710void IOGeneralMemoryDescriptor::unmapFromKernel()
1711{
1712    panic("IOGMD::unmapFromKernel deprecated");
1713}
1714
1715void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1716{
1717    panic("IOGMD::mapIntoKernel deprecated");
1718}
1719#endif /* !__LP64__ */
1720
1721/*
1722 * getDirection:
1723 *
1724 * Get the direction of the transfer.
1725 */
1726IODirection IOMemoryDescriptor::getDirection() const
1727{
1728#ifndef __LP64__
1729    if (_direction)
1730	return _direction;
1731#endif /* !__LP64__ */
1732    return (IODirection) (_flags & kIOMemoryDirectionMask);
1733}
1734
1735/*
1736 * getLength:
1737 *
1738 * Get the length of the transfer (over all ranges).
1739 */
1740IOByteCount IOMemoryDescriptor::getLength() const
1741{
1742    return _length;
1743}
1744
1745void IOMemoryDescriptor::setTag( IOOptionBits tag )
1746{
1747    _tag = tag;
1748}
1749
1750IOOptionBits IOMemoryDescriptor::getTag( void )
1751{
1752    return( _tag);
1753}
1754
1755#ifndef __LP64__
1756// @@@ gvdl: who is using this API?  Seems like a wierd thing to implement.
1757IOPhysicalAddress
1758IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
1759{
1760    addr64_t physAddr = 0;
1761
1762    if( prepare() == kIOReturnSuccess) {
1763        physAddr = getPhysicalSegment64( offset, length );
1764        complete();
1765    }
1766
1767    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1768}
1769#endif /* !__LP64__ */
1770
1771IOByteCount IOMemoryDescriptor::readBytes
1772                (IOByteCount offset, void *bytes, IOByteCount length)
1773{
1774    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1775    IOByteCount remaining;
1776
1777    // Assert that this entire I/O is withing the available range
1778    assert(offset <= _length);
1779    assert(offset + length <= _length);
1780    if (offset >= _length) {
1781        return 0;
1782    }
1783
1784    if (kIOMemoryThreadSafe & _flags)
1785	LOCK;
1786
1787    remaining = length = min(length, _length - offset);
1788    while (remaining) {	// (process another target segment?)
1789        addr64_t	srcAddr64;
1790        IOByteCount	srcLen;
1791
1792        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1793        if (!srcAddr64)
1794            break;
1795
1796        // Clip segment length to remaining
1797        if (srcLen > remaining)
1798            srcLen = remaining;
1799
1800        copypv(srcAddr64, dstAddr, srcLen,
1801                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1802
1803        dstAddr   += srcLen;
1804        offset    += srcLen;
1805        remaining -= srcLen;
1806    }
1807
1808    if (kIOMemoryThreadSafe & _flags)
1809	UNLOCK;
1810
1811    assert(!remaining);
1812
1813    return length - remaining;
1814}
1815
1816IOByteCount IOMemoryDescriptor::writeBytes
1817                (IOByteCount inoffset, const void *bytes, IOByteCount length)
1818{
1819    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1820    IOByteCount remaining;
1821    IOByteCount offset = inoffset;
1822
1823    // Assert that this entire I/O is withing the available range
1824    assert(offset <= _length);
1825    assert(offset + length <= _length);
1826
1827    assert( !(kIOMemoryPreparedReadOnly & _flags) );
1828
1829    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1830        return 0;
1831    }
1832
1833    if (kIOMemoryThreadSafe & _flags)
1834	LOCK;
1835
1836    remaining = length = min(length, _length - offset);
1837    while (remaining) {	// (process another target segment?)
1838        addr64_t    dstAddr64;
1839        IOByteCount dstLen;
1840
1841        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1842        if (!dstAddr64)
1843            break;
1844
1845        // Clip segment length to remaining
1846        if (dstLen > remaining)
1847            dstLen = remaining;
1848
1849	if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1850	else
1851	{
1852	    copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1853		    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1854	    srcAddr   += dstLen;
1855	}
1856        offset    += dstLen;
1857        remaining -= dstLen;
1858    }
1859
1860    if (kIOMemoryThreadSafe & _flags)
1861	UNLOCK;
1862
1863    assert(!remaining);
1864
1865    if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1866
1867    return length - remaining;
1868}
1869
1870#ifndef __LP64__
1871void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1872{
1873    panic("IOGMD::setPosition deprecated");
1874}
1875#endif /* !__LP64__ */
1876
1877static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1878
1879uint64_t
1880IOGeneralMemoryDescriptor::getPreparationID( void )
1881{
1882    ioGMDData *dataP;
1883
1884    if (!_wireCount)
1885	return (kIOPreparationIDUnprepared);
1886
1887    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1888      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1889    {
1890        IOMemoryDescriptor::setPreparationID();
1891        return (IOMemoryDescriptor::getPreparationID());
1892    }
1893
1894    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1895	return (kIOPreparationIDUnprepared);
1896
1897    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1898    {
1899	dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1900    }
1901    return (dataP->fPreparationID);
1902}
1903
1904IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1905{
1906    if (!reserved)
1907    {
1908        reserved = IONew(IOMemoryDescriptorReserved, 1);
1909        if (reserved)
1910            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1911    }
1912    return (reserved);
1913}
1914
1915void IOMemoryDescriptor::setPreparationID( void )
1916{
1917    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1918    {
1919#if defined(__ppc__ )
1920        reserved->preparationID = gIOMDPreparationID++;
1921#else
1922        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1923#endif
1924    }
1925}
1926
1927uint64_t IOMemoryDescriptor::getPreparationID( void )
1928{
1929    if (reserved)
1930        return (reserved->preparationID);
1931    else
1932        return (kIOPreparationIDUnsupported);
1933}
1934
1935IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1936{
1937    IOReturn err = kIOReturnSuccess;
1938    DMACommandOps params;
1939    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1940    ioGMDData *dataP;
1941
1942    params = (op & ~kIOMDDMACommandOperationMask & op);
1943    op &= kIOMDDMACommandOperationMask;
1944
1945    if (kIOMDDMAMap == op)
1946    {
1947	if (dataSize < sizeof(IOMDDMAMapArgs))
1948	    return kIOReturnUnderrun;
1949
1950	IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1951
1952	if (!_memoryEntries
1953	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1954
1955	if (_memoryEntries && data->fMapper)
1956	{
1957	    bool remap;
1958	    bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1959	    dataP = getDataP(_memoryEntries);
1960
1961	    if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1962	    if (data->fMapSpec.alignment      > dataP->fDMAMapAlignment)      dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
1963
1964	    remap = (dataP->fDMAMapNumAddressBits < 64)
1965	    	 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1966	    remap |= (dataP->fDMAMapAlignment > page_size);
1967	    remap |= (!whole);
1968	    if (remap || !dataP->fMappedBase)
1969	    {
1970//		if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1971	    	err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1972		if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1973		{
1974		    dataP->fMappedBase = data->fAlloc;
1975		    data->fAllocCount = 0; 			// IOMD owns the alloc now
1976		}
1977	    }
1978	    else
1979	    {
1980	    	data->fAlloc = dataP->fMappedBase;
1981		data->fAllocCount = 0; 				// IOMD owns the alloc
1982	    }
1983	    data->fMapContig = !dataP->fDiscontig;
1984	}
1985
1986	return (err);
1987    }
1988
1989    if (kIOMDAddDMAMapSpec == op)
1990    {
1991	if (dataSize < sizeof(IODMAMapSpecification))
1992	    return kIOReturnUnderrun;
1993
1994	IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1995
1996	if (!_memoryEntries
1997	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1998
1999	if (_memoryEntries)
2000	{
2001	    dataP = getDataP(_memoryEntries);
2002	    if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
2003	     	dataP->fDMAMapNumAddressBits = data->numAddressBits;
2004	    if (data->alignment > dataP->fDMAMapAlignment)
2005	     	dataP->fDMAMapAlignment = data->alignment;
2006	}
2007	return kIOReturnSuccess;
2008    }
2009
2010    if (kIOMDGetCharacteristics == op) {
2011
2012	if (dataSize < sizeof(IOMDDMACharacteristics))
2013	    return kIOReturnUnderrun;
2014
2015	IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2016	data->fLength = _length;
2017	data->fSGCount = _rangesCount;
2018	data->fPages = _pages;
2019	data->fDirection = getDirection();
2020	if (!_wireCount)
2021	    data->fIsPrepared = false;
2022	else {
2023	    data->fIsPrepared = true;
2024	    data->fHighestPage = _highestPage;
2025	    if (_memoryEntries)
2026	    {
2027		dataP = getDataP(_memoryEntries);
2028		ioPLBlock *ioplList = getIOPLList(dataP);
2029		UInt count = getNumIOPL(_memoryEntries, dataP);
2030		if (count == 1)
2031		    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2032	    }
2033	}
2034
2035	return kIOReturnSuccess;
2036
2037#if IOMD_DEBUG_DMAACTIVE
2038    } else if (kIOMDDMAActive == op) {
2039	if (params) OSIncrementAtomic(&md->__iomd_reservedA);
2040	else {
2041	    if (md->__iomd_reservedA)
2042		OSDecrementAtomic(&md->__iomd_reservedA);
2043	    else
2044		panic("kIOMDSetDMAInactive");
2045	}
2046#endif /* IOMD_DEBUG_DMAACTIVE */
2047
2048    } else if (kIOMDWalkSegments != op)
2049	return kIOReturnBadArgument;
2050
2051    // Get the next segment
2052    struct InternalState {
2053	IOMDDMAWalkSegmentArgs fIO;
2054	UInt fOffset2Index;
2055	UInt fIndex;
2056	UInt fNextOffset;
2057    } *isP;
2058
2059    // Find the next segment
2060    if (dataSize < sizeof(*isP))
2061	return kIOReturnUnderrun;
2062
2063    isP = (InternalState *) vData;
2064    UInt offset = isP->fIO.fOffset;
2065    bool mapped = isP->fIO.fMapped;
2066
2067    if (IOMapper::gSystem && mapped
2068        && (!(kIOMemoryHostOnly & _flags))
2069	&& (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
2070//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
2071    {
2072	if (!_memoryEntries
2073	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2074
2075	dataP = getDataP(_memoryEntries);
2076	if (dataP->fMapper)
2077	{
2078	    IODMAMapSpecification mapSpec;
2079	    bzero(&mapSpec, sizeof(mapSpec));
2080	    mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2081	    mapSpec.alignment = dataP->fDMAMapAlignment;
2082	    err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
2083	    if (kIOReturnSuccess != err) return (err);
2084	}
2085    }
2086
2087    if (offset >= _length)
2088	return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2089
2090    // Validate the previous offset
2091    UInt ind, off2Ind = isP->fOffset2Index;
2092    if (!params
2093	&& offset
2094	&& (offset == isP->fNextOffset || off2Ind <= offset))
2095	ind = isP->fIndex;
2096    else
2097	ind = off2Ind = 0;	// Start from beginning
2098
2099    UInt length;
2100    UInt64 address;
2101
2102
2103    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2104
2105	// Physical address based memory descriptor
2106	const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2107
2108	// Find the range after the one that contains the offset
2109	mach_vm_size_t len;
2110	for (len = 0; off2Ind <= offset; ind++) {
2111	    len = physP[ind].length;
2112	    off2Ind += len;
2113	}
2114
2115	// Calculate length within range and starting address
2116	length   = off2Ind - offset;
2117	address  = physP[ind - 1].address + len - length;
2118
2119	if (true && mapped && _memoryEntries
2120		&& (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2121	{
2122	    address = dataP->fMappedBase + offset;
2123	}
2124	else
2125	{
2126	    // see how far we can coalesce ranges
2127	    while (ind < _rangesCount && address + length == physP[ind].address) {
2128		len = physP[ind].length;
2129		length += len;
2130		off2Ind += len;
2131		ind++;
2132	    }
2133	}
2134
2135	// correct contiguous check overshoot
2136	ind--;
2137	off2Ind -= len;
2138    }
2139#ifndef __LP64__
2140    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2141
2142	// Physical address based memory descriptor
2143	const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2144
2145	// Find the range after the one that contains the offset
2146	mach_vm_size_t len;
2147	for (len = 0; off2Ind <= offset; ind++) {
2148	    len = physP[ind].length;
2149	    off2Ind += len;
2150	}
2151
2152	// Calculate length within range and starting address
2153	length   = off2Ind - offset;
2154	address  = physP[ind - 1].address + len - length;
2155
2156	if (true && mapped && _memoryEntries
2157		&& (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2158	{
2159	    address = dataP->fMappedBase + offset;
2160	}
2161	else
2162	{
2163	    // see how far we can coalesce ranges
2164	    while (ind < _rangesCount && address + length == physP[ind].address) {
2165		len = physP[ind].length;
2166		length += len;
2167		off2Ind += len;
2168		ind++;
2169	    }
2170	}
2171	// correct contiguous check overshoot
2172	ind--;
2173	off2Ind -= len;
2174    }
2175#endif /* !__LP64__ */
2176    else do {
2177	if (!_wireCount)
2178	    panic("IOGMD: not wired for the IODMACommand");
2179
2180	assert(_memoryEntries);
2181
2182	dataP = getDataP(_memoryEntries);
2183	const ioPLBlock *ioplList = getIOPLList(dataP);
2184	UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2185	upl_page_info_t *pageList = getPageList(dataP);
2186
2187	assert(numIOPLs > 0);
2188
2189	// Scan through iopl info blocks looking for block containing offset
2190	while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2191	    ind++;
2192
2193	// Go back to actual range as search goes past it
2194	ioPLBlock ioplInfo = ioplList[ind - 1];
2195	off2Ind = ioplInfo.fIOMDOffset;
2196
2197	if (ind < numIOPLs)
2198	    length = ioplList[ind].fIOMDOffset;
2199	else
2200	    length = _length;
2201	length -= offset;			// Remainder within iopl
2202
2203	// Subtract offset till this iopl in total list
2204	offset -= off2Ind;
2205
2206	// If a mapped address is requested and this is a pre-mapped IOPL
2207	// then just need to compute an offset relative to the mapped base.
2208	if (mapped && dataP->fMappedBase) {
2209	    offset += (ioplInfo.fPageOffset & PAGE_MASK);
2210	    address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2211	    continue;	// Done leave do/while(false) now
2212	}
2213
2214	// The offset is rebased into the current iopl.
2215	// Now add the iopl 1st page offset.
2216	offset += ioplInfo.fPageOffset;
2217
2218	// For external UPLs the fPageInfo field points directly to
2219	// the upl's upl_page_info_t array.
2220	if (ioplInfo.fFlags & kIOPLExternUPL)
2221	    pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2222	else
2223	    pageList = &pageList[ioplInfo.fPageInfo];
2224
2225	// Check for direct device non-paged memory
2226	if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2227	    address = ptoa_64(pageList->phys_addr) + offset;
2228	    continue;	// Done leave do/while(false) now
2229	}
2230
2231	// Now we need compute the index into the pageList
2232	UInt pageInd = atop_32(offset);
2233	offset &= PAGE_MASK;
2234
2235	// Compute the starting address of this segment
2236	IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2237	if (!pageAddr) {
2238	    panic("!pageList phys_addr");
2239	}
2240
2241	address = ptoa_64(pageAddr) + offset;
2242
2243	// length is currently set to the length of the remainider of the iopl.
2244	// We need to check that the remainder of the iopl is contiguous.
2245	// This is indicated by pageList[ind].phys_addr being sequential.
2246	IOByteCount contigLength = PAGE_SIZE - offset;
2247	while (contigLength < length
2248		&& ++pageAddr == pageList[++pageInd].phys_addr)
2249	{
2250	    contigLength += PAGE_SIZE;
2251	}
2252
2253	if (contigLength < length)
2254	    length = contigLength;
2255
2256
2257	assert(address);
2258	assert(length);
2259
2260    } while (false);
2261
2262    // Update return values and state
2263    isP->fIO.fIOVMAddr = address;
2264    isP->fIO.fLength   = length;
2265    isP->fIndex        = ind;
2266    isP->fOffset2Index = off2Ind;
2267    isP->fNextOffset   = isP->fIO.fOffset + length;
2268
2269    return kIOReturnSuccess;
2270}
2271
2272addr64_t
2273IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2274{
2275    IOReturn          ret;
2276    mach_vm_address_t address = 0;
2277    mach_vm_size_t    length  = 0;
2278    IOMapper *        mapper  = gIOSystemMapper;
2279    IOOptionBits      type    = _flags & kIOMemoryTypeMask;
2280
2281    if (lengthOfSegment)
2282        *lengthOfSegment = 0;
2283
2284    if (offset >= _length)
2285        return 0;
2286
2287    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2288    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2289    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2290    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2291
2292    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2293    {
2294        unsigned rangesIndex = 0;
2295	Ranges vec = _ranges;
2296	mach_vm_address_t addr;
2297
2298	// Find starting address within the vector of ranges
2299	for (;;) {
2300	    getAddrLenForInd(addr, length, type, vec, rangesIndex);
2301	    if (offset < length)
2302		break;
2303	    offset -= length; // (make offset relative)
2304	    rangesIndex++;
2305	}
2306
2307	// Now that we have the starting range,
2308	// lets find the last contiguous range
2309        addr   += offset;
2310        length -= offset;
2311
2312        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
2313	    mach_vm_address_t newAddr;
2314	    mach_vm_size_t    newLen;
2315
2316	    getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2317	    if (addr + length != newAddr)
2318		break;
2319	    length += newLen;
2320	}
2321        if (addr)
2322	    address = (IOPhysicalAddress) addr;	// Truncate address to 32bit
2323    }
2324    else
2325    {
2326	IOMDDMAWalkSegmentState _state;
2327	IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2328
2329	state->fOffset = offset;
2330	state->fLength = _length - offset;
2331	state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
2332
2333	ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2334
2335	if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
2336		DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2337					ret, this, state->fOffset,
2338					state->fIOVMAddr, state->fLength);
2339	if (kIOReturnSuccess == ret)
2340	{
2341	    address = state->fIOVMAddr;
2342	    length  = state->fLength;
2343	}
2344
2345	// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2346	// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2347
2348	if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2349	{
2350	    if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2351	    {
2352		addr64_t    origAddr = address;
2353		IOByteCount origLen  = length;
2354
2355		address = mapper->mapAddr(origAddr);
2356		length = page_size - (address & (page_size - 1));
2357		while ((length < origLen)
2358		    && ((address + length) == mapper->mapAddr(origAddr + length)))
2359		    length += page_size;
2360		if (length > origLen)
2361		    length = origLen;
2362	    }
2363	}
2364    }
2365
2366    if (!address)
2367        length = 0;
2368
2369    if (lengthOfSegment)
2370        *lengthOfSegment = length;
2371
2372    return (address);
2373}
2374
2375#ifndef __LP64__
2376addr64_t
2377IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2378{
2379    addr64_t address = 0;
2380
2381    if (options & _kIOMemorySourceSegment)
2382    {
2383        address = getSourceSegment(offset, lengthOfSegment);
2384    }
2385    else if (options & kIOMemoryMapperNone)
2386    {
2387        address = getPhysicalSegment64(offset, lengthOfSegment);
2388    }
2389    else
2390    {
2391        address = getPhysicalSegment(offset, lengthOfSegment);
2392    }
2393
2394    return (address);
2395}
2396
2397addr64_t
2398IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2399{
2400    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2401}
2402
2403IOPhysicalAddress
2404IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2405{
2406    addr64_t    address = 0;
2407    IOByteCount length  = 0;
2408
2409    address = getPhysicalSegment(offset, lengthOfSegment, 0);
2410
2411    if (lengthOfSegment)
2412	length = *lengthOfSegment;
2413
2414    if ((address + length) > 0x100000000ULL)
2415    {
2416	panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2417		    address, (long) length, (getMetaClass())->getClassName());
2418    }
2419
2420    return ((IOPhysicalAddress) address);
2421}
2422
2423addr64_t
2424IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2425{
2426    IOPhysicalAddress phys32;
2427    IOByteCount	      length;
2428    addr64_t 	      phys64;
2429    IOMapper *        mapper = 0;
2430
2431    phys32 = getPhysicalSegment(offset, lengthOfSegment);
2432    if (!phys32)
2433	return 0;
2434
2435    if (gIOSystemMapper)
2436	mapper = gIOSystemMapper;
2437
2438    if (mapper)
2439    {
2440	IOByteCount origLen;
2441
2442	phys64 = mapper->mapAddr(phys32);
2443	origLen = *lengthOfSegment;
2444	length = page_size - (phys64 & (page_size - 1));
2445	while ((length < origLen)
2446	    && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
2447	    length += page_size;
2448	if (length > origLen)
2449	    length = origLen;
2450
2451	*lengthOfSegment = length;
2452    }
2453    else
2454	phys64 = (addr64_t) phys32;
2455
2456    return phys64;
2457}
2458
2459IOPhysicalAddress
2460IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2461{
2462    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
2463}
2464
2465IOPhysicalAddress
2466IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2467{
2468    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2469}
2470
2471void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2472							IOByteCount * lengthOfSegment)
2473{
2474    if (_task == kernel_task)
2475        return (void *) getSourceSegment(offset, lengthOfSegment);
2476    else
2477        panic("IOGMD::getVirtualSegment deprecated");
2478
2479    return 0;
2480}
2481#endif /* !__LP64__ */
2482
2483IOReturn
2484IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2485{
2486    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2487    DMACommandOps params;
2488    IOReturn err;
2489
2490    params = (op & ~kIOMDDMACommandOperationMask & op);
2491    op &= kIOMDDMACommandOperationMask;
2492
2493    if (kIOMDGetCharacteristics == op) {
2494	if (dataSize < sizeof(IOMDDMACharacteristics))
2495	    return kIOReturnUnderrun;
2496
2497	IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2498	data->fLength = getLength();
2499	data->fSGCount = 0;
2500	data->fDirection = getDirection();
2501	data->fIsPrepared = true;	// Assume prepared - fails safe
2502    }
2503    else if (kIOMDWalkSegments == op) {
2504	if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2505	    return kIOReturnUnderrun;
2506
2507	IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2508	IOByteCount offset  = (IOByteCount) data->fOffset;
2509
2510	IOPhysicalLength length;
2511	if (data->fMapped && IOMapper::gSystem)
2512	    data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2513	else
2514	    data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2515	data->fLength = length;
2516    }
2517    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2518    else if (kIOMDDMAMap == op)
2519    {
2520	if (dataSize < sizeof(IOMDDMAMapArgs))
2521	    return kIOReturnUnderrun;
2522	IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2523
2524	if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2525
2526	data->fMapContig = true;
2527	err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
2528	return (err);
2529    }
2530    else return kIOReturnBadArgument;
2531
2532    return kIOReturnSuccess;
2533}
2534
2535IOReturn
2536IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2537						   IOOptionBits * oldState )
2538{
2539    IOReturn	  err = kIOReturnSuccess;
2540
2541    vm_purgable_t control;
2542    int           state;
2543
2544    if (_memRef)
2545    {
2546	err = super::setPurgeable(newState, oldState);
2547    }
2548    else
2549    {
2550	if (kIOMemoryThreadSafe & _flags)
2551	    LOCK;
2552	do
2553	{
2554	    // Find the appropriate vm_map for the given task
2555	    vm_map_t curMap;
2556	    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2557	    {
2558		err = kIOReturnNotReady;
2559		break;
2560	    }
2561	    else if (!_task)
2562	    {
2563		err = kIOReturnUnsupported;
2564		break;
2565	    }
2566	    else
2567		curMap = get_task_map(_task);
2568
2569	    // can only do one range
2570	    Ranges vec = _ranges;
2571	    IOOptionBits type = _flags & kIOMemoryTypeMask;
2572	    mach_vm_address_t addr;
2573	    mach_vm_size_t    len;
2574	    getAddrLenForInd(addr, len, type, vec, 0);
2575
2576	    err = purgeableControlBits(newState, &control, &state);
2577	    if (kIOReturnSuccess != err)
2578		break;
2579	    err = mach_vm_purgable_control(curMap, addr, control, &state);
2580	    if (oldState)
2581	    {
2582		if (kIOReturnSuccess == err)
2583		{
2584		    err = purgeableStateBits(&state);
2585		    *oldState = state;
2586		}
2587	    }
2588	}
2589	while (false);
2590	if (kIOMemoryThreadSafe & _flags)
2591	    UNLOCK;
2592    }
2593
2594    return (err);
2595}
2596
2597IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2598                                           IOOptionBits * oldState )
2599{
2600    IOReturn err = kIOReturnNotReady;
2601
2602    if (kIOMemoryThreadSafe & _flags) LOCK;
2603    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2604    if (kIOMemoryThreadSafe & _flags) UNLOCK;
2605
2606    return (err);
2607}
2608
2609IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2610                                     	    IOByteCount * dirtyPageCount )
2611{
2612   IOReturn err = kIOReturnNotReady;
2613
2614    if (kIOMemoryThreadSafe & _flags) LOCK;
2615    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2616    if (kIOMemoryThreadSafe & _flags) UNLOCK;
2617
2618    return (err);
2619}
2620
2621
2622extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2623extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2624
2625static void SetEncryptOp(addr64_t pa, unsigned int count)
2626{
2627    ppnum_t page, end;
2628
2629    page = atop_64(round_page_64(pa));
2630    end  = atop_64(trunc_page_64(pa + count));
2631    for (; page < end; page++)
2632    {
2633        pmap_clear_noencrypt(page);
2634    }
2635}
2636
2637static void ClearEncryptOp(addr64_t pa, unsigned int count)
2638{
2639    ppnum_t page, end;
2640
2641    page = atop_64(round_page_64(pa));
2642    end  = atop_64(trunc_page_64(pa + count));
2643    for (; page < end; page++)
2644    {
2645        pmap_set_noencrypt(page);
2646    }
2647}
2648
2649IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2650                                                IOByteCount offset, IOByteCount length )
2651{
2652    IOByteCount remaining;
2653    unsigned int res;
2654    void (*func)(addr64_t pa, unsigned int count) = 0;
2655
2656    switch (options)
2657    {
2658        case kIOMemoryIncoherentIOFlush:
2659            func = &dcache_incoherent_io_flush64;
2660            break;
2661        case kIOMemoryIncoherentIOStore:
2662            func = &dcache_incoherent_io_store64;
2663            break;
2664
2665        case kIOMemorySetEncrypted:
2666            func = &SetEncryptOp;
2667            break;
2668        case kIOMemoryClearEncrypted:
2669            func = &ClearEncryptOp;
2670            break;
2671    }
2672
2673    if (!func)
2674        return (kIOReturnUnsupported);
2675
2676    if (kIOMemoryThreadSafe & _flags)
2677	LOCK;
2678
2679    res = 0x0UL;
2680    remaining = length = min(length, getLength() - offset);
2681    while (remaining)
2682    // (process another target segment?)
2683    {
2684        addr64_t    dstAddr64;
2685        IOByteCount dstLen;
2686
2687        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2688        if (!dstAddr64)
2689            break;
2690
2691        // Clip segment length to remaining
2692        if (dstLen > remaining)
2693            dstLen = remaining;
2694
2695	(*func)(dstAddr64, dstLen);
2696
2697        offset    += dstLen;
2698        remaining -= dstLen;
2699    }
2700
2701    if (kIOMemoryThreadSafe & _flags)
2702	UNLOCK;
2703
2704    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2705}
2706
2707#if defined(__i386__) || defined(__x86_64__)
2708extern vm_offset_t		first_avail;
2709#define io_kernel_static_end	first_avail
2710#else
2711#error io_kernel_static_end is undefined for this architecture
2712#endif
2713
2714static kern_return_t
2715io_get_kernel_static_upl(
2716	vm_map_t		/* map */,
2717	uintptr_t		offset,
2718	vm_size_t		*upl_size,
2719	upl_t			*upl,
2720	upl_page_info_array_t	page_list,
2721	unsigned int		*count,
2722	ppnum_t			*highest_page)
2723{
2724    unsigned int pageCount, page;
2725    ppnum_t phys;
2726    ppnum_t highestPage = 0;
2727
2728    pageCount = atop_32(*upl_size);
2729    if (pageCount > *count)
2730	pageCount = *count;
2731
2732    *upl = NULL;
2733
2734    for (page = 0; page < pageCount; page++)
2735    {
2736	phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2737	if (!phys)
2738	    break;
2739	page_list[page].phys_addr = phys;
2740	page_list[page].pageout	  = 0;
2741	page_list[page].absent	  = 0;
2742	page_list[page].dirty	  = 0;
2743	page_list[page].precious  = 0;
2744	page_list[page].device	  = 0;
2745	if (phys > highestPage)
2746	    highestPage = phys;
2747    }
2748
2749    *highest_page = highestPage;
2750
2751    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2752}
2753
2754IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2755{
2756    IOOptionBits type = _flags & kIOMemoryTypeMask;
2757    IOReturn error = kIOReturnCannotWire;
2758    ioGMDData *dataP;
2759    upl_page_info_array_t pageInfo;
2760    ppnum_t mapBase;
2761
2762    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2763
2764    if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2765        forDirection = (IODirection) (forDirection | getDirection());
2766
2767    int uplFlags;    // This Mem Desc's default flags for upl creation
2768    switch (kIODirectionOutIn & forDirection)
2769    {
2770    case kIODirectionOut:
2771        // Pages do not need to be marked as dirty on commit
2772        uplFlags = UPL_COPYOUT_FROM;
2773        break;
2774
2775    case kIODirectionIn:
2776    default:
2777        uplFlags = 0;	// i.e. ~UPL_COPYOUT_FROM
2778        break;
2779    }
2780
2781    if (_wireCount)
2782    {
2783        if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2784        {
2785	    OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2786	    error = kIOReturnNotWritable;
2787        }
2788        else error = kIOReturnSuccess;
2789	return (error);
2790    }
2791
2792    dataP = getDataP(_memoryEntries);
2793    IOMapper *mapper;
2794    mapper = dataP->fMapper;
2795    dataP->fMappedBase = 0;
2796
2797    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2798    if (kIODirectionPrepareToPhys32 & forDirection)
2799    {
2800	if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2801	if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2802    }
2803    if (kIODirectionPrepareNoFault    & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2804    if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2805    if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
2806
2807    mapBase = 0;
2808
2809    // Note that appendBytes(NULL) zeros the data up to the desired length
2810    //           and the length parameter is an unsigned int
2811    size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
2812    if (uplPageSize > ((unsigned int)uplPageSize))    return (kIOReturnNoMemory);
2813    if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
2814    dataP = 0;
2815
2816    // Find the appropriate vm_map for the given task
2817    vm_map_t curMap;
2818    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))            curMap = 0;
2819    else                                                     curMap = get_task_map(_task);
2820
2821    // Iterate over the vector of virtual ranges
2822    Ranges vec = _ranges;
2823    unsigned int pageIndex  = 0;
2824    IOByteCount mdOffset    = 0;
2825    ppnum_t highestPage     = 0;
2826
2827    IOMemoryEntry * memRefEntry = 0;
2828    if (_memRef) memRefEntry = &_memRef->entries[0];
2829
2830    for (UInt range = 0; range < _rangesCount; range++) {
2831        ioPLBlock iopl;
2832	mach_vm_address_t startPage;
2833        mach_vm_size_t    numBytes;
2834	ppnum_t highPage = 0;
2835
2836	// Get the startPage address and length of vec[range]
2837	getAddrLenForInd(startPage, numBytes, type, vec, range);
2838	iopl.fPageOffset = startPage & PAGE_MASK;
2839	numBytes += iopl.fPageOffset;
2840	startPage = trunc_page_64(startPage);
2841
2842	if (mapper)
2843	    iopl.fMappedPage = mapBase + pageIndex;
2844	else
2845	    iopl.fMappedPage = 0;
2846
2847	// Iterate over the current range, creating UPLs
2848        while (numBytes) {
2849	    vm_address_t kernelStart = (vm_address_t) startPage;
2850            vm_map_t theMap;
2851	    if (curMap) theMap = curMap;
2852	    else if (_memRef)
2853	    {
2854	        theMap = NULL;
2855	    }
2856	    else
2857	    {
2858		assert(_task == kernel_task);
2859		theMap = IOPageableMapForAddress(kernelStart);
2860	    }
2861
2862            int ioplFlags = uplFlags;
2863	    dataP = getDataP(_memoryEntries);
2864	    pageInfo = getPageList(dataP);
2865            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2866
2867            vm_size_t ioplSize = round_page(numBytes);
2868            unsigned int numPageInfo = atop_32(ioplSize);
2869
2870	    if ((theMap == kernel_map) && (kernelStart < io_kernel_static_end)) {
2871		error = io_get_kernel_static_upl(theMap,
2872						kernelStart,
2873						&ioplSize,
2874						&iopl.fIOPL,
2875						baseInfo,
2876						&numPageInfo,
2877						&highPage);
2878	    }
2879	    else if (_memRef) {
2880		memory_object_offset_t entryOffset;
2881
2882		entryOffset = (mdOffset - iopl.fPageOffset - memRefEntry->offset);
2883		if (entryOffset >= memRefEntry->size) {
2884		    memRefEntry++;
2885		    if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
2886		    entryOffset = 0;
2887		}
2888		if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
2889		error = memory_object_iopl_request(memRefEntry->entry,
2890						   entryOffset,
2891						   &ioplSize,
2892						   &iopl.fIOPL,
2893						   baseInfo,
2894						   &numPageInfo,
2895						   &ioplFlags);
2896	    }
2897	    else {
2898		assert(theMap);
2899		error = vm_map_create_upl(theMap,
2900						startPage,
2901						(upl_size_t*)&ioplSize,
2902						&iopl.fIOPL,
2903						baseInfo,
2904						&numPageInfo,
2905						&ioplFlags);
2906	    }
2907
2908            assert(ioplSize);
2909            if (error != KERN_SUCCESS)
2910                goto abortExit;
2911
2912	    if (iopl.fIOPL)
2913		highPage = upl_get_highest_page(iopl.fIOPL);
2914	    if (highPage > highestPage)
2915		highestPage = highPage;
2916
2917            error = kIOReturnCannotWire;
2918
2919            if (baseInfo->device) {
2920                numPageInfo = 1;
2921                iopl.fFlags = kIOPLOnDevice;
2922            }
2923            else {
2924                iopl.fFlags = 0;
2925            }
2926
2927            iopl.fIOMDOffset = mdOffset;
2928            iopl.fPageInfo = pageIndex;
2929            if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
2930
2931#if 0
2932	    // used to remove the upl for auto prepares here, for some errant code
2933	    // that freed memory before the descriptor pointing at it
2934	    if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2935	    {
2936		upl_commit(iopl.fIOPL, 0, 0);
2937		upl_deallocate(iopl.fIOPL);
2938		iopl.fIOPL = 0;
2939	    }
2940#endif
2941
2942            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2943                // Clean up partial created and unsaved iopl
2944                if (iopl.fIOPL) {
2945                    upl_abort(iopl.fIOPL, 0);
2946                    upl_deallocate(iopl.fIOPL);
2947                }
2948                goto abortExit;
2949            }
2950	    dataP = 0;
2951
2952            // Check for a multiple iopl's in one virtual range
2953            pageIndex += numPageInfo;
2954            mdOffset -= iopl.fPageOffset;
2955            if (ioplSize < numBytes) {
2956                numBytes -= ioplSize;
2957                startPage += ioplSize;
2958                mdOffset += ioplSize;
2959                iopl.fPageOffset = 0;
2960		if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2961            }
2962            else {
2963                mdOffset += numBytes;
2964                break;
2965            }
2966        }
2967    }
2968
2969    _highestPage = highestPage;
2970
2971    if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2972
2973    return kIOReturnSuccess;
2974
2975abortExit:
2976    {
2977        dataP = getDataP(_memoryEntries);
2978        UInt done = getNumIOPL(_memoryEntries, dataP);
2979        ioPLBlock *ioplList = getIOPLList(dataP);
2980
2981        for (UInt range = 0; range < done; range++)
2982	{
2983	    if (ioplList[range].fIOPL) {
2984             upl_abort(ioplList[range].fIOPL, 0);
2985             upl_deallocate(ioplList[range].fIOPL);
2986	    }
2987	}
2988	(void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2989    }
2990
2991    if (error == KERN_FAILURE)
2992        error = kIOReturnCannotWire;
2993    else if (error == KERN_MEMORY_ERROR)
2994        error = kIOReturnNoResources;
2995
2996    return error;
2997}
2998
2999bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3000{
3001    ioGMDData * dataP;
3002    unsigned    dataSize = size;
3003
3004    if (!_memoryEntries) {
3005	_memoryEntries = OSData::withCapacity(dataSize);
3006	if (!_memoryEntries)
3007	    return false;
3008    }
3009    else if (!_memoryEntries->initWithCapacity(dataSize))
3010	return false;
3011
3012    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3013    dataP = getDataP(_memoryEntries);
3014
3015    if (mapper == kIOMapperWaitSystem) {
3016        IOMapper::checkForSystemMapper();
3017        mapper = IOMapper::gSystem;
3018    }
3019    dataP->fMapper               = mapper;
3020    dataP->fPageCnt              = 0;
3021    dataP->fMappedBase           = 0;
3022    dataP->fDMAMapNumAddressBits = 64;
3023    dataP->fDMAMapAlignment      = 0;
3024    dataP->fPreparationID        = kIOPreparationIDUnprepared;
3025    dataP->fDiscontig            = false;
3026    dataP->fCompletionError      = false;
3027
3028    return (true);
3029}
3030
3031IOReturn IOMemoryDescriptor::dmaMap(
3032    IOMapper                    * mapper,
3033    const IODMAMapSpecification * mapSpec,
3034    uint64_t                      offset,
3035    uint64_t                      length,
3036    uint64_t                    * address,
3037    ppnum_t                     * mapPages)
3038{
3039    IOMDDMAWalkSegmentState  walkState;
3040    IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
3041    IOOptionBits             mdOp;
3042    IOReturn                 ret;
3043    IOPhysicalLength         segLen;
3044    addr64_t                 phys, align, pageOffset;
3045    ppnum_t                  base, pageIndex, pageCount;
3046    uint64_t                 index;
3047    uint32_t                 mapOptions = 0;
3048
3049    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3050
3051    walkArgs->fMapped = false;
3052    mdOp = kIOMDFirstSegment;
3053    pageCount = 0;
3054    for (index = 0; index < length; )
3055    {
3056	if (index && (page_mask & (index + pageOffset))) break;
3057
3058	walkArgs->fOffset = offset + index;
3059	ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
3060	mdOp = kIOMDWalkSegments;
3061	if (ret != kIOReturnSuccess) break;
3062	phys = walkArgs->fIOVMAddr;
3063	segLen = walkArgs->fLength;
3064
3065	align = (phys & page_mask);
3066	if (!index) pageOffset = align;
3067	else if (align) break;
3068	pageCount += atop_64(round_page_64(align + segLen));
3069	index += segLen;
3070    }
3071
3072    if (index < length) return (kIOReturnVMError);
3073
3074    base = mapper->iovmMapMemory(this, offset, pageCount,
3075				 mapOptions, NULL, mapSpec);
3076
3077    if (!base) return (kIOReturnNoResources);
3078
3079    mdOp = kIOMDFirstSegment;
3080    for (pageIndex = 0, index = 0; index < length; )
3081    {
3082	walkArgs->fOffset = offset + index;
3083	ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
3084	mdOp = kIOMDWalkSegments;
3085	if (ret != kIOReturnSuccess) break;
3086	phys = walkArgs->fIOVMAddr;
3087	segLen = walkArgs->fLength;
3088
3089    	ppnum_t page = atop_64(phys);
3090    	ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
3091	while (count--)
3092	{
3093	    mapper->iovmInsert(base, pageIndex, page);
3094	    page++;
3095	    pageIndex++;
3096	}
3097	index += segLen;
3098    }
3099    if (pageIndex != pageCount) panic("pageIndex");
3100
3101    *address = ptoa_64(base) + pageOffset;
3102    if (mapPages) *mapPages = pageCount;
3103
3104    return (kIOReturnSuccess);
3105}
3106
3107IOReturn IOGeneralMemoryDescriptor::dmaMap(
3108    IOMapper                    * mapper,
3109    const IODMAMapSpecification * mapSpec,
3110    uint64_t                      offset,
3111    uint64_t                      length,
3112    uint64_t                    * address,
3113    ppnum_t                     * mapPages)
3114{
3115    IOReturn          err = kIOReturnSuccess;
3116    ioGMDData *       dataP;
3117    IOOptionBits      type = _flags & kIOMemoryTypeMask;
3118
3119    *address = 0;
3120    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3121
3122    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3123     || offset || (length != _length))
3124    {
3125	err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
3126    }
3127    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3128    {
3129	const ioPLBlock * ioplList = getIOPLList(dataP);
3130	upl_page_info_t * pageList;
3131	uint32_t          mapOptions = 0;
3132	ppnum_t           base;
3133
3134	IODMAMapSpecification mapSpec;
3135	bzero(&mapSpec, sizeof(mapSpec));
3136	mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3137	mapSpec.alignment = dataP->fDMAMapAlignment;
3138
3139	// For external UPLs the fPageInfo field points directly to
3140	// the upl's upl_page_info_t array.
3141	if (ioplList->fFlags & kIOPLExternUPL)
3142	{
3143	    pageList = (upl_page_info_t *) ioplList->fPageInfo;
3144	    mapOptions |= kIODMAMapPagingPath;
3145	}
3146	else
3147	    pageList = getPageList(dataP);
3148
3149    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3150
3151	// Check for direct device non-paged memory
3152	if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3153
3154	base = mapper->iovmMapMemory(
3155			this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
3156	*address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
3157	if (mapPages) *mapPages = _pages;
3158    }
3159
3160    return (err);
3161}
3162
3163/*
3164 * prepare
3165 *
3166 * Prepare the memory for an I/O transfer.  This involves paging in
3167 * the memory, if necessary, and wiring it down for the duration of
3168 * the transfer.  The complete() method completes the processing of
3169 * the memory after the I/O transfer finishes.  This method needn't
3170 * called for non-pageable memory.
3171 */
3172
3173IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3174{
3175    IOReturn error    = kIOReturnSuccess;
3176    IOOptionBits type = _flags & kIOMemoryTypeMask;
3177
3178    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3179	return kIOReturnSuccess;
3180
3181    if (_prepareLock)
3182	IOLockLock(_prepareLock);
3183
3184    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3185    {
3186	error = wireVirtual(forDirection);
3187    }
3188
3189    if (kIOReturnSuccess == error)
3190    {
3191	if (1 == ++_wireCount)
3192	{
3193	    if (kIOMemoryClearEncrypt & _flags)
3194	    {
3195		performOperation(kIOMemoryClearEncrypted, 0, _length);
3196	    }
3197	}
3198    }
3199
3200    if (_prepareLock)
3201	IOLockUnlock(_prepareLock);
3202
3203    return error;
3204}
3205
3206/*
3207 * complete
3208 *
3209 * Complete processing of the memory after an I/O transfer finishes.
3210 * This method should not be called unless a prepare was previously
3211 * issued; the prepare() and complete() must occur in pairs, before
3212 * before and after an I/O transfer involving pageable memory.
3213 */
3214
3215IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3216{
3217    IOOptionBits type = _flags & kIOMemoryTypeMask;
3218    ioGMDData * dataP;
3219
3220    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3221	return kIOReturnSuccess;
3222
3223    if (_prepareLock)
3224	IOLockLock(_prepareLock);
3225
3226    assert(_wireCount);
3227
3228    if ((kIODirectionCompleteWithError & forDirection)
3229     && (dataP = getDataP(_memoryEntries)))
3230        dataP->fCompletionError = true;
3231
3232    if (_wireCount)
3233    {
3234        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3235        {
3236            performOperation(kIOMemorySetEncrypted, 0, _length);
3237        }
3238
3239	_wireCount--;
3240	if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3241	{
3242	    IOOptionBits type = _flags & kIOMemoryTypeMask;
3243	    dataP = getDataP(_memoryEntries);
3244	    ioPLBlock *ioplList = getIOPLList(dataP);
3245	    UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3246
3247	    if (_wireCount)
3248	    {
3249		// kIODirectionCompleteWithDataValid & forDirection
3250		if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3251		{
3252		    for (ind = 0; ind < count; ind++)
3253		    {
3254			if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
3255		    }
3256		}
3257	    }
3258	    else
3259	    {
3260#if IOMD_DEBUG_DMAACTIVE
3261		if (__iomd_reservedA) panic("complete() while dma active");
3262#endif /* IOMD_DEBUG_DMAACTIVE */
3263
3264		if (dataP->fMappedBase) {
3265		    dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
3266		    dataP->fMappedBase = 0;
3267		}
3268		// Only complete iopls that we created which are for TypeVirtual
3269		if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3270		    for (ind = 0; ind < count; ind++)
3271			if (ioplList[ind].fIOPL) {
3272			    if (dataP->fCompletionError)
3273				upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3274			    else
3275				upl_commit(ioplList[ind].fIOPL, 0, 0);
3276			    upl_deallocate(ioplList[ind].fIOPL);
3277			}
3278		} else if (kIOMemoryTypeUPL == type) {
3279		    upl_set_referenced(ioplList[0].fIOPL, false);
3280		}
3281
3282		(void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3283
3284		dataP->fPreparationID = kIOPreparationIDUnprepared;
3285	    }
3286	}
3287    }
3288
3289    if (_prepareLock)
3290	IOLockUnlock(_prepareLock);
3291
3292    return kIOReturnSuccess;
3293}
3294
3295IOReturn IOGeneralMemoryDescriptor::doMap(
3296	vm_map_t		__addressMap,
3297	IOVirtualAddress *	__address,
3298	IOOptionBits		options,
3299	IOByteCount		__offset,
3300	IOByteCount		__length )
3301
3302{
3303#ifndef __LP64__
3304    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3305#endif /* !__LP64__ */
3306
3307    kern_return_t  err;
3308
3309    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
3310    mach_vm_size_t offset  = mapping->fOffset + __offset;
3311    mach_vm_size_t length  = mapping->fLength;
3312
3313    IOOptionBits type = _flags & kIOMemoryTypeMask;
3314    Ranges vec = _ranges;
3315
3316    mach_vm_address_t range0Addr = 0;
3317    mach_vm_size_t    range0Len = 0;
3318
3319    if ((offset >= _length) || ((offset + length) > _length))
3320	return( kIOReturnBadArgument );
3321
3322    if (vec.v)
3323	getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3324
3325    // mapping source == dest? (could be much better)
3326    if (_task
3327     && (mapping->fAddressTask == _task)
3328     && (mapping->fAddressMap == get_task_map(_task))
3329     && (options & kIOMapAnywhere)
3330     && (1 == _rangesCount)
3331     && (0 == offset)
3332     && range0Addr
3333     && (length <= range0Len))
3334    {
3335	mapping->fAddress = range0Addr;
3336	mapping->fOptions |= kIOMapStatic;
3337
3338	return( kIOReturnSuccess );
3339    }
3340
3341    if (!_memRef)
3342    {
3343        IOOptionBits createOptions = 0;
3344	if (!(kIOMapReadOnly & options))
3345	{
3346	    createOptions |= kIOMemoryReferenceWrite;
3347#if DEVELOPMENT || DEBUG
3348            if (kIODirectionOut == (kIODirectionOutIn & _flags))
3349            {
3350                OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3351	    }
3352#endif
3353	}
3354	err = memoryReferenceCreate(createOptions, &_memRef);
3355	if (kIOReturnSuccess != err) return (err);
3356    }
3357
3358    memory_object_t pager;
3359    pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3360
3361    // <upl_transpose //
3362    if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3363    {
3364        do
3365	{
3366	    upl_t	   redirUPL2;
3367	    vm_size_t      size;
3368	    int		   flags;
3369	    unsigned int   lock_count;
3370
3371	    if (!_memRef || (1 != _memRef->count))
3372	    {
3373		err = kIOReturnNotReadable;
3374		break;
3375	    }
3376
3377	    size = round_page(mapping->fLength);
3378	    flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3379			| UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3380
3381	    if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3382					    NULL, NULL,
3383					    &flags))
3384		redirUPL2 = NULL;
3385
3386	    for (lock_count = 0;
3387		 IORecursiveLockHaveLock(gIOMemoryLock);
3388		 lock_count++) {
3389	      UNLOCK;
3390	    }
3391	    err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3392	    for (;
3393		 lock_count;
3394		 lock_count--) {
3395	      LOCK;
3396	    }
3397
3398	    if (kIOReturnSuccess != err)
3399	    {
3400		IOLog("upl_transpose(%x)\n", err);
3401		err = kIOReturnSuccess;
3402	    }
3403
3404	    if (redirUPL2)
3405	    {
3406		upl_commit(redirUPL2, NULL, 0);
3407		upl_deallocate(redirUPL2);
3408		redirUPL2 = 0;
3409	    }
3410	    {
3411		// swap the memEntries since they now refer to different vm_objects
3412		IOMemoryReference * me = _memRef;
3413		_memRef = mapping->fMemory->_memRef;
3414		mapping->fMemory->_memRef = me;
3415	    }
3416	    if (pager)
3417		err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3418	}
3419	while (false);
3420    }
3421    // upl_transpose> //
3422    else
3423    {
3424	err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3425
3426	if ((err == KERN_SUCCESS) && pager)
3427	{
3428	    err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3429	    if (err != KERN_SUCCESS)
3430	    {
3431		doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3432	    }
3433	    else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3434	    {
3435		mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3436	    }
3437	}
3438    }
3439
3440    return (err);
3441}
3442
3443IOReturn IOGeneralMemoryDescriptor::doUnmap(
3444	vm_map_t		addressMap,
3445	IOVirtualAddress	__address,
3446	IOByteCount		__length )
3447{
3448    return (super::doUnmap(addressMap, __address, __length));
3449}
3450
3451/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3452
3453#undef super
3454#define super OSObject
3455
3456OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
3457
3458OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3459OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3460OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3461OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3462OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3463OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3464OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3465OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
3466
3467/* ex-inline function implementation */
3468IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3469    { return( getPhysicalSegment( 0, 0 )); }
3470
3471/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3472
3473bool IOMemoryMap::init(
3474        task_t			intoTask,
3475        mach_vm_address_t	toAddress,
3476        IOOptionBits		_options,
3477        mach_vm_size_t		_offset,
3478        mach_vm_size_t		_length )
3479{
3480    if (!intoTask)
3481	return( false);
3482
3483    if (!super::init())
3484	return(false);
3485
3486    fAddressMap  = get_task_map(intoTask);
3487    if (!fAddressMap)
3488	return(false);
3489    vm_map_reference(fAddressMap);
3490
3491    fAddressTask = intoTask;
3492    fOptions     = _options;
3493    fLength      = _length;
3494    fOffset	 = _offset;
3495    fAddress     = toAddress;
3496
3497    return (true);
3498}
3499
3500bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
3501{
3502    if (!_memory)
3503	return(false);
3504
3505    if (!fSuperMap)
3506    {
3507	if( (_offset + fLength) > _memory->getLength())
3508	    return( false);
3509	fOffset = _offset;
3510    }
3511
3512    _memory->retain();
3513    if (fMemory)
3514    {
3515	if (fMemory != _memory)
3516	    fMemory->removeMapping(this);
3517	fMemory->release();
3518    }
3519    fMemory = _memory;
3520
3521    return( true );
3522}
3523
3524IOReturn IOMemoryDescriptor::doMap(
3525	vm_map_t		__addressMap,
3526	IOVirtualAddress *	__address,
3527	IOOptionBits		options,
3528	IOByteCount		__offset,
3529	IOByteCount		__length )
3530{
3531    return (kIOReturnUnsupported);
3532}
3533
3534IOReturn IOMemoryDescriptor::handleFault(
3535        void *			_pager,
3536	mach_vm_size_t		sourceOffset,
3537	mach_vm_size_t		length)
3538{
3539    if( kIOMemoryRedirected & _flags)
3540    {
3541#if DEBUG
3542	IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3543#endif
3544	do {
3545	    SLEEP;
3546	} while( kIOMemoryRedirected & _flags );
3547    }
3548    return (kIOReturnSuccess);
3549}
3550
3551IOReturn IOMemoryDescriptor::populateDevicePager(
3552        void *			_pager,
3553	vm_map_t		addressMap,
3554	mach_vm_address_t	address,
3555	mach_vm_size_t		sourceOffset,
3556	mach_vm_size_t		length,
3557        IOOptionBits		options )
3558{
3559    IOReturn		err = kIOReturnSuccess;
3560    memory_object_t	pager = (memory_object_t) _pager;
3561    mach_vm_size_t	size;
3562    mach_vm_size_t	bytes;
3563    mach_vm_size_t	page;
3564    mach_vm_size_t	pageOffset;
3565    mach_vm_size_t	pagerOffset;
3566    IOPhysicalLength	segLen;
3567    addr64_t		physAddr;
3568
3569    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3570    assert( physAddr );
3571    pageOffset = physAddr - trunc_page_64( physAddr );
3572    pagerOffset = sourceOffset;
3573
3574    size = length + pageOffset;
3575    physAddr -= pageOffset;
3576
3577    segLen += pageOffset;
3578    bytes = size;
3579    do
3580    {
3581	// in the middle of the loop only map whole pages
3582	if( segLen >= bytes) segLen = bytes;
3583	else if (segLen != trunc_page(segLen))    err = kIOReturnVMError;
3584        if (physAddr != trunc_page_64(physAddr))  err = kIOReturnBadArgument;
3585
3586	if (kIOReturnSuccess != err) break;
3587
3588	if (reserved && reserved->dp.pagerContig)
3589	{
3590	    IOPhysicalLength	allLen;
3591	    addr64_t		allPhys;
3592
3593	    allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
3594	    assert( allPhys );
3595	    err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
3596	}
3597	else
3598	{
3599	    for( page = 0;
3600		 (page < segLen) && (KERN_SUCCESS == err);
3601		 page += page_size)
3602	    {
3603		err = device_pager_populate_object(pager, pagerOffset,
3604			(ppnum_t)(atop_64(physAddr + page)), page_size);
3605		pagerOffset += page_size;
3606	    }
3607	}
3608	assert (KERN_SUCCESS == err);
3609	if (err) break;
3610
3611	// This call to vm_fault causes an early pmap level resolution
3612	// of the mappings created above for kernel mappings, since
3613	// faulting in later can't take place from interrupt level.
3614	if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3615	{
3616	    vm_fault(addressMap,
3617		     (vm_map_offset_t)trunc_page_64(address),
3618		     VM_PROT_READ|VM_PROT_WRITE,
3619		     FALSE, THREAD_UNINT, NULL,
3620		     (vm_map_offset_t)0);
3621	}
3622
3623	sourceOffset += segLen - pageOffset;
3624	address += segLen;
3625	bytes -= segLen;
3626	pageOffset = 0;
3627    }
3628    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3629
3630    if (bytes)
3631        err = kIOReturnBadArgument;
3632
3633    return (err);
3634}
3635
3636IOReturn IOMemoryDescriptor::doUnmap(
3637	vm_map_t		addressMap,
3638	IOVirtualAddress	__address,
3639	IOByteCount		__length )
3640{
3641    IOReturn	      err;
3642    mach_vm_address_t address;
3643    mach_vm_size_t    length;
3644
3645    if (__length)
3646    {
3647	address = __address;
3648	length  = __length;
3649    }
3650    else
3651    {
3652	addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3653	address    = ((IOMemoryMap *) __address)->fAddress;
3654	length     = ((IOMemoryMap *) __address)->fLength;
3655    }
3656
3657    if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3658	addressMap = IOPageableMapForAddress( address );
3659
3660#if DEBUG
3661    if( kIOLogMapping & gIOKitDebug)
3662	IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3663		addressMap, address, length );
3664#endif
3665
3666    err = mach_vm_deallocate( addressMap, address, length );
3667
3668    return (err);
3669}
3670
3671IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3672{
3673    IOReturn		err = kIOReturnSuccess;
3674    IOMemoryMap *	mapping = 0;
3675    OSIterator *	iter;
3676
3677    LOCK;
3678
3679    if( doRedirect)
3680        _flags |= kIOMemoryRedirected;
3681    else
3682        _flags &= ~kIOMemoryRedirected;
3683
3684    do {
3685	if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3686
3687	    memory_object_t   pager;
3688
3689	    if( reserved)
3690		pager = (memory_object_t) reserved->dp.devicePager;
3691	    else
3692		pager = MACH_PORT_NULL;
3693
3694	    while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3695	    {
3696		mapping->redirect( safeTask, doRedirect );
3697		if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3698		{
3699		    err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3700		}
3701	    }
3702
3703	    iter->release();
3704	}
3705    } while( false );
3706
3707    if (!doRedirect)
3708    {
3709        WAKEUP;
3710    }
3711
3712    UNLOCK;
3713
3714#ifndef __LP64__
3715    // temporary binary compatibility
3716    IOSubMemoryDescriptor * subMem;
3717    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3718	err = subMem->redirect( safeTask, doRedirect );
3719    else
3720	err = kIOReturnSuccess;
3721#endif /* !__LP64__ */
3722
3723    return( err );
3724}
3725
3726IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3727{
3728    IOReturn err = kIOReturnSuccess;
3729
3730    if( fSuperMap) {
3731//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3732    } else {
3733
3734        LOCK;
3735
3736	do
3737	{
3738	    if (!fAddress)
3739		break;
3740	    if (!fAddressMap)
3741		break;
3742
3743	    if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3744	      && (0 == (fOptions & kIOMapStatic)))
3745	    {
3746		IOUnmapPages( fAddressMap, fAddress, fLength );
3747		err = kIOReturnSuccess;
3748#if DEBUG
3749		IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3750#endif
3751	    }
3752	    else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3753	    {
3754		IOOptionBits newMode;
3755		newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3756		IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3757	    }
3758	}
3759	while (false);
3760	UNLOCK;
3761    }
3762
3763    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3764	 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3765     && safeTask
3766     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3767	fMemory->redirect(safeTask, doRedirect);
3768
3769    return( err );
3770}
3771
3772IOReturn IOMemoryMap::unmap( void )
3773{
3774    IOReturn	err;
3775
3776    LOCK;
3777
3778    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3779	&& (0 == (fOptions & kIOMapStatic))) {
3780
3781        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3782
3783    } else
3784	err = kIOReturnSuccess;
3785
3786    if (fAddressMap)
3787    {
3788        vm_map_deallocate(fAddressMap);
3789        fAddressMap = 0;
3790    }
3791
3792    fAddress = 0;
3793
3794    UNLOCK;
3795
3796    return( err );
3797}
3798
3799void IOMemoryMap::taskDied( void )
3800{
3801    LOCK;
3802    if (fUserClientUnmap)
3803	unmap();
3804    if( fAddressMap) {
3805        vm_map_deallocate(fAddressMap);
3806        fAddressMap = 0;
3807    }
3808    fAddressTask = 0;
3809    fAddress	 = 0;
3810    UNLOCK;
3811}
3812
3813IOReturn IOMemoryMap::userClientUnmap( void )
3814{
3815    fUserClientUnmap = true;
3816    return (kIOReturnSuccess);
3817}
3818
3819// Overload the release mechanism.  All mappings must be a member
3820// of a memory descriptors _mappings set.  This means that we
3821// always have 2 references on a mapping.  When either of these mappings
3822// are released we need to free ourselves.
3823void IOMemoryMap::taggedRelease(const void *tag) const
3824{
3825    LOCK;
3826    super::taggedRelease(tag, 2);
3827    UNLOCK;
3828}
3829
3830void IOMemoryMap::free()
3831{
3832    unmap();
3833
3834    if (fMemory)
3835    {
3836        LOCK;
3837	fMemory->removeMapping(this);
3838	UNLOCK;
3839	fMemory->release();
3840    }
3841
3842    if (fOwner && (fOwner != fMemory))
3843    {
3844        LOCK;
3845	fOwner->removeMapping(this);
3846	UNLOCK;
3847    }
3848
3849    if (fSuperMap)
3850	fSuperMap->release();
3851
3852    if (fRedirUPL) {
3853	upl_commit(fRedirUPL, NULL, 0);
3854	upl_deallocate(fRedirUPL);
3855    }
3856
3857    super::free();
3858}
3859
3860IOByteCount IOMemoryMap::getLength()
3861{
3862    return( fLength );
3863}
3864
3865IOVirtualAddress IOMemoryMap::getVirtualAddress()
3866{
3867#ifndef __LP64__
3868    if (fSuperMap)
3869	fSuperMap->getVirtualAddress();
3870    else if (fAddressMap
3871		&& vm_map_is_64bit(fAddressMap)
3872		&& (sizeof(IOVirtualAddress) < 8))
3873    {
3874	OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3875    }
3876#endif /* !__LP64__ */
3877
3878    return (fAddress);
3879}
3880
3881#ifndef __LP64__
3882mach_vm_address_t 	IOMemoryMap::getAddress()
3883{
3884    return( fAddress);
3885}
3886
3887mach_vm_size_t 	IOMemoryMap::getSize()
3888{
3889    return( fLength );
3890}
3891#endif /* !__LP64__ */
3892
3893
3894task_t IOMemoryMap::getAddressTask()
3895{
3896    if( fSuperMap)
3897	return( fSuperMap->getAddressTask());
3898    else
3899        return( fAddressTask);
3900}
3901
3902IOOptionBits IOMemoryMap::getMapOptions()
3903{
3904    return( fOptions);
3905}
3906
3907IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3908{
3909    return( fMemory );
3910}
3911
3912IOMemoryMap * IOMemoryMap::copyCompatible(
3913		IOMemoryMap * newMapping )
3914{
3915    task_t		task      = newMapping->getAddressTask();
3916    mach_vm_address_t	toAddress = newMapping->fAddress;
3917    IOOptionBits	_options  = newMapping->fOptions;
3918    mach_vm_size_t	_offset   = newMapping->fOffset;
3919    mach_vm_size_t	_length   = newMapping->fLength;
3920
3921    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3922	return( 0 );
3923    if( (fOptions ^ _options) & kIOMapReadOnly)
3924	return( 0 );
3925    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3926     && ((fOptions ^ _options) & kIOMapCacheMask))
3927	return( 0 );
3928
3929    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3930	return( 0 );
3931
3932    if( _offset < fOffset)
3933	return( 0 );
3934
3935    _offset -= fOffset;
3936
3937    if( (_offset + _length) > fLength)
3938	return( 0 );
3939
3940    retain();
3941    if( (fLength == _length) && (!_offset))
3942    {
3943	newMapping = this;
3944    }
3945    else
3946    {
3947	newMapping->fSuperMap = this;
3948	newMapping->fOffset   = fOffset + _offset;
3949	newMapping->fAddress  = fAddress + _offset;
3950    }
3951
3952    return( newMapping );
3953}
3954
3955IOReturn IOMemoryMap::wireRange(
3956    	uint32_t		options,
3957        mach_vm_size_t		offset,
3958        mach_vm_size_t		length)
3959{
3960    IOReturn kr;
3961    mach_vm_address_t start = trunc_page_64(fAddress + offset);
3962    mach_vm_address_t end   = round_page_64(fAddress + offset + length);
3963
3964    if (kIODirectionOutIn & options)
3965    {
3966	kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3967    }
3968    else
3969    {
3970	kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3971    }
3972
3973    return (kr);
3974}
3975
3976
3977IOPhysicalAddress
3978#ifdef __LP64__
3979IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3980#else /* !__LP64__ */
3981IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3982#endif /* !__LP64__ */
3983{
3984    IOPhysicalAddress	address;
3985
3986    LOCK;
3987#ifdef __LP64__
3988    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3989#else /* !__LP64__ */
3990    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3991#endif /* !__LP64__ */
3992    UNLOCK;
3993
3994    return( address );
3995}
3996
3997/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3998
3999#undef super
4000#define super OSObject
4001
4002/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4003
4004void IOMemoryDescriptor::initialize( void )
4005{
4006    if( 0 == gIOMemoryLock)
4007	gIOMemoryLock = IORecursiveLockAlloc();
4008
4009    gIOLastPage = IOGetLastPageNumber();
4010}
4011
4012void IOMemoryDescriptor::free( void )
4013{
4014    if( _mappings)
4015	_mappings->release();
4016
4017    super::free();
4018}
4019
4020IOMemoryMap * IOMemoryDescriptor::setMapping(
4021	task_t			intoTask,
4022	IOVirtualAddress	mapAddress,
4023	IOOptionBits		options )
4024{
4025    return (createMappingInTask( intoTask, mapAddress,
4026				    options | kIOMapStatic,
4027				    0, getLength() ));
4028}
4029
4030IOMemoryMap * IOMemoryDescriptor::map(
4031	IOOptionBits		options )
4032{
4033    return (createMappingInTask( kernel_task, 0,
4034				options | kIOMapAnywhere,
4035				0, getLength() ));
4036}
4037
4038#ifndef __LP64__
4039IOMemoryMap * IOMemoryDescriptor::map(
4040	task_t		        intoTask,
4041	IOVirtualAddress	atAddress,
4042	IOOptionBits		options,
4043	IOByteCount		offset,
4044	IOByteCount		length )
4045{
4046    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4047    {
4048	OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4049	return (0);
4050    }
4051
4052    return (createMappingInTask(intoTask, atAddress,
4053				options, offset, length));
4054}
4055#endif /* !__LP64__ */
4056
4057IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4058	task_t			intoTask,
4059	mach_vm_address_t	atAddress,
4060	IOOptionBits		options,
4061	mach_vm_size_t		offset,
4062	mach_vm_size_t		length)
4063{
4064    IOMemoryMap * result;
4065    IOMemoryMap * mapping;
4066
4067    if (0 == length)
4068	length = getLength();
4069
4070    mapping = new IOMemoryMap;
4071
4072    if( mapping
4073     && !mapping->init( intoTask, atAddress,
4074			options, offset, length )) {
4075	mapping->release();
4076	mapping = 0;
4077    }
4078
4079    if (mapping)
4080	result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4081    else
4082	result = 0;
4083
4084#if DEBUG
4085    if (!result)
4086	IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4087		this, atAddress, (uint32_t) options, offset, length);
4088#endif
4089
4090    return (result);
4091}
4092
4093#ifndef __LP64__ // there is only a 64 bit version for LP64
4094IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4095			        IOOptionBits         options,
4096			        IOByteCount          offset)
4097{
4098    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4099}
4100#endif
4101
4102IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4103			        IOOptionBits         options,
4104			        mach_vm_size_t       offset)
4105{
4106    IOReturn err = kIOReturnSuccess;
4107    IOMemoryDescriptor * physMem = 0;
4108
4109    LOCK;
4110
4111    if (fAddress && fAddressMap) do
4112    {
4113	if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4114	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4115	{
4116	    physMem = fMemory;
4117	    physMem->retain();
4118	}
4119
4120	if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4121	{
4122	    vm_size_t size = round_page(fLength);
4123	    int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4124			| UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4125	    if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4126					    NULL, NULL,
4127					    &flags))
4128		fRedirUPL = 0;
4129
4130	    if (physMem)
4131	    {
4132		IOUnmapPages( fAddressMap, fAddress, fLength );
4133		if ((false))
4134		    physMem->redirect(0, true);
4135	    }
4136	}
4137
4138	if (newBackingMemory)
4139	{
4140	    if (newBackingMemory != fMemory)
4141	    {
4142		fOffset = 0;
4143		if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4144							    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4145							    offset, fLength))
4146		    err = kIOReturnError;
4147	    }
4148	    if (fRedirUPL)
4149	    {
4150		upl_commit(fRedirUPL, NULL, 0);
4151		upl_deallocate(fRedirUPL);
4152		fRedirUPL = 0;
4153	    }
4154	    if ((false) && physMem)
4155		physMem->redirect(0, false);
4156	}
4157    }
4158    while (false);
4159
4160    UNLOCK;
4161
4162    if (physMem)
4163	physMem->release();
4164
4165    return (err);
4166}
4167
4168IOMemoryMap * IOMemoryDescriptor::makeMapping(
4169	IOMemoryDescriptor *	owner,
4170	task_t			__intoTask,
4171	IOVirtualAddress	__address,
4172	IOOptionBits		options,
4173	IOByteCount		__offset,
4174	IOByteCount		__length )
4175{
4176#ifndef __LP64__
4177    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4178#endif /* !__LP64__ */
4179
4180    IOMemoryDescriptor * mapDesc = 0;
4181    IOMemoryMap *	 result = 0;
4182    OSIterator *	 iter;
4183
4184    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
4185    mach_vm_size_t offset  = mapping->fOffset + __offset;
4186    mach_vm_size_t length  = mapping->fLength;
4187
4188    mapping->fOffset = offset;
4189
4190    LOCK;
4191
4192    do
4193    {
4194	if (kIOMapStatic & options)
4195	{
4196	    result = mapping;
4197	    addMapping(mapping);
4198	    mapping->setMemoryDescriptor(this, 0);
4199	    continue;
4200	}
4201
4202	if (kIOMapUnique & options)
4203	{
4204	    addr64_t phys;
4205	    IOByteCount       physLen;
4206
4207//	    if (owner != this)		continue;
4208
4209	    if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4210		|| ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4211	    {
4212		phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4213		if (!phys || (physLen < length))
4214		    continue;
4215
4216		mapDesc = IOMemoryDescriptor::withAddressRange(
4217				phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4218		if (!mapDesc)
4219		    continue;
4220		offset = 0;
4221		mapping->fOffset = offset;
4222	    }
4223	}
4224	else
4225	{
4226	    // look for a compatible existing mapping
4227	    if( (iter = OSCollectionIterator::withCollection(_mappings)))
4228	    {
4229		IOMemoryMap * lookMapping;
4230		while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4231		{
4232		    if ((result = lookMapping->copyCompatible(mapping)))
4233		    {
4234			addMapping(result);
4235			result->setMemoryDescriptor(this, offset);
4236			break;
4237		    }
4238		}
4239		iter->release();
4240	    }
4241	    if (result || (options & kIOMapReference))
4242	    {
4243	        if (result != mapping)
4244	        {
4245                    mapping->release();
4246                    mapping = NULL;
4247                }
4248		continue;
4249	    }
4250	}
4251
4252	if (!mapDesc)
4253	{
4254	    mapDesc = this;
4255	    mapDesc->retain();
4256	}
4257	IOReturn
4258	kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4259	if (kIOReturnSuccess == kr)
4260	{
4261	    result = mapping;
4262	    mapDesc->addMapping(result);
4263	    result->setMemoryDescriptor(mapDesc, offset);
4264	}
4265	else
4266	{
4267	    mapping->release();
4268	    mapping = NULL;
4269	}
4270    }
4271    while( false );
4272
4273    UNLOCK;
4274
4275    if (mapDesc)
4276	mapDesc->release();
4277
4278    return (result);
4279}
4280
4281void IOMemoryDescriptor::addMapping(
4282	IOMemoryMap * mapping )
4283{
4284    if( mapping)
4285    {
4286        if( 0 == _mappings)
4287            _mappings = OSSet::withCapacity(1);
4288	if( _mappings )
4289	    _mappings->setObject( mapping );
4290    }
4291}
4292
4293void IOMemoryDescriptor::removeMapping(
4294	IOMemoryMap * mapping )
4295{
4296    if( _mappings)
4297        _mappings->removeObject( mapping);
4298}
4299
4300#ifndef __LP64__
4301// obsolete initializers
4302// - initWithOptions is the designated initializer
4303bool
4304IOMemoryDescriptor::initWithAddress(void *      address,
4305                                    IOByteCount   length,
4306                                    IODirection direction)
4307{
4308    return( false );
4309}
4310
4311bool
4312IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4313                                    IOByteCount    length,
4314                                    IODirection  direction,
4315                                    task_t       task)
4316{
4317    return( false );
4318}
4319
4320bool
4321IOMemoryDescriptor::initWithPhysicalAddress(
4322				 IOPhysicalAddress	address,
4323				 IOByteCount		length,
4324				 IODirection      	direction )
4325{
4326    return( false );
4327}
4328
4329bool
4330IOMemoryDescriptor::initWithRanges(
4331                                   	IOVirtualRange * ranges,
4332                                   	UInt32           withCount,
4333                                   	IODirection      direction,
4334                                   	task_t           task,
4335                                  	bool             asReference)
4336{
4337    return( false );
4338}
4339
4340bool
4341IOMemoryDescriptor::initWithPhysicalRanges(	IOPhysicalRange * ranges,
4342                                        	UInt32           withCount,
4343                                        	IODirection      direction,
4344                                        	bool             asReference)
4345{
4346    return( false );
4347}
4348
4349void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4350					IOByteCount * lengthOfSegment)
4351{
4352    return( 0 );
4353}
4354#endif /* !__LP64__ */
4355
4356/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4357
4358bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4359{
4360    OSSymbol const *keys[2];
4361    OSObject *values[2];
4362    OSArray * array;
4363
4364    struct SerData {
4365	user_addr_t address;
4366	user_size_t length;
4367    } *vcopy;
4368    unsigned int index, nRanges;
4369    bool result;
4370
4371    IOOptionBits type = _flags & kIOMemoryTypeMask;
4372
4373    if (s == NULL) return false;
4374
4375    array = OSArray::withCapacity(4);
4376    if (!array)  return (false);
4377
4378    nRanges = _rangesCount;
4379    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4380    if (vcopy == 0) return false;
4381
4382    keys[0] = OSSymbol::withCString("address");
4383    keys[1] = OSSymbol::withCString("length");
4384
4385    result = false;
4386    values[0] = values[1] = 0;
4387
4388    // From this point on we can go to bail.
4389
4390    // Copy the volatile data so we don't have to allocate memory
4391    // while the lock is held.
4392    LOCK;
4393    if (nRanges == _rangesCount) {
4394	Ranges vec = _ranges;
4395        for (index = 0; index < nRanges; index++) {
4396	    mach_vm_address_t addr; mach_vm_size_t len;
4397	    getAddrLenForInd(addr, len, type, vec, index);
4398            vcopy[index].address = addr;
4399            vcopy[index].length  = len;
4400        }
4401    } else {
4402	// The descriptor changed out from under us.  Give up.
4403        UNLOCK;
4404	result = false;
4405        goto bail;
4406    }
4407    UNLOCK;
4408
4409    for (index = 0; index < nRanges; index++)
4410    {
4411	user_addr_t addr = vcopy[index].address;
4412	IOByteCount len = (IOByteCount) vcopy[index].length;
4413	values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4414	if (values[0] == 0) {
4415	  result = false;
4416	  goto bail;
4417	}
4418	values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4419	if (values[1] == 0) {
4420	  result = false;
4421	  goto bail;
4422	}
4423        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4424	if (dict == 0) {
4425	  result = false;
4426	  goto bail;
4427	}
4428	array->setObject(dict);
4429	dict->release();
4430	values[0]->release();
4431	values[1]->release();
4432	values[0] = values[1] = 0;
4433    }
4434
4435    result = array->serialize(s);
4436
4437 bail:
4438    if (array)
4439      array->release();
4440    if (values[0])
4441      values[0]->release();
4442    if (values[1])
4443      values[1]->release();
4444    if (keys[0])
4445      keys[0]->release();
4446    if (keys[1])
4447      keys[1]->release();
4448    if (vcopy)
4449        IOFree(vcopy, sizeof(SerData) * nRanges);
4450
4451    return result;
4452}
4453
4454/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4455
4456#if DEVELOPMENT || DEBUG
4457
4458extern "C" void IOMemoryDescriptorTest(int x)
4459{
4460    IOGeneralMemoryDescriptor * md;
4461
4462    vm_offset_t data[2];
4463    vm_size_t  bsize = 16*1024*1024;
4464
4465    vm_size_t  srcsize, srcoffset, mapoffset, size;
4466
4467    kern_return_t kr;
4468
4469    kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE);
4470    vm_inherit(kernel_map, data[0] + 1*4096, 4096, VM_INHERIT_NONE);
4471    vm_inherit(kernel_map, data[0] + 16*4096, 4096, VM_INHERIT_NONE);
4472
4473    kprintf("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);
4474
4475    uint32_t idx, offidx;
4476    for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++)
4477    {
4478	((uint32_t*)data[0])[idx] = idx;
4479    }
4480
4481    for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 1) + 0x40c))
4482    {
4483	for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 1) + 0x3fc))
4484	{
4485	    IOAddressRange ranges[3];
4486	    uint32_t rangeCount = 1;
4487
4488	    bzero(&ranges[0], sizeof(ranges));
4489	    ranges[0].address = data[0] + srcoffset;
4490	    ranges[0].length  = srcsize;
4491
4492	    if (srcsize > 5*page_size)
4493	    {
4494		ranges[0].length  = 7634;
4495		ranges[1].length  = 9870;
4496		ranges[2].length  = srcsize - ranges[0].length - ranges[1].length;
4497		ranges[1].address = ranges[0].address + ranges[0].length;
4498		ranges[2].address = ranges[1].address + ranges[1].length;
4499		rangeCount = 3;
4500	    }
4501	    else if ((srcsize > 2*page_size) && !(page_mask & srcoffset))
4502	    {
4503		ranges[0].length  = 4096;
4504		ranges[1].length  = 4096;
4505		ranges[2].length  = srcsize - ranges[0].length - ranges[1].length;
4506		ranges[0].address = data[0] + srcoffset + 4096;
4507		ranges[1].address = data[0] + srcoffset;
4508		ranges[2].address = ranges[0].address + ranges[0].length;
4509		rangeCount = 3;
4510	    }
4511
4512	    md = OSDynamicCast(IOGeneralMemoryDescriptor,
4513	    	IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
4514	    assert(md);
4515
4516	    kprintf("IOMemoryReferenceCreate [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
4517	    	    (long) srcsize, (long) srcoffset,
4518		    (long long) ranges[0].address - data[0], (long long) ranges[0].length,
4519		    (long long) ranges[1].address - data[0], (long long) ranges[1].length,
4520		    (long long) ranges[2].address - data[0], (long long) ranges[2].length);
4521
4522	    if (kIOReturnSuccess == kr)
4523	    {
4524		for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00))
4525		{
4526		    for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 1) + 0x20))
4527		    {
4528		    	IOMemoryMap     * map;
4529			mach_vm_address_t addr = 0;
4530			uint32_t          data;
4531
4532			kprintf("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);
4533
4534			map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
4535			if (map) addr = map->getAddress();
4536			else kr = kIOReturnError;
4537
4538			kprintf(">mapRef 0x%x %llx\n", kr, addr);
4539
4540			if (kIOReturnSuccess != kr) break;
4541			kr = md->prepare();
4542			if (kIOReturnSuccess != kr)
4543			{
4544			    kprintf("prepare() fail 0x%x\n", kr);
4545			    break;
4546			}
4547			for (idx = 0; idx < size; idx += sizeof(uint32_t))
4548			{
4549			    offidx = (idx + mapoffset + srcoffset);
4550			    if ((srcsize <= 5*page_size) && (srcsize > 2*page_size) && !(page_mask & srcoffset))
4551			    {
4552			    	if (offidx < 8192) offidx ^= 0x1000;
4553			    }
4554			    offidx /= sizeof(uint32_t);
4555
4556			    if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
4557			    {
4558				kprintf("vm mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
4559				kr = kIOReturnBadMedia;
4560			    }
4561			    else
4562			    {
4563			        if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
4564				if (offidx != data)
4565				{
4566				    kprintf("phys mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
4567				    kr = kIOReturnBadMedia;
4568				}
4569			    }
4570			}
4571			md->complete();
4572			map->release();
4573			kprintf("unmapRef %llx\n", addr);
4574		    }
4575		    if (kIOReturnSuccess != kr) break;
4576		}
4577	    }
4578            if (kIOReturnSuccess != kr) break;
4579	}
4580	if (kIOReturnSuccess != kr) break;
4581    }
4582
4583    if (kIOReturnSuccess != kr) kprintf("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
4584    					(long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
4585
4586    vm_deallocate(kernel_map, data[0], bsize);
4587//    vm_deallocate(kernel_map, data[1], size);
4588}
4589
4590#endif  /* DEVELOPMENT || DEBUG */
4591
4592/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4593
4594OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4595#ifdef __LP64__
4596OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4597OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4598OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4599OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4600OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4601OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4602OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4603#else /* !__LP64__ */
4604OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4605OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4606OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4607OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4608OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4609OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4610OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4611#endif /* !__LP64__ */
4612OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4613OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4614OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4615OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4616OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4617OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4618OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4619OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4620
4621/* ex-inline function implementation */
4622IOPhysicalAddress
4623IOMemoryDescriptor::getPhysicalAddress()
4624        { return( getPhysicalSegment( 0, 0 )); }
4625