1/*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36#include <sys/cdefs.h>
37
38#include <IOKit/assert.h>
39#include <IOKit/system.h>
40#include <IOKit/IOLib.h>
41#include <IOKit/IOMemoryDescriptor.h>
42#include <IOKit/IOMapper.h>
43#include <IOKit/IODMACommand.h>
44#include <IOKit/IOKitKeysPrivate.h>
45
46#ifndef __LP64__
47#include <IOKit/IOSubMemoryDescriptor.h>
48#endif /* !__LP64__ */
49
50#include <IOKit/IOKitDebug.h>
51#include <libkern/OSDebug.h>
52
53#include "IOKitKernelInternal.h"
54
55#include <libkern/c++/OSContainers.h>
56#include <libkern/c++/OSDictionary.h>
57#include <libkern/c++/OSArray.h>
58#include <libkern/c++/OSSymbol.h>
59#include <libkern/c++/OSNumber.h>
60
61#include <sys/uio.h>
62
63__BEGIN_DECLS
64#include <vm/pmap.h>
65#include <vm/vm_pageout.h>
66#include <mach/memory_object_types.h>
67#include <device/device_port.h>
68
69#include <mach/vm_prot.h>
70#include <mach/mach_vm.h>
71#include <vm/vm_fault.h>
72#include <vm/vm_protos.h>
73
74extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75extern void ipc_port_release_send(ipc_port_t port);
76
77kern_return_t
78memory_object_iopl_request(
79	ipc_port_t		port,
80	memory_object_offset_t	offset,
81	vm_size_t		*upl_size,
82	upl_t			*upl_ptr,
83	upl_page_info_array_t	user_page_list,
84	unsigned int		*page_list_count,
85	int			*flags);
86
87unsigned int  IOTranslateCacheBits(struct phys_entry *pp);
88
89__END_DECLS
90
91#define kIOMapperWaitSystem	((IOMapper *) 1)
92
93static IOMapper * gIOSystemMapper = NULL;
94
95ppnum_t		  gIOLastPage;
96
97/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
98
99OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
100
101#define super IOMemoryDescriptor
102
103OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
104
105/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106
107static IORecursiveLock * gIOMemoryLock;
108
109#define LOCK	IORecursiveLockLock( gIOMemoryLock)
110#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
111#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
112#define WAKEUP	\
113    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
114
115#if 0
116#define DEBG(fmt, args...)  	{ kprintf(fmt, ## args); }
117#else
118#define DEBG(fmt, args...)  	{}
119#endif
120
121#define IOMD_DEBUG_DMAACTIVE	1
122
123/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
124
125// Some data structures and accessor macros used by the initWithOptions()
126// function below.
127
128enum ioPLBlockFlags {
129    kIOPLOnDevice  = 0x00000001,
130    kIOPLExternUPL = 0x00000002,
131};
132
133struct typePersMDData
134{
135    const IOGeneralMemoryDescriptor *fMD;
136    ipc_port_t fMemEntry;
137};
138
139struct ioPLBlock {
140    upl_t fIOPL;
141    vm_address_t fPageInfo;   // Pointer to page list or index into it
142    uint32_t fIOMDOffset;	    // The offset of this iopl in descriptor
143    ppnum_t fMappedPage;	    // Page number of first page in this iopl
144    unsigned int fPageOffset;	    // Offset within first page of iopl
145    unsigned int fFlags;	    // Flags
146};
147
148struct ioGMDData {
149    IOMapper *  fMapper;
150    uint8_t	fDMAMapNumAddressBits;
151    uint64_t    fDMAMapAlignment;
152    addr64_t    fMappedBase;
153    uint64_t fPreparationID;
154    unsigned int fPageCnt;
155    unsigned char fDiscontig;
156#if __LP64__
157    // align arrays to 8 bytes so following macros work
158    unsigned char fPad[3];
159#endif
160    upl_page_info_t fPageList[1]; /* variable length */
161    ioPLBlock fBlocks[1]; /* variable length */
162};
163
164#define getDataP(osd)	((ioGMDData *) (osd)->getBytesNoCopy())
165#define getIOPLList(d)	((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
166#define getNumIOPL(osd, d)	\
167    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
168#define getPageList(d)	(&(d->fPageList[0]))
169#define computeDataSize(p, u) \
170    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
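// The ioGMDData header above lives at the front of an OSData (_memoryEntries)
// and is followed by fPageCnt upl_page_info_t entries and then a variable
// number of ioPLBlock records; the macros above just do that arithmetic.
// A minimal walking sketch, assuming a populated _memoryEntries (the
// 'memoryEntries' parameter is hypothetical):
#if 0
static void walkIOPLBlocks(OSData * memoryEntries)
{
    ioGMDData * dataP    = getDataP(memoryEntries);            // header at the front
    ioPLBlock * ioplList = getIOPLList(dataP);                 // follows fPageList[fPageCnt]
    UInt        numIOPL  = getNumIOPL(memoryEntries, dataP);   // derived from the OSData length

    for (UInt i = 0; i < numIOPL; i++)
	kprintf("iopl %u: md offset 0x%x, page offset 0x%x\n",
		i, ioplList[i].fIOMDOffset, ioplList[i].fPageOffset);
}
#endif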
171
172
173/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
174
175#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
176
177
178extern "C" {
179
180kern_return_t device_data_action(
181               uintptr_t               device_handle,
182               ipc_port_t              device_pager,
183               vm_prot_t               protection,
184               vm_object_offset_t      offset,
185               vm_size_t               size)
186{
187    kern_return_t	 kr;
188    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
189    IOMemoryDescriptor * memDesc;
190
191    LOCK;
192    memDesc = ref->dp.memory;
193    if( memDesc)
194    {
195	memDesc->retain();
196	kr = memDesc->handleFault( device_pager, 0, 0,
197                offset, size, kIOMapDefaultCache /*?*/);
198	memDesc->release();
199    }
200    else
201	kr = KERN_ABORTED;
202    UNLOCK;
203
204    return( kr );
205}
206
207kern_return_t device_close(
208               uintptr_t     device_handle)
209{
210    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
211
212    IODelete( ref, IOMemoryDescriptorReserved, 1 );
213
214    return( kIOReturnSuccess );
215}
216};	// end extern "C"
217
218// Note this inline function uses C++ reference arguments to return values.
219// Because references rather than pointers are passed, callers cannot hand in
220// NULL and no NULL checks are needed; a NULL reference is illegal.
221static inline void
222getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
223     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
224{
225    assert(kIOMemoryTypeUIO       == type
226	|| kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64 == type
227	|| kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
228    if (kIOMemoryTypeUIO == type) {
229	user_size_t us;
230	uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
231    }
232#ifndef __LP64__
233    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
234	IOAddressRange cur = r.v64[ind];
235	addr = cur.address;
236	len  = cur.length;
237    }
238#endif /* !__LP64__ */
239    else {
240	IOVirtualRange cur = r.v[ind];
241	addr = cur.address;
242	len  = cur.length;
243    }
244}
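// A minimal sketch of how callers iterate a descriptor's ranges with
// getAddrLenForInd(); it mirrors the loop used later in initWithOptions().
// The 'type', 'vec' and 'count' parameters stand in for the descriptor's
// _flags & kIOMemoryTypeMask, _ranges and _rangesCount fields.
#if 0
static void walkRanges(UInt32 type, IOGeneralMemoryDescriptor::Ranges vec, UInt32 count)
{
    for (UInt32 ind = 0; ind < count; ind++) {
	user_addr_t      addr;
	IOPhysicalLength len;

	getAddrLenForInd(addr, len, type, vec, ind);	// decodes according to range type
	kprintf("range %u: addr 0x%llx len 0x%llx\n",
		ind, (uint64_t) addr, (uint64_t) len);
    }
}
#endif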
245
246/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
247
248IOMemoryDescriptor *
249IOMemoryDescriptor::withAddress(void *      address,
250                                IOByteCount   length,
251                                IODirection direction)
252{
253    return IOMemoryDescriptor::
254        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
255}
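// A minimal usage sketch (hypothetical names): describing an existing
// kernel-virtual buffer. Note that this factory ORs in kIOMemoryAutoPrepare,
// so the pages are wired for the lifetime of the descriptor and no explicit
// prepare()/complete() pair is required.
#if 0
static IOMemoryDescriptor * describeKernelBuffer(void * buf, IOByteCount size)
{
    IOMemoryDescriptor * md =
	IOMemoryDescriptor::withAddress(buf, size, kIODirectionOutIn);
    // ... hand 'md' to a DMA engine or map it; md->release() when finished.
    return md;
}
#endif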
256
257#ifndef __LP64__
258IOMemoryDescriptor *
259IOMemoryDescriptor::withAddress(IOVirtualAddress address,
260                                IOByteCount  length,
261                                IODirection  direction,
262                                task_t       task)
263{
264    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
265    if (that)
266    {
267	if (that->initWithAddress(address, length, direction, task))
268	    return that;
269
270        that->release();
271    }
272    return 0;
273}
274#endif /* !__LP64__ */
275
276IOMemoryDescriptor *
277IOMemoryDescriptor::withPhysicalAddress(
278				IOPhysicalAddress	address,
279				IOByteCount		length,
280				IODirection      	direction )
281{
282    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
283}
284
285#ifndef __LP64__
286IOMemoryDescriptor *
287IOMemoryDescriptor::withRanges(	IOVirtualRange * ranges,
288				UInt32           withCount,
289				IODirection      direction,
290				task_t           task,
291				bool             asReference)
292{
293    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
294    if (that)
295    {
296	if (that->initWithRanges(ranges, withCount, direction, task, asReference))
297	    return that;
298
299        that->release();
300    }
301    return 0;
302}
303#endif /* !__LP64__ */
304
305IOMemoryDescriptor *
306IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
307					mach_vm_size_t length,
308					IOOptionBits   options,
309					task_t         task)
310{
311    IOAddressRange range = { address, length };
312    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
313}
314
315IOMemoryDescriptor *
316IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
317					UInt32           rangeCount,
318					IOOptionBits     options,
319					task_t           task)
320{
321    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
322    if (that)
323    {
324	if (task)
325	    options |= kIOMemoryTypeVirtual64;
326	else
327	    options |= kIOMemoryTypePhysical64;
328
329	if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
330	    return that;
331
332	that->release();
333    }
334
335    return 0;
336}
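// A minimal usage sketch (hypothetical names): wrapping a buffer that lives in
// another task. With a non-NULL task the factory above selects
// kIOMemoryTypeVirtual64; prepare() wires the pages before I/O and complete()
// unwires them.
#if 0
static IOReturn wireUserBuffer(task_t task, mach_vm_address_t uva, mach_vm_size_t len)
{
    IOMemoryDescriptor * md =
	IOMemoryDescriptor::withAddressRange(uva, len, kIODirectionIn, task);
    if (!md)
	return kIOReturnNoMemory;

    IOReturn ret = md->prepare();
    if (kIOReturnSuccess == ret) {
	// ... perform the transfer into the wired pages ...
	md->complete();
    }
    md->release();
    return ret;
}
#endif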
337
338
339/*
340 * withOptions:
341 *
342 * Create a new IOMemoryDescriptor. The buffer is made up of several
343 * virtual address ranges from a given task.
344 *
345 * Passing the ranges as a reference will avoid an extra allocation.
346 */
347IOMemoryDescriptor *
348IOMemoryDescriptor::withOptions(void *		buffers,
349                                UInt32		count,
350                                UInt32		offset,
351                                task_t		task,
352                                IOOptionBits	opts,
353                                IOMapper *	mapper)
354{
355    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
356
357    if (self
358    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
359    {
360        self->release();
361        return 0;
362    }
363
364    return self;
365}
366
367bool IOMemoryDescriptor::initWithOptions(void *		buffers,
368                                         UInt32		count,
369                                         UInt32		offset,
370                                         task_t		task,
371                                         IOOptionBits	options,
372                                         IOMapper *	mapper)
373{
374    return( false );
375}
376
377#ifndef __LP64__
378IOMemoryDescriptor *
379IOMemoryDescriptor::withPhysicalRanges(	IOPhysicalRange * ranges,
380                                        UInt32          withCount,
381                                        IODirection     direction,
382                                        bool            asReference)
383{
384    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
385    if (that)
386    {
387	if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
388	    return that;
389
390        that->release();
391    }
392    return 0;
393}
394
395IOMemoryDescriptor *
396IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *	of,
397				IOByteCount		offset,
398				IOByteCount		length,
399				IODirection		direction)
400{
401    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
402}
403#endif /* !__LP64__ */
404
405IOMemoryDescriptor *
406IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
407{
408    IOGeneralMemoryDescriptor *origGenMD =
409	OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
410
411    if (origGenMD)
412	return IOGeneralMemoryDescriptor::
413	    withPersistentMemoryDescriptor(origGenMD);
414    else
415	return 0;
416}
417
418IOMemoryDescriptor *
419IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
420{
421    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
422
423    if (!sharedMem)
424	return 0;
425
426    if (sharedMem == originalMD->_memEntry) {
427	originalMD->retain();		    // Add a new reference to ourselves
428	ipc_port_release_send(sharedMem);   // Remove extra send right
429	return originalMD;
430    }
431
432    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
433    typePersMDData initData = { originalMD, sharedMem };
434
435    if (self
436    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
437        self->release();
438	self = 0;
439    }
440    return self;
441}
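// A usage sketch (hypothetical 'origMD'): a persistent copy tracks the named
// memory entry created below rather than the original object, so it remains
// usable independently of the original descriptor. The original must have been
// created with kIOMemoryPersistent; otherwise this returns 0.
#if 0
static IOMemoryDescriptor * clonePersistent(IOMemoryDescriptor * origMD)
{
    // Returns origMD (retained) when the named entry is simply reused,
    // or a new descriptor backed by the same memory entry.
    return IOMemoryDescriptor::withPersistentMemoryDescriptor(origMD);
}
#endif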
442
443void *IOGeneralMemoryDescriptor::createNamedEntry()
444{
445    kern_return_t error;
446    ipc_port_t sharedMem;
447
448    IOOptionBits type = _flags & kIOMemoryTypeMask;
449
450    user_addr_t range0Addr;
451    IOByteCount range0Len;
452    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
453    range0Addr = trunc_page_64(range0Addr);
454
455    vm_size_t size = ptoa_32(_pages);
456    vm_address_t kernelPage = (vm_address_t) range0Addr;
457
458    vm_map_t theMap = ((_task == kernel_task)
459			&& (kIOMemoryBufferPageable & _flags))
460		    ? IOPageableMapForAddress(kernelPage)
461		    : get_task_map(_task);
462
463    memory_object_size_t  actualSize = size;
464    vm_prot_t             prot       = VM_PROT_READ;
465    if (kIODirectionOut != (kIODirectionOutIn & _flags))
466	prot |= VM_PROT_WRITE;
467
468    if (_memEntry)
469	prot |= MAP_MEM_NAMED_REUSE;
470
471    error = mach_make_memory_entry_64(theMap,
472	    &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
473
474    if (KERN_SUCCESS == error) {
475	if (actualSize == size) {
476	    return sharedMem;
477	} else {
478#if IOASSERT
479	    IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
480		  (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
481#endif
482	    ipc_port_release_send( sharedMem );
483	}
484    }
485
486    return MACH_PORT_NULL;
487}
488
489#ifndef __LP64__
490bool
491IOGeneralMemoryDescriptor::initWithAddress(void *      address,
492                                    IOByteCount   withLength,
493                                    IODirection withDirection)
494{
495    _singleRange.v.address = (vm_offset_t) address;
496    _singleRange.v.length  = withLength;
497
498    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
499}
500
501bool
502IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
503                                    IOByteCount    withLength,
504                                    IODirection  withDirection,
505                                    task_t       withTask)
506{
507    _singleRange.v.address = address;
508    _singleRange.v.length  = withLength;
509
510    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
511}
512
513bool
514IOGeneralMemoryDescriptor::initWithPhysicalAddress(
515				 IOPhysicalAddress	address,
516				 IOByteCount		withLength,
517				 IODirection      	withDirection )
518{
519    _singleRange.p.address = address;
520    _singleRange.p.length  = withLength;
521
522    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
523}
524
525bool
526IOGeneralMemoryDescriptor::initWithPhysicalRanges(
527                                IOPhysicalRange * ranges,
528                                UInt32            count,
529                                IODirection       direction,
530                                bool              reference)
531{
532    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
533
534    if (reference)
535        mdOpts |= kIOMemoryAsReference;
536
537    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
538}
539
540bool
541IOGeneralMemoryDescriptor::initWithRanges(
542                                   IOVirtualRange * ranges,
543                                   UInt32           count,
544                                   IODirection      direction,
545                                   task_t           task,
546                                   bool             reference)
547{
548    IOOptionBits mdOpts = direction;
549
550    if (reference)
551        mdOpts |= kIOMemoryAsReference;
552
553    if (task) {
554        mdOpts |= kIOMemoryTypeVirtual;
555
556	// Auto-prepare if this is a kernel memory descriptor, as very few
557	// clients bother to prepare() kernel memory. That requirement was
558	// never enforced, so compensate for it here.
559        if (task == kernel_task)
560            mdOpts |= kIOMemoryAutoPrepare;
561    }
562    else
563        mdOpts |= kIOMemoryTypePhysical;
564
565    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
566}
567#endif /* !__LP64__ */
568
569/*
570 * initWithOptions:
571 *
572 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
573 * address ranges from a given task, several physical ranges, a UPL from the
574 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
575 *
576 * Passing the ranges as a reference will avoid an extra allocation.
577 *
578 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
579 * existing instance -- note this behavior is not commonly supported in other
580 * I/O Kit classes, although it is supported here.
581 */
582
583bool
584IOGeneralMemoryDescriptor::initWithOptions(void *	buffers,
585                                           UInt32	count,
586                                           UInt32	offset,
587                                           task_t	task,
588                                           IOOptionBits	options,
589                                           IOMapper *	mapper)
590{
591    IOOptionBits type = options & kIOMemoryTypeMask;
592
593#ifndef __LP64__
594    if (task
595        && (kIOMemoryTypeVirtual == type)
596        && vm_map_is_64bit(get_task_map(task))
597        && ((IOVirtualRange *) buffers)->address)
598    {
599        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
600        return false;
601    }
602#endif /* !__LP64__ */
603
604    // Grab the original MD's configuration data to initialise the
605    // arguments to this function.
606    if (kIOMemoryTypePersistentMD == type) {
607
608	typePersMDData *initData = (typePersMDData *) buffers;
609	const IOGeneralMemoryDescriptor *orig = initData->fMD;
610	ioGMDData *dataP = getDataP(orig->_memoryEntries);
611
612	// Only accept persistent memory descriptors with valid dataP data.
613	assert(orig->_rangesCount == 1);
614	if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
615	    return false;
616
617	_memEntry = initData->fMemEntry;	// Grab the new named entry
618	options = orig->_flags & ~kIOMemoryAsReference;
619        type = options & kIOMemoryTypeMask;
620	buffers = orig->_ranges.v;
621	count = orig->_rangesCount;
622
623	// Now grab the original task and whatever mapper was previously used
624	task = orig->_task;
625	mapper = dataP->fMapper;
626
627	// We are ready to go through the original initialisation now
628    }
629
630    switch (type) {
631    case kIOMemoryTypeUIO:
632    case kIOMemoryTypeVirtual:
633#ifndef __LP64__
634    case kIOMemoryTypeVirtual64:
635#endif /* !__LP64__ */
636        assert(task);
637        if (!task)
638            return false;
639	break;
640
641    case kIOMemoryTypePhysical:		// Neither Physical nor UPL should have a task
642#ifndef __LP64__
643    case kIOMemoryTypePhysical64:
644#endif /* !__LP64__ */
645    case kIOMemoryTypeUPL:
646        assert(!task);
647        break;
648    default:
649        return false;	/* bad argument */
650    }
651
652    assert(buffers);
653    assert(count);
654
655    /*
656     * We can check the _initialized  instance variable before having ever set
657     * it to an initial value because I/O Kit guarantees that all our instance
658     * variables are zeroed on an object's allocation.
659     */
660
661    if (_initialized) {
662        /*
663         * An existing memory descriptor is being retargeted to point to
664         * somewhere else.  Clean up our present state.
665         */
666	IOOptionBits type = _flags & kIOMemoryTypeMask;
667	if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
668	{
669	    while (_wireCount)
670		complete();
671	}
672        if (_ranges.v && !(kIOMemoryAsReference & _flags))
673	{
674	    if (kIOMemoryTypeUIO == type)
675		uio_free((uio_t) _ranges.v);
676#ifndef __LP64__
677	    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
678		IODelete(_ranges.v64, IOAddressRange, _rangesCount);
679#endif /* !__LP64__ */
680	    else
681		IODelete(_ranges.v, IOVirtualRange, _rangesCount);
682	}
683
684	options |= (kIOMemoryRedirected & _flags);
685	if (!(kIOMemoryRedirected & options))
686	{
687	    if (_memEntry)
688	    {
689		ipc_port_release_send((ipc_port_t) _memEntry);
690		_memEntry = 0;
691	    }
692	    if (_mappings)
693		_mappings->flushCollection();
694	}
695    }
696    else {
697        if (!super::init())
698            return false;
699        _initialized = true;
700    }
701
702    // Grab the appropriate mapper
703    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
704    if (kIOMemoryMapperNone & options)
705        mapper = 0;	// No Mapper
706    else if (mapper == kIOMapperSystem) {
707        IOMapper::checkForSystemMapper();
708        gIOSystemMapper = mapper = IOMapper::gSystem;
709    }
710
711    // Temp binary compatibility for kIOMemoryThreadSafe
712    if (kIOMemoryReserved6156215 & options)
713    {
714	options &= ~kIOMemoryReserved6156215;
715	options |= kIOMemoryThreadSafe;
716    }
717    // Remove the dynamic internal use flags from the initial setting
718    options 		  &= ~(kIOMemoryPreparedReadOnly);
719    _flags		   = options;
720    _task                  = task;
721
722#ifndef __LP64__
723    _direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
724#endif /* !__LP64__ */
725
726    __iomd_reservedA = 0;
727    __iomd_reservedB = 0;
728    _highestPage = 0;
729
730    if (kIOMemoryThreadSafe & options)
731    {
732	if (!_prepareLock)
733	    _prepareLock = IOLockAlloc();
734    }
735    else if (_prepareLock)
736    {
737	IOLockFree(_prepareLock);
738	_prepareLock = NULL;
739    }
740
741    if (kIOMemoryTypeUPL == type) {
742
743        ioGMDData *dataP;
744        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
745
746        if (!initMemoryEntries(dataSize, mapper)) return (false);
747        dataP = getDataP(_memoryEntries);
748        dataP->fPageCnt = 0;
749
750 //       _wireCount++;	// UPLs start out life wired
751
752        _length    = count;
753        _pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
754
755        ioPLBlock iopl;
756        iopl.fIOPL = (upl_t) buffers;
757        upl_set_referenced(iopl.fIOPL, true);
758        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
759
760	if (upl_get_size(iopl.fIOPL) < (count + offset))
761	    panic("short external upl");
762
763        _highestPage = upl_get_highest_page(iopl.fIOPL);
764
765        // Set the flags; kIOPLOnDevice is conveniently defined equal to 1
766        iopl.fFlags  = pageList->device | kIOPLExternUPL;
767        if (!pageList->device) {
768            // Pre-compute the offset into the UPL's page list
769            pageList = &pageList[atop_32(offset)];
770            offset &= PAGE_MASK;
771        }
772        iopl.fIOMDOffset = 0;
773        iopl.fMappedPage = 0;
774        iopl.fPageInfo = (vm_address_t) pageList;
775        iopl.fPageOffset = offset;
776        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
777    }
778    else {
779	// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
780	// kIOMemoryTypePhysical | kIOMemoryTypePhysical64
781
782	// Initialize the memory descriptor
783	if (options & kIOMemoryAsReference) {
784#ifndef __LP64__
785	    _rangesIsAllocated = false;
786#endif /* !__LP64__ */
787
788	    // Hack assignment to get the buffer arg into _ranges.
789	    // I'd prefer to do _ranges = (Ranges) buffers, but C++ does not
790	    // allow casting to a union type.
791	    // This also initialises the uio & physical ranges.
792	    _ranges.v = (IOVirtualRange *) buffers;
793	}
794	else {
795#ifndef __LP64__
796	    _rangesIsAllocated = true;
797#endif /* !__LP64__ */
798	    switch (type)
799	    {
800	      case kIOMemoryTypeUIO:
801		_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
802		break;
803
804#ifndef __LP64__
805	      case kIOMemoryTypeVirtual64:
806	      case kIOMemoryTypePhysical64:
807		if (count == 1
808		    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
809		    ) {
810		    if (kIOMemoryTypeVirtual64 == type)
811			type = kIOMemoryTypeVirtual;
812		    else
813			type = kIOMemoryTypePhysical;
814		    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
815		    _rangesIsAllocated = false;
816		    _ranges.v = &_singleRange.v;
817		    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
818		    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
819		    break;
820		}
821		_ranges.v64 = IONew(IOAddressRange, count);
822		if (!_ranges.v64)
823		    return false;
824		bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
825		break;
826#endif /* !__LP64__ */
827	      case kIOMemoryTypeVirtual:
828	      case kIOMemoryTypePhysical:
829		if (count == 1) {
830		    _flags |= kIOMemoryAsReference;
831#ifndef __LP64__
832		    _rangesIsAllocated = false;
833#endif /* !__LP64__ */
834		    _ranges.v = &_singleRange.v;
835		} else {
836		    _ranges.v = IONew(IOVirtualRange, count);
837		    if (!_ranges.v)
838			return false;
839		}
840		bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
841		break;
842	    }
843	}
844
845	// Find starting address within the vector of ranges
846	Ranges vec = _ranges;
847	UInt32 length = 0;
848	UInt32 pages = 0;
849	for (unsigned ind = 0; ind < count;  ind++) {
850	    user_addr_t addr;
851	    IOPhysicalLength len;
852
853	    // addr & len are returned by this function
854	    getAddrLenForInd(addr, len, type, vec, ind);
855	    pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
856	    len += length;
857	    assert(len >= length);	// Check for 32 bit wrap around
858	    length = len;
859
860	    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
861	    {
862		ppnum_t highPage = atop_64(addr + len - 1);
863		if (highPage > _highestPage)
864		    _highestPage = highPage;
865	    }
866	}
867	_length      = length;
868	_pages       = pages;
869	_rangesCount = count;
870
871        // Auto-prepare memory at creation time.
872        // Implied completion when descriptor is free-ed
873        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
874            _wireCount++;	// Physical MDs are, by definition, wired
875        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
876            ioGMDData *dataP;
877            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
878
879            if (!initMemoryEntries(dataSize, mapper)) return false;
880            dataP = getDataP(_memoryEntries);
881            dataP->fPageCnt = _pages;
882
883	    if ( (kIOMemoryPersistent & _flags) && !_memEntry)
884		_memEntry = createNamedEntry();
885
886            if ((_flags & kIOMemoryAutoPrepare)
887             && prepare() != kIOReturnSuccess)
888                return false;
889        }
890    }
891
892    return true;
893}
894
895/*
896 * free
897 *
898 * Free resources.
899 */
900void IOGeneralMemoryDescriptor::free()
901{
902    IOOptionBits type = _flags & kIOMemoryTypeMask;
903
904    if( reserved)
905    {
906	LOCK;
907	reserved->dp.memory = 0;
908	UNLOCK;
909    }
910    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
911    {
912	ioGMDData * dataP;
913	if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
914	{
915	    dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
916	    dataP->fMappedBase = 0;
917	}
918    }
919    else
920    {
921	while (_wireCount) complete();
922    }
923
924    if (_memoryEntries) _memoryEntries->release();
925
926    if (_ranges.v && !(kIOMemoryAsReference & _flags))
927    {
928	if (kIOMemoryTypeUIO == type)
929	    uio_free((uio_t) _ranges.v);
930#ifndef __LP64__
931	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
932	    IODelete(_ranges.v64, IOAddressRange, _rangesCount);
933#endif /* !__LP64__ */
934	else
935	    IODelete(_ranges.v, IOVirtualRange, _rangesCount);
936
937	_ranges.v = NULL;
938    }
939
940    if (reserved)
941    {
942        if (reserved->dp.devicePager)
943        {
944            // memEntry holds a ref on the device pager which owns reserved
945            // (IOMemoryDescriptorReserved) so no reserved access after this point
946            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
947        }
948        else
949            IODelete(reserved, IOMemoryDescriptorReserved, 1);
950        reserved = NULL;
951    }
952
953    if (_memEntry)
954        ipc_port_release_send( (ipc_port_t) _memEntry );
955
956    if (_prepareLock)
957	IOLockFree(_prepareLock);
958
959    super::free();
960}
961
962#ifndef __LP64__
963void IOGeneralMemoryDescriptor::unmapFromKernel()
964{
965    panic("IOGMD::unmapFromKernel deprecated");
966}
967
968void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
969{
970    panic("IOGMD::mapIntoKernel deprecated");
971}
972#endif /* !__LP64__ */
973
974/*
975 * getDirection:
976 *
977 * Get the direction of the transfer.
978 */
979IODirection IOMemoryDescriptor::getDirection() const
980{
981#ifndef __LP64__
982    if (_direction)
983	return _direction;
984#endif /* !__LP64__ */
985    return (IODirection) (_flags & kIOMemoryDirectionMask);
986}
987
988/*
989 * getLength:
990 *
991 * Get the length of the transfer (over all ranges).
992 */
993IOByteCount IOMemoryDescriptor::getLength() const
994{
995    return _length;
996}
997
998void IOMemoryDescriptor::setTag( IOOptionBits tag )
999{
1000    _tag = tag;
1001}
1002
1003IOOptionBits IOMemoryDescriptor::getTag( void )
1004{
1005    return( _tag);
1006}
1007
1008#ifndef __LP64__
1009// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
1010IOPhysicalAddress
1011IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
1012{
1013    addr64_t physAddr = 0;
1014
1015    if( prepare() == kIOReturnSuccess) {
1016        physAddr = getPhysicalSegment64( offset, length );
1017        complete();
1018    }
1019
1020    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1021}
1022#endif /* !__LP64__ */
1023
1024IOByteCount IOMemoryDescriptor::readBytes
1025                (IOByteCount offset, void *bytes, IOByteCount length)
1026{
1027    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1028    IOByteCount remaining;
1029
1030    // Assert that this entire I/O is within the available range
1031    assert(offset < _length);
1032    assert(offset + length <= _length);
1033    if (offset >= _length) {
1034        return 0;
1035    }
1036
1037    if (kIOMemoryThreadSafe & _flags)
1038	LOCK;
1039
1040    remaining = length = min(length, _length - offset);
1041    while (remaining) {	// (process another target segment?)
1042        addr64_t	srcAddr64;
1043        IOByteCount	srcLen;
1044
1045        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1046        if (!srcAddr64)
1047            break;
1048
1049        // Clip segment length to remaining
1050        if (srcLen > remaining)
1051            srcLen = remaining;
1052
1053        copypv(srcAddr64, dstAddr, srcLen,
1054                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1055
1056        dstAddr   += srcLen;
1057        offset    += srcLen;
1058        remaining -= srcLen;
1059    }
1060
1061    if (kIOMemoryThreadSafe & _flags)
1062	UNLOCK;
1063
1064    assert(!remaining);
1065
1066    return length - remaining;
1067}
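// A usage sketch (hypothetical names): copying the first bytes of a descriptor
// into a caller-supplied buffer. readBytes() resolves physical segments and
// copies with copypv(), so the descriptor should already be prepared (wired)
// when it describes pageable memory.
#if 0
static IOByteCount peekHeader(IOMemoryDescriptor * md, void * out, IOByteCount outLen)
{
    IOByteCount want = min(outLen, md->getLength());
    return md->readBytes(0, out, want);		// returns the count actually copied
}
#endif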
1068
1069IOByteCount IOMemoryDescriptor::writeBytes
1070                (IOByteCount offset, const void *bytes, IOByteCount length)
1071{
1072    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1073    IOByteCount remaining;
1074
1075    // Assert that this entire I/O is within the available range
1076    assert(offset < _length);
1077    assert(offset + length <= _length);
1078
1079    assert( !(kIOMemoryPreparedReadOnly & _flags) );
1080
1081    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1082        return 0;
1083    }
1084
1085    if (kIOMemoryThreadSafe & _flags)
1086	LOCK;
1087
1088    remaining = length = min(length, _length - offset);
1089    while (remaining) {	// (process another target segment?)
1090        addr64_t    dstAddr64;
1091        IOByteCount dstLen;
1092
1093        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1094        if (!dstAddr64)
1095            break;
1096
1097        // Clip segment length to remaining
1098        if (dstLen > remaining)
1099            dstLen = remaining;
1100
1101        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1102                            cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1103
1104        srcAddr   += dstLen;
1105        offset    += dstLen;
1106        remaining -= dstLen;
1107    }
1108
1109    if (kIOMemoryThreadSafe & _flags)
1110	UNLOCK;
1111
1112    assert(!remaining);
1113
1114    return length - remaining;
1115}
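// The mirror of the readBytes() sketch above (hypothetical names). Note that a
// descriptor prepared read-only (kIOMemoryPreparedReadOnly) copies nothing and
// writeBytes() returns 0.
#if 0
static bool patchBytes(IOMemoryDescriptor * md, IOByteCount offset,
		       const void * bytes, IOByteCount length)
{
    return (md->writeBytes(offset, bytes, length) == length);
}
#endif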
1116
1117// osfmk/device/iokit_rpc.c
1118extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1119
1120#ifndef __LP64__
1121void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1122{
1123    panic("IOGMD::setPosition deprecated");
1124}
1125#endif /* !__LP64__ */
1126
1127static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1128
1129uint64_t
1130IOGeneralMemoryDescriptor::getPreparationID( void )
1131{
1132    ioGMDData *dataP;
1133
1134    if (!_wireCount)
1135	return (kIOPreparationIDUnprepared);
1136
1137    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1138      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1139    {
1140        IOMemoryDescriptor::setPreparationID();
1141        return (IOMemoryDescriptor::getPreparationID());
1142    }
1143
1144    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1145	return (kIOPreparationIDUnprepared);
1146
1147    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1148    {
1149	dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1150    }
1151    return (dataP->fPreparationID);
1152}
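// A sketch of how the preparation ID can be used (hypothetical 'cachedID'):
// it is a 64-bit value drawn from a global counter and is intended to change
// when the descriptor is prepared anew, so clients such as IODMACommand can
// detect that cached per-preparation state is stale.
#if 0
static bool preparationStillCurrent(IOMemoryDescriptor * md, uint64_t cachedID)
{
    uint64_t id = md->getPreparationID();
    return ((kIOPreparationIDUnprepared != id) && (id == cachedID));
}
#endif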
1153
1154IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1155{
1156    if (!reserved)
1157    {
1158        reserved = IONew(IOMemoryDescriptorReserved, 1);
1159        if (reserved)
1160            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1161    }
1162    return (reserved);
1163}
1164
1165void IOMemoryDescriptor::setPreparationID( void )
1166{
1167    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1168    {
1169#if defined(__ppc__ )
1170        reserved->preparationID = gIOMDPreparationID++;
1171#else
1172        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1173#endif
1174    }
1175}
1176
1177uint64_t IOMemoryDescriptor::getPreparationID( void )
1178{
1179    if (reserved)
1180        return (reserved->preparationID);
1181    else
1182        return (kIOPreparationIDUnsupported);
1183}
1184
1185IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1186{
1187    IOReturn err = kIOReturnSuccess;
1188    DMACommandOps params;
1189    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1190    ioGMDData *dataP;
1191
1192    params = (op & ~kIOMDDMACommandOperationMask & op);
1193    op &= kIOMDDMACommandOperationMask;
1194
1195    if (kIOMDDMAMap == op)
1196    {
1197	if (dataSize < sizeof(IOMDDMAMapArgs))
1198	    return kIOReturnUnderrun;
1199
1200	IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1201
1202	if (!_memoryEntries
1203	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1204
1205	if (_memoryEntries && data->fMapper)
1206	{
1207	    bool remap;
1208	    bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1209	    dataP = getDataP(_memoryEntries);
1210
1211	    if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1212	    if (data->fMapSpec.alignment      > dataP->fDMAMapAlignment)      dataP->fDMAMapAlignment      = data->fMapSpec.alignment;
1213
1214	    remap = (dataP->fDMAMapNumAddressBits < 64)
1215	    	 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1216	    remap |= (dataP->fDMAMapAlignment > page_size);
1217	    remap |= (!whole);
1218	    if (remap || !dataP->fMappedBase)
1219	    {
1220//		if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1221	    	err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1222		if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1223		{
1224		    dataP->fMappedBase = data->fAlloc;
1225		    data->fAllocCount = 0; 			// IOMD owns the alloc now
1226		}
1227	    }
1228	    else
1229	    {
1230	    	data->fAlloc = dataP->fMappedBase;
1231		data->fAllocCount = 0; 				// IOMD owns the alloc
1232	    }
1233	    data->fMapContig = !dataP->fDiscontig;
1234	}
1235
1236	return (err);
1237    }
1238
1239    if (kIOMDAddDMAMapSpec == op)
1240    {
1241	if (dataSize < sizeof(IODMAMapSpecification))
1242	    return kIOReturnUnderrun;
1243
1244	IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1245
1246	if (!_memoryEntries
1247	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1248
1249	if (_memoryEntries)
1250	{
1251	    dataP = getDataP(_memoryEntries);
1252	    if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
1253	     	dataP->fDMAMapNumAddressBits = data->numAddressBits;
1254	    if (data->alignment > dataP->fDMAMapAlignment)
1255	     	dataP->fDMAMapAlignment = data->alignment;
1256	}
1257	return kIOReturnSuccess;
1258    }
1259
1260    if (kIOMDGetCharacteristics == op) {
1261
1262	if (dataSize < sizeof(IOMDDMACharacteristics))
1263	    return kIOReturnUnderrun;
1264
1265	IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1266	data->fLength = _length;
1267	data->fSGCount = _rangesCount;
1268	data->fPages = _pages;
1269	data->fDirection = getDirection();
1270	if (!_wireCount)
1271	    data->fIsPrepared = false;
1272	else {
1273	    data->fIsPrepared = true;
1274	    data->fHighestPage = _highestPage;
1275	    if (_memoryEntries)
1276	    {
1277		dataP = getDataP(_memoryEntries);
1278		ioPLBlock *ioplList = getIOPLList(dataP);
1279		UInt count = getNumIOPL(_memoryEntries, dataP);
1280		if (count == 1)
1281		    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1282	    }
1283	}
1284
1285	return kIOReturnSuccess;
1286
1287#if IOMD_DEBUG_DMAACTIVE
1288    } else if (kIOMDDMAActive == op) {
1289	if (params) OSIncrementAtomic(&md->__iomd_reservedA);
1290	else {
1291	    if (md->__iomd_reservedA)
1292		OSDecrementAtomic(&md->__iomd_reservedA);
1293	    else
1294		panic("kIOMDSetDMAInactive");
1295	}
1296#endif /* IOMD_DEBUG_DMAACTIVE */
1297
1298    } else if (kIOMDWalkSegments != op)
1299	return kIOReturnBadArgument;
1300
1301    // Get the next segment
1302    struct InternalState {
1303	IOMDDMAWalkSegmentArgs fIO;
1304	UInt fOffset2Index;
1305	UInt fIndex;
1306	UInt fNextOffset;
1307    } *isP;
1308
1309    // Find the next segment
1310    if (dataSize < sizeof(*isP))
1311	return kIOReturnUnderrun;
1312
1313    isP = (InternalState *) vData;
1314    UInt offset = isP->fIO.fOffset;
1315    bool mapped = isP->fIO.fMapped;
1316
1317    if (IOMapper::gSystem && mapped
1318        && (!(kIOMemoryHostOnly & _flags))
1319	&& (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
1320//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
1321    {
1322	if (!_memoryEntries
1323	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1324
1325	dataP = getDataP(_memoryEntries);
1326	if (dataP->fMapper)
1327	{
1328	    IODMAMapSpecification mapSpec;
1329	    bzero(&mapSpec, sizeof(mapSpec));
1330	    mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
1331	    mapSpec.alignment = dataP->fDMAMapAlignment;
1332	    err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
1333	    if (kIOReturnSuccess != err) return (err);
1334	}
1335    }
1336
1337    if (offset >= _length)
1338	return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1339
1340    // Validate the previous offset
1341    UInt ind, off2Ind = isP->fOffset2Index;
1342    if (!params
1343	&& offset
1344	&& (offset == isP->fNextOffset || off2Ind <= offset))
1345	ind = isP->fIndex;
1346    else
1347	ind = off2Ind = 0;	// Start from beginning
1348
1349    UInt length;
1350    UInt64 address;
1351
1352
1353    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1354
1355	// Physical address based memory descriptor
1356	const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1357
1358	// Find the range after the one that contains the offset
1359	mach_vm_size_t len;
1360	for (len = 0; off2Ind <= offset; ind++) {
1361	    len = physP[ind].length;
1362	    off2Ind += len;
1363	}
1364
1365	// Calculate length within range and starting address
1366	length   = off2Ind - offset;
1367	address  = physP[ind - 1].address + len - length;
1368
1369	if (true && mapped && _memoryEntries
1370		&& (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1371	{
1372	    address = dataP->fMappedBase + offset;
1373	}
1374	else
1375	{
1376	    // see how far we can coalesce ranges
1377	    while (ind < _rangesCount && address + length == physP[ind].address) {
1378		len = physP[ind].length;
1379		length += len;
1380		off2Ind += len;
1381		ind++;
1382	    }
1383	}
1384
1385	// correct contiguous check overshoot
1386	ind--;
1387	off2Ind -= len;
1388    }
1389#ifndef __LP64__
1390    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1391
1392	// Physical address based memory descriptor
1393	const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1394
1395	// Find the range after the one that contains the offset
1396	mach_vm_size_t len;
1397	for (len = 0; off2Ind <= offset; ind++) {
1398	    len = physP[ind].length;
1399	    off2Ind += len;
1400	}
1401
1402	// Calculate length within range and starting address
1403	length   = off2Ind - offset;
1404	address  = physP[ind - 1].address + len - length;
1405
1406	if (true && mapped && _memoryEntries
1407		&& (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1408	{
1409	    address = dataP->fMappedBase + offset;
1410	}
1411	else
1412	{
1413	    // see how far we can coalesce ranges
1414	    while (ind < _rangesCount && address + length == physP[ind].address) {
1415		len = physP[ind].length;
1416		length += len;
1417		off2Ind += len;
1418		ind++;
1419	    }
1420	}
1421	// correct contiguous check overshoot
1422	ind--;
1423	off2Ind -= len;
1424    }
1425#endif /* !__LP64__ */
1426    else do {
1427	if (!_wireCount)
1428	    panic("IOGMD: not wired for the IODMACommand");
1429
1430	assert(_memoryEntries);
1431
1432	dataP = getDataP(_memoryEntries);
1433	const ioPLBlock *ioplList = getIOPLList(dataP);
1434	UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1435	upl_page_info_t *pageList = getPageList(dataP);
1436
1437	assert(numIOPLs > 0);
1438
1439	// Scan through iopl info blocks looking for block containing offset
1440	while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1441	    ind++;
1442
1443	// Go back to actual range as search goes past it
1444	ioPLBlock ioplInfo = ioplList[ind - 1];
1445	off2Ind = ioplInfo.fIOMDOffset;
1446
1447	if (ind < numIOPLs)
1448	    length = ioplList[ind].fIOMDOffset;
1449	else
1450	    length = _length;
1451	length -= offset;			// Remainder within iopl
1452
1453	// Subtract offset till this iopl in total list
1454	offset -= off2Ind;
1455
1456	// If a mapped address is requested and this is a pre-mapped IOPL
1457	// then we just need to compute an offset relative to the mapped base.
1458	if (mapped && dataP->fMappedBase) {
1459	    offset += (ioplInfo.fPageOffset & PAGE_MASK);
1460	    address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
1461	    continue;	// Done; leave the do/while(false) now
1462	}
1463
1464	// The offset is rebased into the current iopl.
1465	// Now add the iopl 1st page offset.
1466	offset += ioplInfo.fPageOffset;
1467
1468	// For external UPLs the fPageInfo field points directly to
1469	// the upl's upl_page_info_t array.
1470	if (ioplInfo.fFlags & kIOPLExternUPL)
1471	    pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1472	else
1473	    pageList = &pageList[ioplInfo.fPageInfo];
1474
1475	// Check for direct device non-paged memory
1476	if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1477	    address = ptoa_64(pageList->phys_addr) + offset;
1478	    continue;	// Done; leave the do/while(false) now
1479	}
1480
1481	// Now we need to compute the index into the pageList
1482	UInt pageInd = atop_32(offset);
1483	offset &= PAGE_MASK;
1484
1485	// Compute the starting address of this segment
1486	IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1487	if (!pageAddr) {
1488	    panic("!pageList phys_addr");
1489	}
1490
1491	address = ptoa_64(pageAddr) + offset;
1492
1493	// length is currently set to the length of the remainder of the iopl.
1494	// We need to check that the remainder of the iopl is contiguous.
1495	// This is indicated by pageList[ind].phys_addr being sequential.
1496	IOByteCount contigLength = PAGE_SIZE - offset;
1497	while (contigLength < length
1498		&& ++pageAddr == pageList[++pageInd].phys_addr)
1499	{
1500	    contigLength += PAGE_SIZE;
1501	}
1502
1503	if (contigLength < length)
1504	    length = contigLength;
1505
1506
1507	assert(address);
1508	assert(length);
1509
1510    } while (false);
1511
1512    // Update return values and state
1513    isP->fIO.fIOVMAddr = address;
1514    isP->fIO.fLength   = length;
1515    isP->fIndex        = ind;
1516    isP->fOffset2Index = off2Ind;
1517    isP->fNextOffset   = isP->fIO.fOffset + length;
1518
1519    return kIOReturnSuccess;
1520}
1521
1522addr64_t
1523IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1524{
1525    IOReturn     ret;
1526    addr64_t     address = 0;
1527    IOByteCount  length  = 0;
1528    IOMapper *   mapper  = gIOSystemMapper;
1529    IOOptionBits type    = _flags & kIOMemoryTypeMask;
1530
1531    if (lengthOfSegment)
1532        *lengthOfSegment = 0;
1533
1534    if (offset >= _length)
1535        return 0;
1536
1537    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1538    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1539    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1540    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1541
1542    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1543    {
1544        unsigned rangesIndex = 0;
1545	Ranges vec = _ranges;
1546	user_addr_t addr;
1547
1548	// Find starting address within the vector of ranges
1549	for (;;) {
1550	    getAddrLenForInd(addr, length, type, vec, rangesIndex);
1551	    if (offset < length)
1552		break;
1553	    offset -= length; // (make offset relative)
1554	    rangesIndex++;
1555	}
1556
1557	// Now that we have the starting range,
1558	// let's find the last contiguous range
1559        addr   += offset;
1560        length -= offset;
1561
1562        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1563	    user_addr_t      newAddr;
1564	    IOPhysicalLength newLen;
1565
1566	    getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1567	    if (addr + length != newAddr)
1568		break;
1569	    length += newLen;
1570	}
1571        if (addr)
1572	    address = (IOPhysicalAddress) addr;	// Truncate address to 32bit
1573    }
1574    else
1575    {
1576	IOMDDMAWalkSegmentState _state;
1577	IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
1578
1579	state->fOffset = offset;
1580	state->fLength = _length - offset;
1581	state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
1582
1583	ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1584
1585	if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1586		DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1587					ret, this, state->fOffset,
1588					state->fIOVMAddr, state->fLength);
1589	if (kIOReturnSuccess == ret)
1590	{
1591	    address = state->fIOVMAddr;
1592	    length  = state->fLength;
1593	}
1594
1595	// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1596	// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1597
1598	if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1599	{
1600	    if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1601	    {
1602		addr64_t    origAddr = address;
1603		IOByteCount origLen  = length;
1604
1605		address = mapper->mapAddr(origAddr);
1606		length = page_size - (address & (page_size - 1));
1607		while ((length < origLen)
1608		    && ((address + length) == mapper->mapAddr(origAddr + length)))
1609		    length += page_size;
1610		if (length > origLen)
1611		    length = origLen;
1612	    }
1613	}
1614    }
1615
1616    if (!address)
1617        length = 0;
1618
1619    if (lengthOfSegment)
1620        *lengthOfSegment = length;
1621
1622    return (address);
1623}
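// A usage sketch (hypothetical 'md', assumed prepared): walking a descriptor as
// a list of physically contiguous segments. Passing kIOMemoryMapperNone asks
// for CPU physical addresses rather than system-mapper (IOVM) addresses.
#if 0
static void dumpSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;
    while (offset < md->getLength()) {
	IOByteCount segLen;
	addr64_t    segPhys = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);

	if (!segPhys || !segLen)
	    break;
	kprintf("segment @ 0x%qx, length 0x%qx\n", segPhys, (uint64_t) segLen);
	offset += segLen;
    }
}
#endif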
1624
1625#ifndef __LP64__
1626addr64_t
1627IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1628{
1629    addr64_t address = 0;
1630
1631    if (options & _kIOMemorySourceSegment)
1632    {
1633        address = getSourceSegment(offset, lengthOfSegment);
1634    }
1635    else if (options & kIOMemoryMapperNone)
1636    {
1637        address = getPhysicalSegment64(offset, lengthOfSegment);
1638    }
1639    else
1640    {
1641        address = getPhysicalSegment(offset, lengthOfSegment);
1642    }
1643
1644    return (address);
1645}
1646
1647addr64_t
1648IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1649{
1650    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1651}
1652
1653IOPhysicalAddress
1654IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1655{
1656    addr64_t    address = 0;
1657    IOByteCount length  = 0;
1658
1659    address = getPhysicalSegment(offset, lengthOfSegment, 0);
1660
1661    if (lengthOfSegment)
1662	length = *lengthOfSegment;
1663
1664    if ((address + length) > 0x100000000ULL)
1665    {
1666	panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1667		    address, (long) length, (getMetaClass())->getClassName());
1668    }
1669
1670    return ((IOPhysicalAddress) address);
1671}
1672
1673addr64_t
1674IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1675{
1676    IOPhysicalAddress phys32;
1677    IOByteCount	      length;
1678    addr64_t 	      phys64;
1679    IOMapper *        mapper = 0;
1680
1681    phys32 = getPhysicalSegment(offset, lengthOfSegment);
1682    if (!phys32)
1683	return 0;
1684
1685    if (gIOSystemMapper)
1686	mapper = gIOSystemMapper;
1687
1688    if (mapper)
1689    {
1690	IOByteCount origLen;
1691
1692	phys64 = mapper->mapAddr(phys32);
1693	origLen = *lengthOfSegment;
1694	length = page_size - (phys64 & (page_size - 1));
1695	while ((length < origLen)
1696	    && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1697	    length += page_size;
1698	if (length > origLen)
1699	    length = origLen;
1700
1701	*lengthOfSegment = length;
1702    }
1703    else
1704	phys64 = (addr64_t) phys32;
1705
1706    return phys64;
1707}
1708
1709IOPhysicalAddress
1710IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1711{
1712    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1713}
1714
1715IOPhysicalAddress
1716IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1717{
1718    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1719}
1720
1721void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1722							IOByteCount * lengthOfSegment)
1723{
1724    if (_task == kernel_task)
1725        return (void *) getSourceSegment(offset, lengthOfSegment);
1726    else
1727        panic("IOGMD::getVirtualSegment deprecated");
1728
1729    return 0;
1730}
1731#endif /* !__LP64__ */
1732
1733IOReturn
1734IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1735{
1736    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
1737    DMACommandOps params;
1738    IOReturn err;
1739
1740    params = (op & ~kIOMDDMACommandOperationMask & op);
1741    op &= kIOMDDMACommandOperationMask;
1742
1743    if (kIOMDGetCharacteristics == op) {
1744	if (dataSize < sizeof(IOMDDMACharacteristics))
1745	    return kIOReturnUnderrun;
1746
1747	IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1748	data->fLength = getLength();
1749	data->fSGCount = 0;
1750	data->fDirection = getDirection();
1751	data->fIsPrepared = true;	// Assume prepared - fails safe
1752    }
1753    else if (kIOMDWalkSegments == op) {
1754	if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1755	    return kIOReturnUnderrun;
1756
1757	IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1758	IOByteCount offset  = (IOByteCount) data->fOffset;
1759
1760	IOPhysicalLength length;
1761	if (data->fMapped && IOMapper::gSystem)
1762	    data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
1763	else
1764	    data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1765	data->fLength = length;
1766    }
1767    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
1768    else if (kIOMDDMAMap == op)
1769    {
1770	if (dataSize < sizeof(IOMDDMAMapArgs))
1771	    return kIOReturnUnderrun;
1772	IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1773
1774	if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
1775
1776	data->fMapContig = true;
1777	err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1778	return (err);
1779    }
1780    else return kIOReturnBadArgument;
1781
1782    return kIOReturnSuccess;
1783}
1784
1785static IOReturn
1786purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1787{
1788    IOReturn err = kIOReturnSuccess;
1789
1790    *control = VM_PURGABLE_SET_STATE;
1791
1792    enum { kIOMemoryPurgeableControlMask = 15 };
1793
1794    switch (kIOMemoryPurgeableControlMask & newState)
1795    {
1796	case kIOMemoryPurgeableKeepCurrent:
1797	    *control = VM_PURGABLE_GET_STATE;
1798	    break;
1799
1800	case kIOMemoryPurgeableNonVolatile:
1801	    *state = VM_PURGABLE_NONVOLATILE;
1802	    break;
1803	case kIOMemoryPurgeableVolatile:
1804	    *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
1805	    break;
1806	case kIOMemoryPurgeableEmpty:
1807	    *state = VM_PURGABLE_EMPTY;
1808	    break;
1809	default:
1810	    err = kIOReturnBadArgument;
1811	    break;
1812    }
1813    return (err);
1814}
1815
1816static IOReturn
1817purgeableStateBits(int * state)
1818{
1819    IOReturn err = kIOReturnSuccess;
1820
1821    switch (VM_PURGABLE_STATE_MASK & *state)
1822    {
1823	case VM_PURGABLE_NONVOLATILE:
1824	    *state = kIOMemoryPurgeableNonVolatile;
1825	    break;
1826	case VM_PURGABLE_VOLATILE:
1827	    *state = kIOMemoryPurgeableVolatile;
1828	    break;
1829	case VM_PURGABLE_EMPTY:
1830	    *state = kIOMemoryPurgeableEmpty;
1831	    break;
1832	default:
1833	    *state = kIOMemoryPurgeableNonVolatile;
1834	    err = kIOReturnNotReady;
1835	    break;
1836    }
1837    return (err);
1838}
1839
1840IOReturn
1841IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1842						   IOOptionBits * oldState )
1843{
1844    IOReturn	  err = kIOReturnSuccess;
1845    vm_purgable_t control;
1846    int           state;
1847
1848    if (_memEntry)
1849    {
1850	err = super::setPurgeable(newState, oldState);
1851    }
1852    else
1853    {
1854	if (kIOMemoryThreadSafe & _flags)
1855	    LOCK;
1856	do
1857	{
1858	    // Find the appropriate vm_map for the given task
1859	    vm_map_t curMap;
1860	    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1861	    {
1862		err = kIOReturnNotReady;
1863		break;
1864	    }
1865	    else if (!_task)
1866	    {
1867		err = kIOReturnUnsupported;
1868		break;
1869	    }
1870	    else
1871		curMap = get_task_map(_task);
1872
1873	    // can only do one range
1874	    Ranges vec = _ranges;
1875	    IOOptionBits type = _flags & kIOMemoryTypeMask;
1876	    user_addr_t addr;
1877	    IOByteCount len;
1878	    getAddrLenForInd(addr, len, type, vec, 0);
1879
1880	    err = purgeableControlBits(newState, &control, &state);
1881	    if (kIOReturnSuccess != err)
1882		break;
1883	    err = mach_vm_purgable_control(curMap, addr, control, &state);
1884	    if (oldState)
1885	    {
1886		if (kIOReturnSuccess == err)
1887		{
1888		    err = purgeableStateBits(&state);
1889		    *oldState = state;
1890		}
1891	    }
1892	}
1893	while (false);
1894	if (kIOMemoryThreadSafe & _flags)
1895	    UNLOCK;
1896    }
1897    return (err);
1898}
1899
1900IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1901                                           IOOptionBits * oldState )
1902{
1903    IOReturn	  err = kIOReturnSuccess;
1904    vm_purgable_t control;
1905    int           state;
1906
1907    if (kIOMemoryThreadSafe & _flags)
1908	LOCK;
1909
1910    do
1911    {
1912        if (!_memEntry)
1913        {
1914            err = kIOReturnNotReady;
1915            break;
1916        }
1917	err = purgeableControlBits(newState, &control, &state);
1918	if (kIOReturnSuccess != err)
1919	    break;
1920        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1921	if (oldState)
1922	{
1923	    if (kIOReturnSuccess == err)
1924	    {
1925		err = purgeableStateBits(&state);
1926		*oldState = state;
1927	    }
1928	}
1929    }
1930    while (false);
1931
1932    if (kIOMemoryThreadSafe & _flags)
1933	UNLOCK;
1934
1935    return (err);
1936}
1937
1938
1939IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
1940                                     	    IOByteCount * dirtyPageCount )
1941{
1942   IOReturn err = kIOReturnSuccess;
1943   unsigned int _residentPageCount, _dirtyPageCount;
1944
1945   if (kIOMemoryThreadSafe & _flags) LOCK;
1946
1947    do
1948    {
1949       if (!_memEntry)
1950       {
1951	   err = kIOReturnNotReady;
1952	   break;
1953       }
1954       if ((residentPageCount == NULL) && (dirtyPageCount == NULL))
1955       {
1956	   err = kIOReturnBadArgument;
1957	   break;
1958       }
1959
1960       err = mach_memory_entry_get_page_counts((ipc_port_t) _memEntry,
1961						residentPageCount ? &_residentPageCount : NULL,
1962						dirtyPageCount    ? &_dirtyPageCount    : NULL);
1963       if (kIOReturnSuccess != err) break;
1964       if (residentPageCount) *residentPageCount = _residentPageCount;
1965       if (dirtyPageCount)    *dirtyPageCount    = _dirtyPageCount;
1966    }
1967    while (false);
1968
1969    if (kIOMemoryThreadSafe & _flags) UNLOCK;
1970
1971    return (err);
1972}
1973
1974
1975extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1976extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1977
1978static void SetEncryptOp(addr64_t pa, unsigned int count)
1979{
1980    ppnum_t page, end;
1981
1982    page = atop_64(round_page_64(pa));
1983    end  = atop_64(trunc_page_64(pa + count));
1984    for (; page < end; page++)
1985    {
1986        pmap_clear_noencrypt(page);
1987    }
1988}
1989
1990static void ClearEncryptOp(addr64_t pa, unsigned int count)
1991{
1992    ppnum_t page, end;
1993
1994    page = atop_64(round_page_64(pa));
1995    end  = atop_64(trunc_page_64(pa + count));
1996    for (; page < end; page++)
1997    {
1998        pmap_set_noencrypt(page);
1999    }
2000}
2001
2002IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2003                                                IOByteCount offset, IOByteCount length )
2004{
2005    IOByteCount remaining;
2006    unsigned int res;
2007    void (*func)(addr64_t pa, unsigned int count) = 0;
2008
2009    switch (options)
2010    {
2011        case kIOMemoryIncoherentIOFlush:
2012            func = &dcache_incoherent_io_flush64;
2013            break;
2014        case kIOMemoryIncoherentIOStore:
2015            func = &dcache_incoherent_io_store64;
2016            break;
2017
2018        case kIOMemorySetEncrypted:
2019            func = &SetEncryptOp;
2020            break;
2021        case kIOMemoryClearEncrypted:
2022            func = &ClearEncryptOp;
2023            break;
2024    }
2025
2026    if (!func)
2027        return (kIOReturnUnsupported);
2028
2029    if (kIOMemoryThreadSafe & _flags)
2030	LOCK;
2031
2032    res = 0x0UL;
2033    remaining = length = min(length, getLength() - offset);
2034    while (remaining)
2035    // (process another target segment?)
2036    {
2037        addr64_t    dstAddr64;
2038        IOByteCount dstLen;
2039
2040        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2041        if (!dstAddr64)
2042            break;
2043
2044        // Clip segment length to remaining
2045        if (dstLen > remaining)
2046            dstLen = remaining;
2047
2048	(*func)(dstAddr64, dstLen);
2049
2050        offset    += dstLen;
2051        remaining -= dstLen;
2052    }
2053
2054    if (kIOMemoryThreadSafe & _flags)
2055	UNLOCK;
2056
2057    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2058}
2059
2060#if defined(__i386__) || defined(__x86_64__)
2061extern vm_offset_t		first_avail;
2062#define io_kernel_static_end	first_avail
2063#else
2064#error io_kernel_static_end is undefined for this architecture
2065#endif
2066
2067static kern_return_t
2068io_get_kernel_static_upl(
2069	vm_map_t		/* map */,
2070	uintptr_t		offset,
2071	vm_size_t		*upl_size,
2072	upl_t			*upl,
2073	upl_page_info_array_t	page_list,
2074	unsigned int		*count,
2075	ppnum_t			*highest_page)
2076{
2077    unsigned int pageCount, page;
2078    ppnum_t phys;
2079    ppnum_t highestPage = 0;
2080
2081    pageCount = atop_32(*upl_size);
2082    if (pageCount > *count)
2083	pageCount = *count;
2084
2085    *upl = NULL;
2086
2087    for (page = 0; page < pageCount; page++)
2088    {
2089	phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2090	if (!phys)
2091	    break;
2092	page_list[page].phys_addr = phys;
2093	page_list[page].pageout	  = 0;
2094	page_list[page].absent	  = 0;
2095	page_list[page].dirty	  = 0;
2096	page_list[page].precious  = 0;
2097	page_list[page].device	  = 0;
2098	if (phys > highestPage)
2099	    highestPage = phys;
2100    }
2101
2102    *highest_page = highestPage;
2103
2104    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2105}
2106
2107IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2108{
2109    IOOptionBits type = _flags & kIOMemoryTypeMask;
2110    IOReturn error = kIOReturnCannotWire;
2111    ioGMDData *dataP;
2112    upl_page_info_array_t pageInfo;
2113    ppnum_t mapBase;
2114    ipc_port_t sharedMem;
2115
2116    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2117
2118    if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2119        forDirection = (IODirection) (forDirection | getDirection());
2120
2121    int uplFlags;    // This Mem Desc's default flags for upl creation
2122    switch (kIODirectionOutIn & forDirection)
2123    {
2124    case kIODirectionOut:
2125        // Pages do not need to be marked as dirty on commit
2126        uplFlags = UPL_COPYOUT_FROM;
2127        break;
2128
2129    case kIODirectionIn:
2130    default:
2131        uplFlags = 0;	// i.e. ~UPL_COPYOUT_FROM
2132        break;
2133    }
2134
2135    if (_wireCount)
2136    {
2137        if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2138        {
2139	    OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2140	    error = kIOReturnNotWritable;
2141        }
2142        else error = kIOReturnSuccess;
2143	return (error);
2144    }
2145
2146    dataP = getDataP(_memoryEntries);
2147    IOMapper *mapper;
2148    mapper = dataP->fMapper;
2149    dataP->fMappedBase = 0;
2150
2151    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2152    if (kIODirectionPrepareToPhys32 & forDirection)
2153    {
2154	if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2155	if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2156    }
2157    if (kIODirectionPrepareNoFault     & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2158    if (kIODirectionPrepareNoZeroFill  & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2159    if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
2160
2161    mapBase = 0;
2162    sharedMem = (ipc_port_t) _memEntry;
2163
2164    // Note that appendBytes(NULL) zeros the data up to the desired length.
2165    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
2166    dataP = 0;
2167
2168    // Find the appropriate vm_map for the given task
2169    vm_map_t curMap;
2170    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2171        curMap = 0;
2172    else
2173        { curMap = get_task_map(_task); }
2174
2175    // Iterate over the vector of virtual ranges
2176    Ranges vec = _ranges;
2177    unsigned int pageIndex  = 0;
2178    IOByteCount mdOffset    = 0;
2179    ppnum_t highestPage     = 0;
2180
2181    for (UInt range = 0; range < _rangesCount; range++) {
2182        ioPLBlock iopl;
2183	user_addr_t startPage;
2184        IOByteCount numBytes;
2185	ppnum_t highPage = 0;
2186
2187	// Get the startPage address and length of vec[range]
2188	getAddrLenForInd(startPage, numBytes, type, vec, range);
2189	iopl.fPageOffset = startPage & PAGE_MASK;
2190	numBytes += iopl.fPageOffset;
2191	startPage = trunc_page_64(startPage);
2192
2193	if (mapper)
2194	    iopl.fMappedPage = mapBase + pageIndex;
2195	else
2196	    iopl.fMappedPage = 0;
2197
2198	// Iterate over the current range, creating UPLs
2199        while (numBytes) {
2200	    vm_address_t kernelStart = (vm_address_t) startPage;
2201            vm_map_t theMap;
2202	    if (curMap)
2203		theMap = curMap;
2204	    else if (!sharedMem) {
2205		assert(_task == kernel_task);
2206		theMap = IOPageableMapForAddress(kernelStart);
2207	    }
2208	    else
2209		theMap = NULL;
2210
2211            int ioplFlags = uplFlags;
2212	    dataP = getDataP(_memoryEntries);
2213	    pageInfo = getPageList(dataP);
2214            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2215
2216            vm_size_t ioplSize = round_page(numBytes);
2217            unsigned int numPageInfo = atop_32(ioplSize);
2218
2219	    if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2220		error = io_get_kernel_static_upl(theMap,
2221						kernelStart,
2222						&ioplSize,
2223						&iopl.fIOPL,
2224						baseInfo,
2225						&numPageInfo,
2226						&highPage);
2227	    }
2228	    else if (sharedMem) {
2229		error = memory_object_iopl_request(sharedMem,
2230						ptoa_32(pageIndex),
2231						&ioplSize,
2232						&iopl.fIOPL,
2233						baseInfo,
2234						&numPageInfo,
2235						&ioplFlags);
2236	    }
2237	    else {
2238		assert(theMap);
2239		error = vm_map_create_upl(theMap,
2240						startPage,
2241						(upl_size_t*)&ioplSize,
2242						&iopl.fIOPL,
2243						baseInfo,
2244						&numPageInfo,
2245						&ioplFlags);
2246	    }
2247
2248            assert(ioplSize);
2249            if (error != KERN_SUCCESS)
2250                goto abortExit;
2251
2252	    if (iopl.fIOPL)
2253		highPage = upl_get_highest_page(iopl.fIOPL);
2254	    if (highPage > highestPage)
2255		highestPage = highPage;
2256
2257            error = kIOReturnCannotWire;
2258
2259            if (baseInfo->device) {
2260                numPageInfo = 1;
2261                iopl.fFlags = kIOPLOnDevice;
2262            }
2263            else {
2264                iopl.fFlags = 0;
2265            }
2266
2267            iopl.fIOMDOffset = mdOffset;
2268            iopl.fPageInfo = pageIndex;
2269            if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
2270
2271#if 0
2272	    // the upl for auto prepares used to be removed here, to cope with errant code
2273	    // that freed memory before releasing the descriptor pointing at it
2274	    if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2275	    {
2276		upl_commit(iopl.fIOPL, 0, 0);
2277		upl_deallocate(iopl.fIOPL);
2278		iopl.fIOPL = 0;
2279	    }
2280#endif
2281
2282            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2283                // Clean up the partially created and unsaved iopl
2284                if (iopl.fIOPL) {
2285                    upl_abort(iopl.fIOPL, 0);
2286                    upl_deallocate(iopl.fIOPL);
2287                }
2288                goto abortExit;
2289            }
2290	    dataP = 0;
2291
2292            // Check for multiple iopls in one virtual range
2293            pageIndex += numPageInfo;
2294            mdOffset -= iopl.fPageOffset;
2295            if (ioplSize < numBytes) {
2296                numBytes -= ioplSize;
2297                startPage += ioplSize;
2298                mdOffset += ioplSize;
2299                iopl.fPageOffset = 0;
2300		if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2301            }
2302            else {
2303                mdOffset += numBytes;
2304                break;
2305            }
2306        }
2307    }
2308
2309    _highestPage = highestPage;
2310
2311    if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2312
2313    return kIOReturnSuccess;
2314
2315abortExit:
2316    {
2317        dataP = getDataP(_memoryEntries);
2318        UInt done = getNumIOPL(_memoryEntries, dataP);
2319        ioPLBlock *ioplList = getIOPLList(dataP);
2320
2321        for (UInt range = 0; range < done; range++)
2322	{
2323	    if (ioplList[range].fIOPL) {
2324             upl_abort(ioplList[range].fIOPL, 0);
2325             upl_deallocate(ioplList[range].fIOPL);
2326	    }
2327	}
2328	(void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2329    }
2330
2331    if (error == KERN_FAILURE)
2332        error = kIOReturnCannotWire;
2333    else if (error == KERN_MEMORY_ERROR)
2334        error = kIOReturnNoResources;
2335
2336    return error;
2337}
2338
2339bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
2340{
2341    ioGMDData * dataP;
2342    unsigned    dataSize = size;
2343
2344    if (!_memoryEntries) {
2345	_memoryEntries = OSData::withCapacity(dataSize);
2346	if (!_memoryEntries)
2347	    return false;
2348    }
2349    else if (!_memoryEntries->initWithCapacity(dataSize))
2350	return false;
2351
2352    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
2353    dataP = getDataP(_memoryEntries);
2354
2355    if (mapper == kIOMapperWaitSystem) {
2356        IOMapper::checkForSystemMapper();
2357        mapper = IOMapper::gSystem;
2358    }
2359    dataP->fMapper               = mapper;
2360    dataP->fPageCnt              = 0;
2361    dataP->fMappedBase           = 0;
2362    dataP->fDMAMapNumAddressBits = 64;
2363    dataP->fDMAMapAlignment      = 0;
2364    dataP->fPreparationID        = kIOPreparationIDUnprepared;
2365    dataP->fDiscontig            = false;
2366
2367    return (true);
2368}
2369
2370IOReturn IOMemoryDescriptor::dmaMap(
2371    IOMapper                    * mapper,
2372    const IODMAMapSpecification * mapSpec,
2373    uint64_t                      offset,
2374    uint64_t                      length,
2375    uint64_t                    * address,
2376    ppnum_t                     * mapPages)
2377{
2378    IOMDDMAWalkSegmentState  walkState;
2379    IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
2380    IOOptionBits             mdOp;
2381    IOReturn                 ret;
2382    IOPhysicalLength         segLen;
2383    addr64_t                 phys, align, pageOffset;
2384    ppnum_t                  base, pageIndex, pageCount;
2385    uint64_t                 index;
2386    uint32_t                 mapOptions = 0;
2387
2388    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2389
2390    walkArgs->fMapped = false;
2391    mdOp = kIOMDFirstSegment;
2392    pageCount = 0;
2393    for (index = 0; index < length; )
2394    {
2395	if (index && (page_mask & (index + pageOffset))) break;
2396
2397	walkArgs->fOffset = offset + index;
2398	ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2399	mdOp = kIOMDWalkSegments;
2400	if (ret != kIOReturnSuccess) break;
2401	phys = walkArgs->fIOVMAddr;
2402	segLen = walkArgs->fLength;
2403
2404	align = (phys & page_mask);
2405	if (!index) pageOffset = align;
2406	else if (align) break;
2407	pageCount += atop_64(round_page_64(align + segLen));
2408	index += segLen;
2409    }
2410
2411    if (index < length) return (kIOReturnVMError);
2412
2413    base = mapper->iovmMapMemory(this, offset, pageCount,
2414				 mapOptions, NULL, mapSpec);
2415
2416    if (!base) return (kIOReturnNoResources);
2417
2418    mdOp = kIOMDFirstSegment;
2419    for (pageIndex = 0, index = 0; index < length; )
2420    {
2421	walkArgs->fOffset = offset + index;
2422	ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2423	mdOp = kIOMDWalkSegments;
2424	if (ret != kIOReturnSuccess) break;
2425	phys = walkArgs->fIOVMAddr;
2426	segLen = walkArgs->fLength;
2427
2428    	ppnum_t page = atop_64(phys);
2429    	ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
2430	while (count--)
2431	{
2432	    mapper->iovmInsert(base, pageIndex, page);
2433	    page++;
2434	    pageIndex++;
2435	}
2436	index += segLen;
2437    }
2438    if (pageIndex != pageCount) panic("pageIndex");
2439
2440    *address = ptoa_64(base) + pageOffset;
2441    if (mapPages) *mapPages = pageCount;
2442
2443    return (kIOReturnSuccess);
2444}
2445
2446IOReturn IOGeneralMemoryDescriptor::dmaMap(
2447    IOMapper                    * mapper,
2448    const IODMAMapSpecification * mapSpec,
2449    uint64_t                      offset,
2450    uint64_t                      length,
2451    uint64_t                    * address,
2452    ppnum_t                     * mapPages)
2453{
2454    IOReturn          err = kIOReturnSuccess;
2455    ioGMDData *       dataP;
2456    IOOptionBits      type = _flags & kIOMemoryTypeMask;
2457
2458    *address = 0;
2459    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
2460
2461    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
2462     || offset || (length != _length))
2463    {
2464	err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
2465    }
2466    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
2467    {
2468	const ioPLBlock * ioplList = getIOPLList(dataP);
2469	upl_page_info_t * pageList;
2470	uint32_t          mapOptions = 0;
2471	ppnum_t           base;
2472
2473	IODMAMapSpecification mapSpec;
2474	bzero(&mapSpec, sizeof(mapSpec));
2475	mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2476	mapSpec.alignment = dataP->fDMAMapAlignment;
2477
2478	// For external UPLs the fPageInfo field points directly to
2479	// the upl's upl_page_info_t array.
2480	if (ioplList->fFlags & kIOPLExternUPL)
2481	{
2482	    pageList = (upl_page_info_t *) ioplList->fPageInfo;
2483	    mapOptions |= kIODMAMapPagingPath;
2484	}
2485	else
2486	    pageList = getPageList(dataP);
2487
2488    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2489
2490	// Check for direct device non-paged memory
2491	if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
2492
2493	base = mapper->iovmMapMemory(
2494			this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
2495	*address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
2496	if (mapPages) *mapPages = _pages;
2497    }
2498
2499    return (err);
2500}
2501
2502/*
2503 * prepare
2504 *
2505 * Prepare the memory for an I/O transfer.  This involves paging in
2506 * the memory, if necessary, and wiring it down for the duration of
2507 * the transfer.  The complete() method completes the processing of
2508 * the memory after the I/O transfer finishes.  This method need not be
2509 * called for non-pageable memory.
2510 */
2511
2512IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2513{
2514    IOReturn error    = kIOReturnSuccess;
2515    IOOptionBits type = _flags & kIOMemoryTypeMask;
2516
2517    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2518	return kIOReturnSuccess;
2519
2520    if (_prepareLock)
2521	IOLockLock(_prepareLock);
2522
2523    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
2524    {
2525	error = wireVirtual(forDirection);
2526    }
2527
2528    if (kIOReturnSuccess == error)
2529    {
2530	if (1 == ++_wireCount)
2531	{
2532	    if (kIOMemoryClearEncrypt & _flags)
2533	    {
2534		performOperation(kIOMemoryClearEncrypted, 0, _length);
2535	    }
2536	}
2537    }
2538
2539    if (_prepareLock)
2540	IOLockUnlock(_prepareLock);
2541
2542    return error;
2543}
2544
2545/*
2546 * complete
2547 *
2548 * Complete processing of the memory after an I/O transfer finishes.
2549 * This method should not be called unless a prepare() was previously
2550 * issued; prepare() and complete() must occur in pairs, before and
2551 * after an I/O transfer involving pageable memory.
2552 */
2553
2554IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2555{
2556    IOOptionBits type = _flags & kIOMemoryTypeMask;
2557
2558    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2559	return kIOReturnSuccess;
2560
2561    if (_prepareLock)
2562	IOLockLock(_prepareLock);
2563
2564    assert(_wireCount);
2565
2566    if (_wireCount)
2567    {
2568        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2569        {
2570            performOperation(kIOMemorySetEncrypted, 0, _length);
2571        }
2572
2573	_wireCount--;
2574	if (!_wireCount)
2575	{
2576	    IOOptionBits type = _flags & kIOMemoryTypeMask;
2577	    ioGMDData * dataP = getDataP(_memoryEntries);
2578	    ioPLBlock *ioplList = getIOPLList(dataP);
2579	    UInt count = getNumIOPL(_memoryEntries, dataP);
2580
2581#if IOMD_DEBUG_DMAACTIVE
2582	    if (__iomd_reservedA) panic("complete() while dma active");
2583#endif /* IOMD_DEBUG_DMAACTIVE */
2584
2585	    if (dataP->fMappedBase) {
2586		dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
2587	        dataP->fMappedBase = 0;
2588            }
2589	    // Only complete iopls that we created, i.e. those for the virtual types
2590	    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2591		for (UInt ind = 0; ind < count; ind++)
2592		    if (ioplList[ind].fIOPL) {
2593			 upl_commit(ioplList[ind].fIOPL, 0, 0);
2594			 upl_deallocate(ioplList[ind].fIOPL);
2595		    }
2596	    } else if (kIOMemoryTypeUPL == type) {
2597		upl_set_referenced(ioplList[0].fIOPL, false);
2598	    }
2599
2600	    (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2601
2602	    dataP->fPreparationID = kIOPreparationIDUnprepared;
2603	}
2604    }
2605
2606    if (_prepareLock)
2607	IOLockUnlock(_prepareLock);
2608
2609    return kIOReturnSuccess;
2610}
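
/*
 * Illustrative sketch only (not part of this file's implementation): a driver
 * typically brackets a DMA transfer with prepare()/complete() as described in
 * the comments above.  The buffer address, length and task used below are
 * assumptions for the example, not values defined anywhere in this file.
 */
#if 0
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
				exampleBufferAddr /* assumed */,
				exampleBufferLen  /* assumed */,
				kIODirectionOut,
				exampleTask       /* assumed */);
    if (md && (kIOReturnSuccess == md->prepare()))
    {
	// the memory is now paged in and wired; program and run the DMA engine here
	md->complete();		// must pair with the successful prepare()
    }
    if (md) md->release();
#endif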
2611
2612IOReturn IOGeneralMemoryDescriptor::doMap(
2613	vm_map_t		__addressMap,
2614	IOVirtualAddress *	__address,
2615	IOOptionBits		options,
2616	IOByteCount		__offset,
2617	IOByteCount		__length )
2618
2619{
2620#ifndef __LP64__
2621    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2622#endif /* !__LP64__ */
2623
2624    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
2625    mach_vm_size_t offset  = mapping->fOffset + __offset;
2626    mach_vm_size_t length  = mapping->fLength;
2627
2628    kern_return_t kr = kIOReturnVMError;
2629    ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2630
2631    IOOptionBits type = _flags & kIOMemoryTypeMask;
2632    Ranges vec = _ranges;
2633
2634    user_addr_t range0Addr = 0;
2635    IOByteCount range0Len = 0;
2636
2637    if ((offset >= _length) || ((offset + length) > _length))
2638	return( kIOReturnBadArgument );
2639
2640    if (vec.v)
2641	getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2642
2643    // mapping source == dest? (could be much better)
2644    if( _task
2645     && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2646     && (1 == _rangesCount) && (0 == offset)
2647     && range0Addr && (length <= range0Len) )
2648    {
2649	mapping->fAddress = range0Addr;
2650	mapping->fOptions |= kIOMapStatic;
2651
2652	return( kIOReturnSuccess );
2653    }
2654
2655    if( 0 == sharedMem) {
2656
2657        vm_size_t size = ptoa_32(_pages);
2658
2659        if( _task) {
2660
2661            memory_object_size_t actualSize = size;
2662	    vm_prot_t            prot       = VM_PROT_READ;
2663	    if (!(kIOMapReadOnly & options))
2664		prot |= VM_PROT_WRITE;
2665	    else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2666		prot |= VM_PROT_WRITE;
2667
2668            if (_rangesCount == 1)
2669            {
2670                kr = mach_make_memory_entry_64(get_task_map(_task),
2671                                                &actualSize, range0Addr,
2672                                                prot, &sharedMem,
2673                                                NULL);
2674            }
2675            if( (_rangesCount != 1)
2676                || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2677            do
2678	    {
2679#if IOASSERT
2680                IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2681		      _rangesCount, (UInt64)actualSize, (UInt64)size);
2682#endif
2683                kr = kIOReturnVMError;
2684                if (sharedMem)
2685                {
2686                    ipc_port_release_send(sharedMem);
2687                    sharedMem = MACH_PORT_NULL;
2688                }
2689
2690		mach_vm_address_t address, segDestAddr;
2691                mach_vm_size_t    mapLength;
2692                unsigned          rangesIndex;
2693                IOOptionBits      type = _flags & kIOMemoryTypeMask;
2694                user_addr_t       srcAddr;
2695                IOPhysicalLength  segLen = 0;
2696
2697                // Find starting address within the vector of ranges
2698                for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2699                    getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2700                    if (offset < segLen)
2701                        break;
2702                    offset -= segLen; // (make offset relative)
2703                }
2704
2705		mach_vm_size_t    pageOffset = (srcAddr & PAGE_MASK);
2706		address = trunc_page_64(mapping->fAddress);
2707
2708		if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2709		{
2710		    vm_map_t map = mapping->fAddressMap;
2711		    kr = IOMemoryDescriptorMapCopy(&map,
2712						    options,
2713						    offset, &address, round_page_64(length + pageOffset));
2714                    if (kr == KERN_SUCCESS)
2715                    {
2716                        segDestAddr  = address;
2717                        segLen      -= offset;
2718                        srcAddr     += offset;
2719                        mapLength    = length;
2720
2721                        while (true)
2722                        {
2723                            vm_prot_t cur_prot, max_prot;
2724
2725                            if (segLen > length) segLen = length;
2726                            kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2727                                                    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2728                                                    get_task_map(_task), trunc_page_64(srcAddr),
2729                                                    FALSE /* copy */,
2730                                                    &cur_prot,
2731                                                    &max_prot,
2732                                                    VM_INHERIT_NONE);
2733                            if (KERN_SUCCESS == kr)
2734                            {
2735                                if ((!(VM_PROT_READ & cur_prot))
2736                                    || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2737                                {
2738                                    kr = KERN_PROTECTION_FAILURE;
2739                                }
2740                            }
2741                            if (KERN_SUCCESS != kr)
2742                                break;
2743                            segDestAddr += segLen;
2744                            mapLength   -= segLen;
2745                            if (!mapLength)
2746                                break;
2747                            rangesIndex++;
2748                            if (rangesIndex >= _rangesCount)
2749                            {
2750                                kr = kIOReturnBadArgument;
2751                                break;
2752                            }
2753                            getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2754                            if (srcAddr & PAGE_MASK)
2755                            {
2756                                kr = kIOReturnBadArgument;
2757                                break;
2758                            }
2759                            if (segLen > mapLength)
2760                                segLen = mapLength;
2761                        }
2762                        if (KERN_SUCCESS != kr)
2763                        {
2764                            mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2765                        }
2766                    }
2767
2768		    if (KERN_SUCCESS == kr)
2769			mapping->fAddress = address + pageOffset;
2770		    else
2771			mapping->fAddress = NULL;
2772		}
2773            }
2774            while (false);
2775        }
2776	else do
2777	{	// _task == 0, must be physical
2778
2779            memory_object_t 	pager;
2780	    unsigned int    	flags = 0;
2781    	    addr64_t		pa;
2782    	    IOPhysicalLength	segLen;
2783
2784	    pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2785
2786            if( !getKernelReserved())
2787                continue;
2788            reserved->dp.pagerContig = (1 == _rangesCount);
2789	    reserved->dp.memory      = this;
2790
2791	    /* What cache mode do we need? */
2792            switch(options & kIOMapCacheMask ) {
2793
2794		case kIOMapDefaultCache:
2795		default:
2796		    flags = IODefaultCacheBits(pa);
2797		    if (DEVICE_PAGER_CACHE_INHIB & flags)
2798		    {
2799			if (DEVICE_PAGER_GUARDED & flags)
2800			    mapping->fOptions |= kIOMapInhibitCache;
2801			else
2802			    mapping->fOptions |= kIOMapWriteCombineCache;
2803		    }
2804		    else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2805			mapping->fOptions |= kIOMapWriteThruCache;
2806		    else
2807			mapping->fOptions |= kIOMapCopybackCache;
2808		    break;
2809
2810		case kIOMapInhibitCache:
2811		    flags = DEVICE_PAGER_CACHE_INHIB |
2812				    DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2813		    break;
2814
2815		case kIOMapWriteThruCache:
2816		    flags = DEVICE_PAGER_WRITE_THROUGH |
2817				    DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2818		    break;
2819
2820		case kIOMapCopybackCache:
2821		    flags = DEVICE_PAGER_COHERENT;
2822		    break;
2823
2824		case kIOMapWriteCombineCache:
2825		    flags = DEVICE_PAGER_CACHE_INHIB |
2826				    DEVICE_PAGER_COHERENT;
2827		    break;
2828            }
2829
2830	    flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2831
2832            pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2833								size, flags);
2834            assert( pager );
2835
2836            if( pager) {
2837                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2838                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2839
2840                assert( KERN_SUCCESS == kr );
2841                if( KERN_SUCCESS != kr)
2842		{
2843		    device_pager_deallocate( pager );
2844                    pager = MACH_PORT_NULL;
2845                    sharedMem = MACH_PORT_NULL;
2846                }
2847            }
2848	    if( pager && sharedMem)
2849		reserved->dp.devicePager    = pager;
2850
2851        } while( false );
2852
2853        _memEntry = (void *) sharedMem;
2854    }
2855
2856    IOReturn result;
2857    if (0 == sharedMem)
2858      result = kr;
2859    else
2860      result = super::doMap( __addressMap, __address,
2861					options, __offset, __length );
2862
2863    return( result );
2864}
2865
2866IOReturn IOGeneralMemoryDescriptor::doUnmap(
2867	vm_map_t		addressMap,
2868	IOVirtualAddress	__address,
2869	IOByteCount		__length )
2870{
2871    return (super::doUnmap(addressMap, __address, __length));
2872}
2873
2874/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2875
2876#undef super
2877#define super OSObject
2878
2879OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2880
2881OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2882OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2883OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2884OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2885OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2886OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2887OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2888OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2889
2890/* ex-inline function implementation */
2891IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2892    { return( getPhysicalSegment( 0, 0 )); }
2893
2894/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2895
2896bool IOMemoryMap::init(
2897        task_t			intoTask,
2898        mach_vm_address_t	toAddress,
2899        IOOptionBits		_options,
2900        mach_vm_size_t		_offset,
2901        mach_vm_size_t		_length )
2902{
2903    if (!intoTask)
2904	return( false);
2905
2906    if (!super::init())
2907	return(false);
2908
2909    fAddressMap  = get_task_map(intoTask);
2910    if (!fAddressMap)
2911	return(false);
2912    vm_map_reference(fAddressMap);
2913
2914    fAddressTask = intoTask;
2915    fOptions     = _options;
2916    fLength      = _length;
2917    fOffset	 = _offset;
2918    fAddress     = toAddress;
2919
2920    return (true);
2921}
2922
2923bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2924{
2925    if (!_memory)
2926	return(false);
2927
2928    if (!fSuperMap)
2929    {
2930	if( (_offset + fLength) > _memory->getLength())
2931	    return( false);
2932	fOffset = _offset;
2933    }
2934
2935    _memory->retain();
2936    if (fMemory)
2937    {
2938	if (fMemory != _memory)
2939	    fMemory->removeMapping(this);
2940	fMemory->release();
2941    }
2942    fMemory = _memory;
2943
2944    return( true );
2945}
2946
2947struct IOMemoryDescriptorMapAllocRef
2948{
2949    ipc_port_t		sharedMem;
2950    vm_map_t            map;
2951    mach_vm_address_t	mapped;
2952    mach_vm_size_t	size;
2953    mach_vm_size_t	sourceOffset;
2954    IOOptionBits	options;
2955};
2956
2957static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2958{
2959    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2960    IOReturn			    err;
2961
2962    do {
2963        if( ref->sharedMem)
2964	{
2965            vm_prot_t prot = VM_PROT_READ
2966                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2967
2968	    // VM system requires write access to change cache mode
2969	    if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2970		prot |= VM_PROT_WRITE;
2971
2972            // set memory entry cache
2973            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2974            switch (ref->options & kIOMapCacheMask)
2975            {
2976		case kIOMapInhibitCache:
2977                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2978                    break;
2979
2980		case kIOMapWriteThruCache:
2981                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2982                    break;
2983
2984		case kIOMapWriteCombineCache:
2985                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2986                    break;
2987
2988		case kIOMapCopybackCache:
2989                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2990                    break;
2991
2992		case kIOMapCopybackInnerCache:
2993                    SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2994                    break;
2995
2996		case kIOMapDefaultCache:
2997		default:
2998                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2999                    break;
3000            }
3001
3002            vm_size_t unused = 0;
3003
3004            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
3005                                            memEntryCacheMode, NULL, ref->sharedMem );
3006            if (KERN_SUCCESS != err)
3007                IOLog("MAP_MEM_ONLY failed %d\n", err);
3008
3009            err = mach_vm_map( map,
3010                            &ref->mapped,
3011                            ref->size, 0 /* mask */,
3012                            (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
3013                            | VM_MAKE_TAG(VM_MEMORY_IOKIT),
3014                            ref->sharedMem, ref->sourceOffset,
3015                            false, // copy
3016                            prot, // cur
3017                            prot, // max
3018                            VM_INHERIT_NONE);
3019
3020            if( KERN_SUCCESS != err) {
3021                ref->mapped = 0;
3022                continue;
3023            }
3024            ref->map = map;
3025        }
3026	else
3027	{
3028            err = mach_vm_allocate(map, &ref->mapped, ref->size,
3029                            ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
3030                            | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
3031            if( KERN_SUCCESS != err) {
3032                ref->mapped = 0;
3033                continue;
3034            }
3035            ref->map = map;
3036            // make sure these allocations are not copied if the task forks.
3037            err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
3038            assert( KERN_SUCCESS == err );
3039        }
3040    }
3041    while( false );
3042
3043    return( err );
3044}
3045
3046kern_return_t
3047IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
3048				mach_vm_size_t offset,
3049				mach_vm_address_t * address, mach_vm_size_t length)
3050{
3051    IOReturn err;
3052    IOMemoryDescriptorMapAllocRef ref;
3053
3054    ref.map          = *map;
3055    ref.sharedMem    = entry;
3056    ref.sourceOffset = trunc_page_64(offset);
3057    ref.options	     = options;
3058    ref.size         = length;
3059
3060    if (options & kIOMapAnywhere)
3061	// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3062	ref.mapped = 0;
3063    else
3064	ref.mapped = *address;
3065
3066    if( ref.sharedMem && (ref.map == kernel_map) && pageable)
3067	err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
3068    else
3069	err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
3070
3071    *address = ref.mapped;
3072    *map     = ref.map;
3073
3074    return (err);
3075}
3076
3077kern_return_t
3078IOMemoryDescriptorMapCopy(vm_map_t * map,
3079				IOOptionBits options,
3080				mach_vm_size_t offset,
3081				mach_vm_address_t * address, mach_vm_size_t length)
3082{
3083    IOReturn err;
3084    IOMemoryDescriptorMapAllocRef ref;
3085
3086    ref.map          = *map;
3087    ref.sharedMem    = NULL;
3088    ref.sourceOffset = trunc_page_64(offset);
3089    ref.options	     = options;
3090    ref.size         = length;
3091
3092    if (options & kIOMapAnywhere)
3093	// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3094	ref.mapped = 0;
3095    else
3096	ref.mapped = *address;
3097
3098    if (ref.map == kernel_map)
3099	err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
3100    else
3101	err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
3102
3103    *address = ref.mapped;
3104    *map     = ref.map;
3105
3106    return (err);
3107}
3108
3109IOReturn IOMemoryDescriptor::doMap(
3110	vm_map_t		__addressMap,
3111	IOVirtualAddress *	__address,
3112	IOOptionBits		options,
3113	IOByteCount		__offset,
3114	IOByteCount		__length )
3115{
3116#ifndef __LP64__
3117    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
3118#endif /* !__LP64__ */
3119
3120    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
3121    mach_vm_size_t offset  = mapping->fOffset + __offset;
3122    mach_vm_size_t length  = mapping->fLength;
3123
3124    IOReturn	      err = kIOReturnSuccess;
3125    memory_object_t   pager;
3126    mach_vm_size_t    pageOffset;
3127    IOPhysicalAddress sourceAddr;
3128    unsigned int lock_count;
3129
3130    do
3131    {
3132	sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
3133	pageOffset = sourceAddr - trunc_page( sourceAddr );
3134
3135	if( reserved)
3136	    pager = (memory_object_t) reserved->dp.devicePager;
3137	else
3138	    pager = MACH_PORT_NULL;
3139
3140	if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3141	{
3142	    upl_t	   redirUPL2;
3143	    vm_size_t      size;
3144	    int		   flags;
3145
3146	    if (!_memEntry)
3147	    {
3148		err = kIOReturnNotReadable;
3149		continue;
3150	    }
3151
3152	    size = round_page(mapping->fLength + pageOffset);
3153	    flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3154			| UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3155
3156	    if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
3157					    NULL, NULL,
3158					    &flags))
3159		redirUPL2 = NULL;
3160
3161	    for (lock_count = 0;
3162		 IORecursiveLockHaveLock(gIOMemoryLock);
3163		 lock_count++) {
3164	      UNLOCK;
3165	    }
3166	    err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3167	    for (;
3168		 lock_count;
3169		 lock_count--) {
3170	      LOCK;
3171	    }
3172
3173	    if (kIOReturnSuccess != err)
3174	    {
3175		IOLog("upl_transpose(%x)\n", err);
3176		err = kIOReturnSuccess;
3177	    }
3178
3179	    if (redirUPL2)
3180	    {
3181		upl_commit(redirUPL2, NULL, 0);
3182		upl_deallocate(redirUPL2);
3183		redirUPL2 = 0;
3184	    }
3185	    {
3186		// swap the memEntries since they now refer to different vm_objects
3187		void * me = _memEntry;
3188		_memEntry = mapping->fMemory->_memEntry;
3189		mapping->fMemory->_memEntry = me;
3190	    }
3191	    if (pager)
3192		err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3193	}
3194	else
3195	{
3196	    mach_vm_address_t address;
3197
3198	    if (!(options & kIOMapAnywhere))
3199	    {
3200		address = trunc_page_64(mapping->fAddress);
3201		if( (mapping->fAddress - address) != pageOffset)
3202		{
3203		    err = kIOReturnVMError;
3204		    continue;
3205		}
3206	    }
3207
3208            vm_map_t map = mapping->fAddressMap;
3209	    err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
3210						    options, (kIOMemoryBufferPageable & _flags),
3211						    offset, &address, round_page_64(length + pageOffset));
3212	    if( err != KERN_SUCCESS)
3213		continue;
3214
3215	    if (!_memEntry || pager)
3216	    {
3217		err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
3218		if (err != KERN_SUCCESS)
3219		    doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
3220	    }
3221
3222#if DEBUG
3223	if (kIOLogMapping & gIOKitDebug)
3224	    IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
3225		  err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
3226#endif
3227
3228	    if (err == KERN_SUCCESS)
3229		mapping->fAddress = address + pageOffset;
3230	    else
3231		mapping->fAddress = NULL;
3232	}
3233    }
3234    while( false );
3235
3236    return (err);
3237}
3238
3239IOReturn IOMemoryDescriptor::handleFault(
3240        void *			_pager,
3241	vm_map_t		addressMap,
3242	mach_vm_address_t	address,
3243	mach_vm_size_t		sourceOffset,
3244	mach_vm_size_t		length,
3245        IOOptionBits		options )
3246{
3247    IOReturn		err = kIOReturnSuccess;
3248    memory_object_t	pager = (memory_object_t) _pager;
3249    mach_vm_size_t	size;
3250    mach_vm_size_t	bytes;
3251    mach_vm_size_t	page;
3252    mach_vm_size_t	pageOffset;
3253    mach_vm_size_t	pagerOffset;
3254    IOPhysicalLength	segLen;
3255    addr64_t		physAddr;
3256
3257    if( !addressMap)
3258    {
3259        if( kIOMemoryRedirected & _flags)
3260	{
3261#if DEBUG
3262            IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3263#endif
3264            do {
3265	    	SLEEP;
3266            } while( kIOMemoryRedirected & _flags );
3267        }
3268
3269        return( kIOReturnSuccess );
3270    }
3271
3272    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3273    assert( physAddr );
3274    pageOffset = physAddr - trunc_page_64( physAddr );
3275    pagerOffset = sourceOffset;
3276
3277    size = length + pageOffset;
3278    physAddr -= pageOffset;
3279
3280    segLen += pageOffset;
3281    bytes = size;
3282    do
3283    {
3284	// in the middle of the loop only map whole pages
3285	if( segLen >= bytes)
3286	    segLen = bytes;
3287	else if( segLen != trunc_page( segLen))
3288	    err = kIOReturnVMError;
3289        if( physAddr != trunc_page_64( physAddr))
3290	    err = kIOReturnBadArgument;
3291	if (kIOReturnSuccess != err)
3292	    break;
3293
3294#if DEBUG
3295	if( kIOLogMapping & gIOKitDebug)
3296	    IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
3297                addressMap, address + pageOffset, physAddr + pageOffset,
3298		segLen - pageOffset);
3299#endif
3300
3301
3302        if( pager) {
3303            if( reserved && reserved->dp.pagerContig) {
3304                IOPhysicalLength	allLen;
3305                addr64_t		allPhys;
3306
3307                allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
3308                assert( allPhys );
3309		err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
3310            }
3311	    else
3312	    {
3313
3314		for( page = 0;
3315                     (page < segLen) && (KERN_SUCCESS == err);
3316                     page += page_size)
3317		{
3318		    err = device_pager_populate_object(pager, pagerOffset,
3319			    (ppnum_t)(atop_64(physAddr + page)), page_size);
3320		    pagerOffset += page_size;
3321                }
3322            }
3323            assert( KERN_SUCCESS == err );
3324            if( err)
3325                break;
3326        }
3327
3328	// This call to vm_fault forces an early pmap-level resolution of the
3329	// kernel mappings created above, since faulting them in later cannot
3330	// be done from interrupt level.
3331	/*  *** ALERT *** */
3332	/*  *** Temporary Workaround *** */
3333
3334	if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3335	{
3336		vm_fault(addressMap,
3337			 (vm_map_offset_t)address,
3338			 VM_PROT_READ|VM_PROT_WRITE,
3339			 FALSE, THREAD_UNINT, NULL,
3340			 (vm_map_offset_t)0);
3341	}
3342
3343	/*  *** Temporary Workaround *** */
3344	/*  *** ALERT *** */
3345
3346	sourceOffset += segLen - pageOffset;
3347	address += segLen;
3348	bytes -= segLen;
3349	pageOffset = 0;
3350
3351    }
3352    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3353
3354    if (bytes)
3355        err = kIOReturnBadArgument;
3356
3357    return (err);
3358}
3359
3360IOReturn IOMemoryDescriptor::doUnmap(
3361	vm_map_t		addressMap,
3362	IOVirtualAddress	__address,
3363	IOByteCount		__length )
3364{
3365    IOReturn	      err;
3366    mach_vm_address_t address;
3367    mach_vm_size_t    length;
3368
3369    if (__length)
3370    {
3371	address = __address;
3372	length  = __length;
3373    }
3374    else
3375    {
3376	addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3377	address    = ((IOMemoryMap *) __address)->fAddress;
3378	length     = ((IOMemoryMap *) __address)->fLength;
3379    }
3380
3381    if ((addressMap == kernel_map)
3382        && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3383	addressMap = IOPageableMapForAddress( address );
3384
3385#if DEBUG
3386    if( kIOLogMapping & gIOKitDebug)
3387	IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3388		addressMap, address, length );
3389#endif
3390
3391    err = mach_vm_deallocate( addressMap, address, length );
3392
3393    return (err);
3394}
3395
3396IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3397{
3398    IOReturn		err = kIOReturnSuccess;
3399    IOMemoryMap *	mapping = 0;
3400    OSIterator *	iter;
3401
3402    LOCK;
3403
3404    if( doRedirect)
3405        _flags |= kIOMemoryRedirected;
3406    else
3407        _flags &= ~kIOMemoryRedirected;
3408
3409    do {
3410	if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3411
3412	    memory_object_t   pager;
3413
3414	    if( reserved)
3415		pager = (memory_object_t) reserved->dp.devicePager;
3416	    else
3417		pager = MACH_PORT_NULL;
3418
3419	    while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3420	    {
3421		mapping->redirect( safeTask, doRedirect );
3422		if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3423		{
3424		    err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3425		}
3426	    }
3427
3428	    iter->release();
3429	}
3430    } while( false );
3431
3432    if (!doRedirect)
3433    {
3434        WAKEUP;
3435    }
3436
3437    UNLOCK;
3438
3439#ifndef __LP64__
3440    // temporary binary compatibility
3441    IOSubMemoryDescriptor * subMem;
3442    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3443	err = subMem->redirect( safeTask, doRedirect );
3444    else
3445	err = kIOReturnSuccess;
3446#endif /* !__LP64__ */
3447
3448    return( err );
3449}
3450
3451IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3452{
3453    IOReturn err = kIOReturnSuccess;
3454
3455    if( fSuperMap) {
3456//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3457    } else {
3458
3459        LOCK;
3460
3461	do
3462	{
3463	    if (!fAddress)
3464		break;
3465	    if (!fAddressMap)
3466		break;
3467
3468	    if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3469	      && (0 == (fOptions & kIOMapStatic)))
3470	    {
3471		IOUnmapPages( fAddressMap, fAddress, fLength );
3472		err = kIOReturnSuccess;
3473#if DEBUG
3474		IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3475#endif
3476	    }
3477	    else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3478	    {
3479		IOOptionBits newMode;
3480		newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3481		IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3482	    }
3483	}
3484	while (false);
3485	UNLOCK;
3486    }
3487
3488    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3489	 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3490     && safeTask
3491     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3492	fMemory->redirect(safeTask, doRedirect);
3493
3494    return( err );
3495}
3496
3497IOReturn IOMemoryMap::unmap( void )
3498{
3499    IOReturn	err;
3500
3501    LOCK;
3502
3503    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3504	&& (0 == (fOptions & kIOMapStatic))) {
3505
3506        vm_map_iokit_unmapped_region(fAddressMap, fLength);
3507
3508        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3509
3510    } else
3511	err = kIOReturnSuccess;
3512
3513    if (fAddressMap)
3514    {
3515        vm_map_deallocate(fAddressMap);
3516        fAddressMap = 0;
3517    }
3518
3519    fAddress = 0;
3520
3521    UNLOCK;
3522
3523    return( err );
3524}
3525
3526void IOMemoryMap::taskDied( void )
3527{
3528    LOCK;
3529    if (fUserClientUnmap)
3530	unmap();
3531    if( fAddressMap) {
3532        vm_map_deallocate(fAddressMap);
3533        fAddressMap = 0;
3534    }
3535    fAddressTask = 0;
3536    fAddress	 = 0;
3537    UNLOCK;
3538}
3539
3540IOReturn IOMemoryMap::userClientUnmap( void )
3541{
3542    fUserClientUnmap = true;
3543    return (kIOReturnSuccess);
3544}
3545
3546// Overload the release mechanism.  All mappings must be a member of a
3547// memory descriptor's _mappings set.  This means that we always have 2
3548// references on a mapping.  When either of these references is released
3549// we need to free ourselves.
3550void IOMemoryMap::taggedRelease(const void *tag) const
3551{
3552    LOCK;
3553    super::taggedRelease(tag, 2);
3554    UNLOCK;
3555}
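
/*
 * Illustrative sketch only (not part of this file's implementation): because
 * the creator of a mapping and the owning descriptor's _mappings set each hold
 * a reference, the map is freed only after both are dropped.  "md" below is an
 * assumed, already-created IOMemoryDescriptor.
 */
#if 0
    IOMemoryMap * map = md->map();	// retained by the caller and by md's _mappings set
    // ... use map->getAddress() / map->getLength() ...
    map->release();			// drops the caller's reference; free() runs once the
					// descriptor also removes this mapping from _mappings
#endif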
3556
3557void IOMemoryMap::free()
3558{
3559    unmap();
3560
3561    if (fMemory)
3562    {
3563        LOCK;
3564	fMemory->removeMapping(this);
3565	UNLOCK;
3566	fMemory->release();
3567    }
3568
3569    if (fOwner && (fOwner != fMemory))
3570    {
3571        LOCK;
3572	fOwner->removeMapping(this);
3573	UNLOCK;
3574    }
3575
3576    if (fSuperMap)
3577	fSuperMap->release();
3578
3579    if (fRedirUPL) {
3580	upl_commit(fRedirUPL, NULL, 0);
3581	upl_deallocate(fRedirUPL);
3582    }
3583
3584    super::free();
3585}
3586
3587IOByteCount IOMemoryMap::getLength()
3588{
3589    return( fLength );
3590}
3591
3592IOVirtualAddress IOMemoryMap::getVirtualAddress()
3593{
3594#ifndef __LP64__
3595    if (fSuperMap)
3596	fSuperMap->getVirtualAddress();
3597    else if (fAddressMap
3598		&& vm_map_is_64bit(fAddressMap)
3599		&& (sizeof(IOVirtualAddress) < 8))
3600    {
3601	OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3602    }
3603#endif /* !__LP64__ */
3604
3605    return (fAddress);
3606}
3607
3608#ifndef __LP64__
3609mach_vm_address_t 	IOMemoryMap::getAddress()
3610{
3611    return( fAddress);
3612}
3613
3614mach_vm_size_t 	IOMemoryMap::getSize()
3615{
3616    return( fLength );
3617}
3618#endif /* !__LP64__ */
3619
3620
3621task_t IOMemoryMap::getAddressTask()
3622{
3623    if( fSuperMap)
3624	return( fSuperMap->getAddressTask());
3625    else
3626        return( fAddressTask);
3627}
3628
3629IOOptionBits IOMemoryMap::getMapOptions()
3630{
3631    return( fOptions);
3632}
3633
3634IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3635{
3636    return( fMemory );
3637}
3638
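// Decide whether this existing mapping can satisfy a new mapping request in the
// same task: the read-only protection, cache mode and (unless kIOMapAnywhere)
// the target address must agree, and the requested offset/length must lie
// inside this mapping.  Returns this mapping (retained) for an exact match,
// otherwise re-parents the new mapping as a submap of this one, or returns 0
// if the mappings are incompatible.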
IOMemoryMap * IOMemoryMap::copyCompatible(
		IOMemoryMap * newMapping )
{
    task_t		task      = newMapping->getAddressTask();
    mach_vm_address_t	toAddress = newMapping->fAddress;
    IOOptionBits	_options  = newMapping->fOptions;
    mach_vm_size_t	_offset   = newMapping->fOffset;
    mach_vm_size_t	_length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
	return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
	return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((fOptions ^ _options) & kIOMapCacheMask))
	return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
	return( 0 );

    if( _offset < fOffset)
	return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
	return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
	newMapping = this;
    }
    else
    {
	newMapping->fSuperMap = this;
	newMapping->fOffset   = fOffset + _offset;
	newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}

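// Wire or unwire the pages backing a sub-range of this mapping.  A direction
// in the options selects vm_map_wire() for the page-aligned span; no direction
// selects vm_map_unwire().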
IOReturn IOMemoryMap::wireRange(
    	uint32_t		options,
        mach_vm_size_t		offset,
        mach_vm_size_t		length)
{
    IOReturn kr;
    mach_vm_address_t start = trunc_page_64(fAddress + offset);
    mach_vm_address_t end   = round_page_64(fAddress + offset + length);

    if (kIODirectionOutIn & options)
    {
	kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
    }
    else
    {
	kr = vm_map_unwire(fAddressMap, start, end, FALSE);
    }

    return (kr);
}


IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress	address;

    LOCK;
#ifdef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

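// One-time class setup: allocate the global recursive lock used by the LOCK /
// UNLOCK macros and record the last physical page number.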
void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
	gIOMemoryLock = IORecursiveLockAlloc();

    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
	_mappings->release();

    super::free();
}

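// The convenience mapping entry points below all funnel into
// createMappingInTask(): setMapping() pins the mapping at a caller-chosen
// address with kIOMapStatic, while map() maps the whole descriptor anywhere
// in the kernel task.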
IOMemoryMap * IOMemoryDescriptor::setMapping(
	task_t			intoTask,
	IOVirtualAddress	mapAddress,
	IOOptionBits		options )
{
    return (createMappingInTask( intoTask, mapAddress,
				    options | kIOMapStatic,
				    0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
	IOOptionBits		options )
{
    return (createMappingInTask( kernel_task, 0,
				options | kIOMapAnywhere,
				0, getLength() ));
}

#ifndef __LP64__
IOMemoryMap * IOMemoryDescriptor::map(
	task_t		        intoTask,
	IOVirtualAddress	atAddress,
	IOOptionBits		options,
	IOByteCount		offset,
	IOByteCount		length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
	OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
	return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
				options, offset, length));
}
#endif /* !__LP64__ */

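// Create a mapping of this descriptor in the given task; a zero length maps
// the whole descriptor.  A minimal usage sketch (hypothetical caller code, not
// part of this file; md is an already-created IOMemoryDescriptor):
//
//	IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
//					kIOMapAnywhere | kIOMapReadOnly, 0, 0);
//	if (map) {
//	    IOVirtualAddress va = map->getVirtualAddress();
//	    /* ... access the memory through va ... */
//	    map->release();
//	}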
IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
	task_t			intoTask,
	mach_vm_address_t	atAddress,
	IOOptionBits		options,
	mach_vm_size_t		offset,
	mach_vm_size_t		length)
{
    IOMemoryMap * result;
    IOMemoryMap * mapping;

    if (0 == length)
	length = getLength();

    mapping = new IOMemoryMap;

    if( mapping
     && !mapping->init( intoTask, atAddress,
			options, offset, length )) {
	mapping->release();
	mapping = 0;
    }

    if (mapping)
	result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
	result = 0;

#if DEBUG
    if (!result)
	IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		this, atAddress, (uint32_t) options, offset, length);
#endif

    return (result);
}

#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
			        IOOptionBits         options,
			        IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif

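// Retarget this mapping at a different backing descriptor.  While no new
// backing memory is supplied, the existing pages are unmapped and further
// access is blocked with a UPL; supplying newBackingMemory rebuilds the
// mapping via makeMapping(kIOMapUnique | kIOMapReference) and releases the
// blocking UPL.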
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
			        IOOptionBits         options,
			        mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
	if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	{
	    physMem = fMemory;
	    physMem->retain();
	}

	if (!fRedirUPL)
	{
	    vm_size_t size = round_page(fLength);
	    int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			| UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
	    if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
					    NULL, NULL,
					    &flags))
		fRedirUPL = 0;

	    if (physMem)
	    {
		IOUnmapPages( fAddressMap, fAddress, fLength );
		if (false)
		    physMem->redirect(0, true);
	    }
	}

	if (newBackingMemory)
	{
	    if (newBackingMemory != fMemory)
	    {
		fOffset = 0;
		if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
							    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
							    offset, fLength))
		    err = kIOReturnError;
	    }
	    if (fRedirUPL)
	    {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
		fRedirUPL = 0;
	    }
	    if (false && physMem)
		physMem->redirect(0, false);
	}
    }
    while (false);

    UNLOCK;

    if (physMem)
	physMem->release();

    return (err);
}

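// Central mapping constructor.  __address carries the IOMemoryMap object
// (kIOMap64Bit is mandatory).  Static mappings are adopted as-is; kIOMapUnique
// on a physical descriptor maps through a temporary physical-range descriptor;
// otherwise an existing compatible mapping is reused when possible, and only
// then is doMap() asked to build a new one.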
IOMemoryMap * IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *	owner,
	task_t			__intoTask,
	IOVirtualAddress	__address,
	IOOptionBits		options,
	IOByteCount		__offset,
	IOByteCount		__length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor * mapDesc = 0;
    IOMemoryMap *	 result = 0;
    OSIterator *	 iter;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
	if (kIOMapStatic & options)
	{
	    result = mapping;
	    addMapping(mapping);
	    mapping->setMemoryDescriptor(this, 0);
	    continue;
	}

	if (kIOMapUnique & options)
	{
	    addr64_t    phys;
	    IOByteCount physLen;

//	    if (owner != this)		continue;

	    if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
		|| ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    {
		phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		if (!phys || (physLen < length))
		    continue;

		mapDesc = IOMemoryDescriptor::withAddressRange(
				phys, length, getDirection() | kIOMemoryMapperNone, NULL);
		if (!mapDesc)
		    continue;
		offset = 0;
		mapping->fOffset = offset;
	    }
	}
	else
	{
	    // look for a compatible existing mapping
	    if( (iter = OSCollectionIterator::withCollection(_mappings)))
	    {
		IOMemoryMap * lookMapping;
		while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
		{
		    if ((result = lookMapping->copyCompatible(mapping)))
		    {
			addMapping(result);
			result->setMemoryDescriptor(this, offset);
			break;
		    }
		}
		iter->release();
	    }
	    if (result || (options & kIOMapReference))
	    {
		if (result != mapping)
		{
		    mapping->release();
		    mapping = NULL;
		}
		continue;
	    }
	}

	if (!mapDesc)
	{
	    mapDesc = this;
	    mapDesc->retain();
	}
	IOReturn
	kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
	if (kIOReturnSuccess == kr)
	{
	    if (0 == (mapping->fOptions & kIOMapStatic)) {
		vm_map_iokit_mapped_region(mapping->fAddressMap, length);
	    }

	    result = mapping;
	    mapDesc->addMapping(result);
	    result->setMemoryDescriptor(mapDesc, offset);
	}
	else
	{
	    mapping->release();
	    mapping = NULL;
	}
    }
    while( false );

    UNLOCK;

    if (mapDesc)
	mapDesc->release();

    return (result);
}

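// Bookkeeping for the descriptor's set of live mappings.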
void IOMemoryDescriptor::addMapping(
	IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
	if( _mappings )
	    _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
	IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}

#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount   length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount    length,
                                    IODirection  direction,
                                    task_t       task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
				 IOPhysicalAddress	address,
				 IOByteCount		length,
				 IODirection      	direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(
                                   	IOVirtualRange * ranges,
                                   	UInt32           withCount,
                                   	IODirection      direction,
                                   	task_t           task,
                                  	bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(	IOPhysicalRange * ranges,
                                        	UInt32           withCount,
                                        	IODirection      direction,
                                        	bool             asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
					IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

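// Serialize the descriptor as an array of { "address", "length" } dictionaries,
// one per range, snapshotting the ranges under the lock first.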
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
	user_addr_t address;
	user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on, failures exit through the bail label below.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
	Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
	    user_addr_t addr; IOByteCount len;
	    getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
	// The descriptor changed out from under us.  Give up.
        UNLOCK;
	result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
	user_addr_t addr = vcopy[index].address;
	IOByteCount len = (IOByteCount) vcopy[index].length;
	values[0] =
	    OSNumber::withNumber(addr, sizeof(addr) * 8);
	if (values[0] == 0) {
	  result = false;
	  goto bail;
	}
	values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
	if (values[1] == 0) {
	  result = false;
	  goto bail;
	}
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
	if (dict == 0) {
	  result = false;
	  goto bail;
	}
	values[0]->release();
	values[1]->release();
	values[0] = values[1] = 0;

	result = dict->serialize(s);
	dict->release();
	if (!result) {
	  goto bail;
	}
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
      values[0]->release();
    if (values[1])
      values[1]->release();
    if (keys[0])
      keys[0]->release();
    if (keys[1])
      keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);
    return result;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }