1/*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36#include <sys/cdefs.h>
37
38#include <IOKit/assert.h>
39#include <IOKit/system.h>
40#include <IOKit/IOLib.h>
41#include <IOKit/IOMemoryDescriptor.h>
42#include <IOKit/IOMapper.h>
43#include <IOKit/IODMACommand.h>
44#include <IOKit/IOKitKeysPrivate.h>
45
46#ifndef __LP64__
47#include <IOKit/IOSubMemoryDescriptor.h>
48#endif /* !__LP64__ */
49
50#include <IOKit/IOKitDebug.h>
51#include <libkern/OSDebug.h>
52
53#include "IOKitKernelInternal.h"
54
55#include <libkern/c++/OSContainers.h>
56#include <libkern/c++/OSDictionary.h>
57#include <libkern/c++/OSArray.h>
58#include <libkern/c++/OSSymbol.h>
59#include <libkern/c++/OSNumber.h>
60
61#include <sys/uio.h>
62
63__BEGIN_DECLS
64#include <vm/pmap.h>
65#include <vm/vm_pageout.h>
66#include <mach/memory_object_types.h>
67#include <device/device_port.h>
68
69#include <mach/vm_prot.h>
70#include <mach/mach_vm.h>
71#include <vm/vm_fault.h>
72#include <vm/vm_protos.h>
73
74extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75extern void ipc_port_release_send(ipc_port_t port);
76
77kern_return_t
78memory_object_iopl_request(
79	ipc_port_t		port,
80	memory_object_offset_t	offset,
81	vm_size_t		*upl_size,
82	upl_t			*upl_ptr,
83	upl_page_info_array_t	user_page_list,
84	unsigned int		*page_list_count,
85	int			*flags);
86
87unsigned int  IOTranslateCacheBits(struct phys_entry *pp);
88
89__END_DECLS
90
91#define kIOMaximumMappedIOByteCount	(512*1024*1024)
92
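// Sentinel mapper value passed to initMemoryEntries(); judging by its name and
// its uses below, it requests the system mapper (IOMapper::gSystem), waiting
// for one to appear if necessary.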
93#define kIOMapperWaitSystem	((IOMapper *) 1)
94
95static IOMapper * gIOSystemMapper = NULL;
96
97static ppnum_t	  gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
98
99ppnum_t		  gIOLastPage;
100
101/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
102
103OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
104
105#define super IOMemoryDescriptor
106
107OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
108
109/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
110
111static IORecursiveLock * gIOMemoryLock;
112
113#define LOCK	IORecursiveLockLock( gIOMemoryLock)
114#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
115#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
116#define WAKEUP	\
117    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
118
119#if 0
120#define DEBG(fmt, args...)  	{ kprintf(fmt, ## args); }
121#else
122#define DEBG(fmt, args...)  	{}
123#endif
124
125#define IOMD_DEBUG_DMAACTIVE	1
126
127/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
128
// Some data structures and accessor macros used by the initWithOptions()
// function.
131
132enum ioPLBlockFlags {
133    kIOPLOnDevice  = 0x00000001,
134    kIOPLExternUPL = 0x00000002,
135};
136
137struct typePersMDData
138{
139    const IOGeneralMemoryDescriptor *fMD;
140    ipc_port_t fMemEntry;
141};
142
143struct ioPLBlock {
144    upl_t fIOPL;
145    vm_address_t fPageInfo;   // Pointer to page list or index into it
146    uint32_t fIOMDOffset;	    // The offset of this iopl in descriptor
147    ppnum_t fMappedPage;	    // Page number of first page in this iopl
148    unsigned int fPageOffset;	    // Offset within first page of iopl
149    unsigned int fFlags;	    // Flags
150};
151
152struct ioGMDData {
153    IOMapper *  fMapper;
154    uint8_t	fDMAMapNumAddressBits;
155    uint64_t    fDMAMapAlignment;
156    addr64_t    fMappedBase;
157    uint64_t fPreparationID;
158    unsigned int fPageCnt;
159#if __LP64__
160    // align arrays to 8 bytes so following macros work
161    unsigned int fPad;
162#endif
163    upl_page_info_t fPageList[1]; /* variable length */
164    ioPLBlock fBlocks[1]; /* variable length */
165};
166
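// Accessors into the ioGMDData kept in the _memoryEntries OSData: the page
// list occupies fPageCnt entries immediately after the header, the ioPLBlock
// array follows it, and getNumIOPL() derives the block count from the OSData's
// remaining length.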
167#define getDataP(osd)	((ioGMDData *) (osd)->getBytesNoCopy())
168#define getIOPLList(d)	((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
169#define getNumIOPL(osd, d)	\
170    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
171#define getPageList(d)	(&(d->fPageList[0]))
172#define computeDataSize(p, u) \
173    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
174
175
176/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
177
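// next_page(a): address of the first byte of the page following the one that
// contains 'a'.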
178#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
179
180
181extern "C" {
182
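// Device pager callback: recover the owning IOMemoryDescriptor from the
// reserved handle and ask it to handle the fault for [offset, offset + size).
// KERN_ABORTED is returned if the descriptor has already gone away.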
183kern_return_t device_data_action(
184               uintptr_t               device_handle,
185               ipc_port_t              device_pager,
186               vm_prot_t               protection,
187               vm_object_offset_t      offset,
188               vm_size_t               size)
189{
190    kern_return_t	 kr;
191    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
192    IOMemoryDescriptor * memDesc;
193
194    LOCK;
195    memDesc = ref->dp.memory;
196    if( memDesc)
197    {
198	memDesc->retain();
199	kr = memDesc->handleFault( device_pager, 0, 0,
200                offset, size, kIOMapDefaultCache /*?*/);
201	memDesc->release();
202    }
203    else
204	kr = KERN_ABORTED;
205    UNLOCK;
206
207    return( kr );
208}
209
210kern_return_t device_close(
211               uintptr_t     device_handle)
212{
213    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
214
215    IODelete( ref, IOMemoryDescriptorReserved, 1 );
216
217    return( kIOReturnSuccess );
218}
219};	// end extern "C"
220
// Note: this inline function uses C++ reference arguments to return values.
// This means that pointers are not passed, and NULL checks are unnecessary
// because a NULL reference is illegal.
224static inline void
225getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
226     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
227{
228    assert(kIOMemoryTypeUIO       == type
229	|| kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64 == type
230	|| kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
231    if (kIOMemoryTypeUIO == type) {
232	user_size_t us;
233	uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
234    }
235#ifndef __LP64__
236    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
237	IOAddressRange cur = r.v64[ind];
238	addr = cur.address;
239	len  = cur.length;
240    }
241#endif /* !__LP64__ */
242    else {
243	IOVirtualRange cur = r.v[ind];
244	addr = cur.address;
245	len  = cur.length;
246    }
247}
248
249/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
250
251IOMemoryDescriptor *
252IOMemoryDescriptor::withAddress(void *      address,
253                                IOByteCount   length,
254                                IODirection direction)
255{
256    return IOMemoryDescriptor::
257        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
258}
259
260#ifndef __LP64__
261IOMemoryDescriptor *
262IOMemoryDescriptor::withAddress(IOVirtualAddress address,
263                                IOByteCount  length,
264                                IODirection  direction,
265                                task_t       task)
266{
267    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
268    if (that)
269    {
270	if (that->initWithAddress(address, length, direction, task))
271	    return that;
272
273        that->release();
274    }
275    return 0;
276}
277#endif /* !__LP64__ */
278
279IOMemoryDescriptor *
280IOMemoryDescriptor::withPhysicalAddress(
281				IOPhysicalAddress	address,
282				IOByteCount		length,
283				IODirection      	direction )
284{
285    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
286}
287
288#ifndef __LP64__
289IOMemoryDescriptor *
290IOMemoryDescriptor::withRanges(	IOVirtualRange * ranges,
291				UInt32           withCount,
292				IODirection      direction,
293				task_t           task,
294				bool             asReference)
295{
296    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
297    if (that)
298    {
299	if (that->initWithRanges(ranges, withCount, direction, task, asReference))
300	    return that;
301
302        that->release();
303    }
304    return 0;
305}
306#endif /* !__LP64__ */
307
308IOMemoryDescriptor *
309IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
310					mach_vm_size_t length,
311					IOOptionBits   options,
312					task_t         task)
313{
314    IOAddressRange range = { address, length };
315    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
316}
317
318IOMemoryDescriptor *
319IOMemoryDescriptor::withAddressRanges(IOAddressRange *   ranges,
320					UInt32           rangeCount,
321					IOOptionBits     options,
322					task_t           task)
323{
324    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
325    if (that)
326    {
327	if (task)
328	    options |= kIOMemoryTypeVirtual64;
329	else
330	    options |= kIOMemoryTypePhysical64;
331
332	if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
333	    return that;
334
335	that->release();
336    }
337
338    return 0;
339}
340
341
342/*
343 * withOptions:
344 *
345 * Create a new IOMemoryDescriptor. The buffer is made up of several
346 * virtual address ranges, from a given task.
347 *
348 * Passing the ranges as a reference will avoid an extra allocation.
349 */
350IOMemoryDescriptor *
351IOMemoryDescriptor::withOptions(void *		buffers,
352                                UInt32		count,
353                                UInt32		offset,
354                                task_t		task,
355                                IOOptionBits	opts,
356                                IOMapper *	mapper)
357{
358    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
359
360    if (self
361    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
362    {
363        self->release();
364        return 0;
365    }
366
367    return self;
368}
369
370bool IOMemoryDescriptor::initWithOptions(void *		buffers,
371                                         UInt32		count,
372                                         UInt32		offset,
373                                         task_t		task,
374                                         IOOptionBits	options,
375                                         IOMapper *	mapper)
376{
377    return( false );
378}
379
380#ifndef __LP64__
381IOMemoryDescriptor *
382IOMemoryDescriptor::withPhysicalRanges(	IOPhysicalRange * ranges,
383                                        UInt32          withCount,
384                                        IODirection     direction,
385                                        bool            asReference)
386{
387    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
388    if (that)
389    {
390	if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
391	    return that;
392
393        that->release();
394    }
395    return 0;
396}
397
398IOMemoryDescriptor *
399IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *	of,
400				IOByteCount		offset,
401				IOByteCount		length,
402				IODirection		direction)
403{
404    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
405}
406#endif /* !__LP64__ */
407
408IOMemoryDescriptor *
409IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
410{
411    IOGeneralMemoryDescriptor *origGenMD =
412	OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
413
414    if (origGenMD)
415	return IOGeneralMemoryDescriptor::
416	    withPersistentMemoryDescriptor(origGenMD);
417    else
418	return 0;
419}
420
421IOMemoryDescriptor *
422IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
423{
424    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
425
426    if (!sharedMem)
427	return 0;
428
429    if (sharedMem == originalMD->_memEntry) {
430	originalMD->retain();		    // Add a new reference to ourselves
431	ipc_port_release_send(sharedMem);   // Remove extra send right
432	return originalMD;
433    }
434
435    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
436    typePersMDData initData = { originalMD, sharedMem };
437
438    if (self
439    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
440        self->release();
441	self = 0;
442    }
443    return self;
444}
445
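// Create (or re-acquire) a mach named memory entry covering this descriptor's
// first range.  When _memEntry already exists, MAP_MEM_NAMED_REUSE lets
// mach_make_memory_entry_64() hand back that same entry, which the caller
// checks for.  Returns MACH_PORT_NULL if the entry could not cover the full
// size.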
446void *IOGeneralMemoryDescriptor::createNamedEntry()
447{
448    kern_return_t error;
449    ipc_port_t sharedMem;
450
451    IOOptionBits type = _flags & kIOMemoryTypeMask;
452
453    user_addr_t range0Addr;
454    IOByteCount range0Len;
455    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
456    range0Addr = trunc_page_64(range0Addr);
457
458    vm_size_t size = ptoa_32(_pages);
459    vm_address_t kernelPage = (vm_address_t) range0Addr;
460
461    vm_map_t theMap = ((_task == kernel_task)
462			&& (kIOMemoryBufferPageable & _flags))
463		    ? IOPageableMapForAddress(kernelPage)
464		    : get_task_map(_task);
465
466    memory_object_size_t  actualSize = size;
467    vm_prot_t             prot       = VM_PROT_READ;
468    if (kIODirectionOut != (kIODirectionOutIn & _flags))
469	prot |= VM_PROT_WRITE;
470
471    if (_memEntry)
472	prot |= MAP_MEM_NAMED_REUSE;
473
474    error = mach_make_memory_entry_64(theMap,
475	    &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
476
477    if (KERN_SUCCESS == error) {
478	if (actualSize == size) {
479	    return sharedMem;
480	} else {
481#if IOASSERT
482	    IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
483		  (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
484#endif
485	    ipc_port_release_send( sharedMem );
486	}
487    }
488
489    return MACH_PORT_NULL;
490}
491
492#ifndef __LP64__
493bool
494IOGeneralMemoryDescriptor::initWithAddress(void *      address,
495                                    IOByteCount   withLength,
496                                    IODirection withDirection)
497{
498    _singleRange.v.address = (vm_offset_t) address;
499    _singleRange.v.length  = withLength;
500
501    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
502}
503
504bool
505IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
506                                    IOByteCount    withLength,
507                                    IODirection  withDirection,
508                                    task_t       withTask)
509{
510    _singleRange.v.address = address;
511    _singleRange.v.length  = withLength;
512
513    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
514}
515
516bool
517IOGeneralMemoryDescriptor::initWithPhysicalAddress(
518				 IOPhysicalAddress	address,
519				 IOByteCount		withLength,
520				 IODirection      	withDirection )
521{
522    _singleRange.p.address = address;
523    _singleRange.p.length  = withLength;
524
525    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
526}
527
528bool
529IOGeneralMemoryDescriptor::initWithPhysicalRanges(
530                                IOPhysicalRange * ranges,
531                                UInt32            count,
532                                IODirection       direction,
533                                bool              reference)
534{
535    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
536
537    if (reference)
538        mdOpts |= kIOMemoryAsReference;
539
540    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
541}
542
543bool
544IOGeneralMemoryDescriptor::initWithRanges(
545                                   IOVirtualRange * ranges,
546                                   UInt32           count,
547                                   IODirection      direction,
548                                   task_t           task,
549                                   bool             reference)
550{
551    IOOptionBits mdOpts = direction;
552
553    if (reference)
554        mdOpts |= kIOMemoryAsReference;
555
556    if (task) {
557        mdOpts |= kIOMemoryTypeVirtual;
558
559	// Auto-prepare if this is a kernel memory descriptor as very few
560	// clients bother to prepare() kernel memory.
561	// But it was not enforced so what are you going to do?
562        if (task == kernel_task)
563            mdOpts |= kIOMemoryAutoPrepare;
564    }
565    else
566        mdOpts |= kIOMemoryTypePhysical;
567
568    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
569}
570#endif /* !__LP64__ */
571
572/*
573 * initWithOptions:
574 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
 * address ranges from a given task, several physical ranges, a UPL from the
 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
578 *
579 * Passing the ranges as a reference will avoid an extra allocation.
580 *
581 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
582 * existing instance -- note this behavior is not commonly supported in other
583 * I/O Kit classes, although it is supported here.
584 */
585
586bool
587IOGeneralMemoryDescriptor::initWithOptions(void *	buffers,
588                                           UInt32	count,
589                                           UInt32	offset,
590                                           task_t	task,
591                                           IOOptionBits	options,
592                                           IOMapper *	mapper)
593{
594    IOOptionBits type = options & kIOMemoryTypeMask;
595
596#ifndef __LP64__
597    if (task
598        && (kIOMemoryTypeVirtual == type)
599        && vm_map_is_64bit(get_task_map(task))
600        && ((IOVirtualRange *) buffers)->address)
601    {
602        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
603        return false;
604    }
605#endif /* !__LP64__ */
606
    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
609    if (kIOMemoryTypePersistentMD == type) {
610
611	typePersMDData *initData = (typePersMDData *) buffers;
612	const IOGeneralMemoryDescriptor *orig = initData->fMD;
613	ioGMDData *dataP = getDataP(orig->_memoryEntries);
614
615	// Only accept persistent memory descriptors with valid dataP data.
616	assert(orig->_rangesCount == 1);
617	if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
618	    return false;
619
620	_memEntry = initData->fMemEntry;	// Grab the new named entry
621	options = orig->_flags & ~kIOMemoryAsReference;
622        type = options & kIOMemoryTypeMask;
623	buffers = orig->_ranges.v;
624	count = orig->_rangesCount;
625
626	// Now grab the original task and whatever mapper was previously used
627	task = orig->_task;
628	mapper = dataP->fMapper;
629
630	// We are ready to go through the original initialisation now
631    }
632
633    switch (type) {
634    case kIOMemoryTypeUIO:
635    case kIOMemoryTypeVirtual:
636#ifndef __LP64__
637    case kIOMemoryTypeVirtual64:
638#endif /* !__LP64__ */
639        assert(task);
640        if (!task)
641            return false;
642	break;
643
644    case kIOMemoryTypePhysical:		// Neither Physical nor UPL should have a task
645#ifndef __LP64__
646    case kIOMemoryTypePhysical64:
647#endif /* !__LP64__ */
648    case kIOMemoryTypeUPL:
649        assert(!task);
650        break;
651    default:
652        return false;	/* bad argument */
653    }
654
655    assert(buffers);
656    assert(count);
657
658    /*
659     * We can check the _initialized  instance variable before having ever set
660     * it to an initial value because I/O Kit guarantees that all our instance
661     * variables are zeroed on an object's allocation.
662     */
663
664    if (_initialized) {
665        /*
666         * An existing memory descriptor is being retargeted to point to
667         * somewhere else.  Clean up our present state.
668         */
669	IOOptionBits type = _flags & kIOMemoryTypeMask;
670	if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
671	{
672	    while (_wireCount)
673		complete();
674	}
675        if (_ranges.v && !(kIOMemoryAsReference & _flags))
676	{
677	    if (kIOMemoryTypeUIO == type)
678		uio_free((uio_t) _ranges.v);
679#ifndef __LP64__
680	    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
681		IODelete(_ranges.v64, IOAddressRange, _rangesCount);
682#endif /* !__LP64__ */
683	    else
684		IODelete(_ranges.v, IOVirtualRange, _rangesCount);
685	}
686
687	if (_memEntry)
688	{
689	    ipc_port_release_send((ipc_port_t) _memEntry);
690	    _memEntry = 0;
691	}
692	if (_mappings)
693	    _mappings->flushCollection();
694    }
695    else {
696        if (!super::init())
697            return false;
698        _initialized = true;
699    }
700
701    // Grab the appropriate mapper
702    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
703    if (kIOMemoryMapperNone & options)
704        mapper = 0;	// No Mapper
705    else if (mapper == kIOMapperSystem) {
706        IOMapper::checkForSystemMapper();
707        gIOSystemMapper = mapper = IOMapper::gSystem;
708    }
709
710    // Temp binary compatibility for kIOMemoryThreadSafe
711    if (kIOMemoryReserved6156215 & options)
712    {
713	options &= ~kIOMemoryReserved6156215;
714	options |= kIOMemoryThreadSafe;
715    }
716    // Remove the dynamic internal use flags from the initial setting
717    options 		  &= ~(kIOMemoryPreparedReadOnly);
718    _flags		   = options;
719    _task                  = task;
720
721#ifndef __LP64__
722    _direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
723#endif /* !__LP64__ */
724
725    __iomd_reservedA = 0;
726    __iomd_reservedB = 0;
727    _highestPage = 0;
728
729    if (kIOMemoryThreadSafe & options)
730    {
731	if (!_prepareLock)
732	    _prepareLock = IOLockAlloc();
733    }
734    else if (_prepareLock)
735    {
736	IOLockFree(_prepareLock);
737	_prepareLock = NULL;
738    }
739
740    if (kIOMemoryTypeUPL == type) {
741
742        ioGMDData *dataP;
743        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
744
745        if (!initMemoryEntries(dataSize, mapper)) return (false);
746        dataP = getDataP(_memoryEntries);
747        dataP->fPageCnt = 0;
748
749 //       _wireCount++;	// UPLs start out life wired
750
751        _length    = count;
752        _pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
753
754        ioPLBlock iopl;
755        iopl.fIOPL = (upl_t) buffers;
756        upl_set_referenced(iopl.fIOPL, true);
757        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
758
759	if (upl_get_size(iopl.fIOPL) < (count + offset))
760	    panic("short external upl");
761
762        _highestPage = upl_get_highest_page(iopl.fIOPL);
763
        // kIOPLOnDevice is conveniently equal to 1, so pageList->device sets it directly
765        iopl.fFlags  = pageList->device | kIOPLExternUPL;
766        if (!pageList->device) {
767            // Pre-compute the offset into the UPL's page list
768            pageList = &pageList[atop_32(offset)];
769            offset &= PAGE_MASK;
770        }
771        iopl.fIOMDOffset = 0;
772        iopl.fMappedPage = 0;
773        iopl.fPageInfo = (vm_address_t) pageList;
774        iopl.fPageOffset = offset;
775        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
776    }
777    else {
778	// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
779	// kIOMemoryTypePhysical | kIOMemoryTypePhysical64
780
781	// Initialize the memory descriptor
782	if (options & kIOMemoryAsReference) {
783#ifndef __LP64__
784	    _rangesIsAllocated = false;
785#endif /* !__LP64__ */
786
787	    // Hack assignment to get the buffer arg into _ranges.
788	    // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
789	    // work, C++ sigh.
790	    // This also initialises the uio & physical ranges.
791	    _ranges.v = (IOVirtualRange *) buffers;
792	}
793	else {
794#ifndef __LP64__
795	    _rangesIsAllocated = true;
796#endif /* !__LP64__ */
797	    switch (type)
798	    {
799	      case kIOMemoryTypeUIO:
800		_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
801		break;
802
803#ifndef __LP64__
804	      case kIOMemoryTypeVirtual64:
805	      case kIOMemoryTypePhysical64:
806		if (count == 1
807		    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
808		    ) {
809		    if (kIOMemoryTypeVirtual64 == type)
810			type = kIOMemoryTypeVirtual;
811		    else
812			type = kIOMemoryTypePhysical;
813		    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
814		    _rangesIsAllocated = false;
815		    _ranges.v = &_singleRange.v;
816		    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
817		    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
818		    break;
819		}
820		_ranges.v64 = IONew(IOAddressRange, count);
821		if (!_ranges.v64)
822		    return false;
823		bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
824		break;
825#endif /* !__LP64__ */
826	      case kIOMemoryTypeVirtual:
827	      case kIOMemoryTypePhysical:
828		if (count == 1) {
829		    _flags |= kIOMemoryAsReference;
830#ifndef __LP64__
831		    _rangesIsAllocated = false;
832#endif /* !__LP64__ */
833		    _ranges.v = &_singleRange.v;
834		} else {
835		    _ranges.v = IONew(IOVirtualRange, count);
836		    if (!_ranges.v)
837			return false;
838		}
839		bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
840		break;
841	    }
842	}
843
844	// Find starting address within the vector of ranges
845	Ranges vec = _ranges;
846	UInt32 length = 0;
847	UInt32 pages = 0;
848	for (unsigned ind = 0; ind < count;  ind++) {
849	    user_addr_t addr;
850	    IOPhysicalLength len;
851
852	    // addr & len are returned by this function
853	    getAddrLenForInd(addr, len, type, vec, ind);
854	    pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
855	    len += length;
856	    assert(len >= length);	// Check for 32 bit wrap around
857	    length = len;
858
859	    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
860	    {
861		ppnum_t highPage = atop_64(addr + len - 1);
862		if (highPage > _highestPage)
863		    _highestPage = highPage;
864	    }
865	}
866	_length      = length;
867	_pages       = pages;
868	_rangesCount = count;
869
        // Auto-prepare memory at creation time.
        // Completion is implied when the descriptor is freed.
872        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
873            _wireCount++;	// Physical MDs are, by definition, wired
874        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
875            ioGMDData *dataP;
876            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
877
878            if (!initMemoryEntries(dataSize, mapper)) return false;
879            dataP = getDataP(_memoryEntries);
880            dataP->fPageCnt = _pages;
881
882	    if ( (kIOMemoryPersistent & _flags) && !_memEntry)
883		_memEntry = createNamedEntry();
884
885            if ((_flags & kIOMemoryAutoPrepare)
886             && prepare() != kIOReturnSuccess)
887                return false;
888        }
889    }
890
891    return true;
892}
893
894/*
895 * free
896 *
897 * Free resources.
898 */
899void IOGeneralMemoryDescriptor::free()
900{
901    IOOptionBits type = _flags & kIOMemoryTypeMask;
902
903    if( reserved)
904    {
905	LOCK;
906	reserved->dp.memory = 0;
907	UNLOCK;
908    }
909
910    if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
911    {
912	while (_wireCount)
913	    complete();
914    }
915    if (_memoryEntries)
916        _memoryEntries->release();
917
918    if (_ranges.v && !(kIOMemoryAsReference & _flags))
919    {
920	if (kIOMemoryTypeUIO == type)
921	    uio_free((uio_t) _ranges.v);
922#ifndef __LP64__
923	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
924	    IODelete(_ranges.v64, IOAddressRange, _rangesCount);
925#endif /* !__LP64__ */
926	else
927	    IODelete(_ranges.v, IOVirtualRange, _rangesCount);
928
929	_ranges.v = NULL;
930    }
931
932    if (reserved)
933    {
934        if (reserved->dp.devicePager)
935        {
936            // memEntry holds a ref on the device pager which owns reserved
937            // (IOMemoryDescriptorReserved) so no reserved access after this point
938            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
939        }
940        else
941            IODelete(reserved, IOMemoryDescriptorReserved, 1);
942        reserved = NULL;
943    }
944
945    if (_memEntry)
946        ipc_port_release_send( (ipc_port_t) _memEntry );
947
948    if (_prepareLock)
949	IOLockFree(_prepareLock);
950
951    super::free();
952}
953
954#ifndef __LP64__
955void IOGeneralMemoryDescriptor::unmapFromKernel()
956{
957    panic("IOGMD::unmapFromKernel deprecated");
958}
959
960void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
961{
962    panic("IOGMD::mapIntoKernel deprecated");
963}
964#endif /* !__LP64__ */
965
966/*
967 * getDirection:
968 *
969 * Get the direction of the transfer.
970 */
971IODirection IOMemoryDescriptor::getDirection() const
972{
973#ifndef __LP64__
974    if (_direction)
975	return _direction;
976#endif /* !__LP64__ */
977    return (IODirection) (_flags & kIOMemoryDirectionMask);
978}
979
980/*
981 * getLength:
982 *
983 * Get the length of the transfer (over all ranges).
984 */
985IOByteCount IOMemoryDescriptor::getLength() const
986{
987    return _length;
988}
989
990void IOMemoryDescriptor::setTag( IOOptionBits tag )
991{
992    _tag = tag;
993}
994
995IOOptionBits IOMemoryDescriptor::getTag( void )
996{
997    return( _tag);
998}
999
1000#ifndef __LP64__
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
1002IOPhysicalAddress
1003IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
1004{
1005    addr64_t physAddr = 0;
1006
1007    if( prepare() == kIOReturnSuccess) {
1008        physAddr = getPhysicalSegment64( offset, length );
1009        complete();
1010    }
1011
1012    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1013}
1014#endif /* !__LP64__ */
1015
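// Copy 'length' bytes starting at 'offset' within the descriptor into the
// caller's kernel buffer, walking the descriptor one physical segment at a
// time and letting copypv() perform the physical-to-virtual copy.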
1016IOByteCount IOMemoryDescriptor::readBytes
1017                (IOByteCount offset, void *bytes, IOByteCount length)
1018{
1019    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1020    IOByteCount remaining;
1021
    // Assert that this entire I/O is within the available range
1023    assert(offset < _length);
1024    assert(offset + length <= _length);
1025    if (offset >= _length) {
1026        return 0;
1027    }
1028
1029    if (kIOMemoryThreadSafe & _flags)
1030	LOCK;
1031
1032    remaining = length = min(length, _length - offset);
1033    while (remaining) {	// (process another target segment?)
1034        addr64_t	srcAddr64;
1035        IOByteCount	srcLen;
1036
1037        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1038        if (!srcAddr64)
1039            break;
1040
1041        // Clip segment length to remaining
1042        if (srcLen > remaining)
1043            srcLen = remaining;
1044
1045        copypv(srcAddr64, dstAddr, srcLen,
1046                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1047
1048        dstAddr   += srcLen;
1049        offset    += srcLen;
1050        remaining -= srcLen;
1051    }
1052
1053    if (kIOMemoryThreadSafe & _flags)
1054	UNLOCK;
1055
1056    assert(!remaining);
1057
1058    return length - remaining;
1059}
1060
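// Mirror of readBytes(): copy from the caller's kernel buffer into the
// descriptor's physical segments.  Descriptors prepared read-only
// (kIOMemoryPreparedReadOnly) are refused.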
1061IOByteCount IOMemoryDescriptor::writeBytes
1062                (IOByteCount offset, const void *bytes, IOByteCount length)
1063{
1064    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1065    IOByteCount remaining;
1066
    // Assert that this entire I/O is within the available range
1068    assert(offset < _length);
1069    assert(offset + length <= _length);
1070
1071    assert( !(kIOMemoryPreparedReadOnly & _flags) );
1072
1073    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1074        return 0;
1075    }
1076
1077    if (kIOMemoryThreadSafe & _flags)
1078	LOCK;
1079
1080    remaining = length = min(length, _length - offset);
1081    while (remaining) {	// (process another target segment?)
1082        addr64_t    dstAddr64;
1083        IOByteCount dstLen;
1084
1085        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1086        if (!dstAddr64)
1087            break;
1088
1089        // Clip segment length to remaining
1090        if (dstLen > remaining)
1091            dstLen = remaining;
1092
1093        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1094                            cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1095
1096        srcAddr   += dstLen;
1097        offset    += dstLen;
1098        remaining -= dstLen;
1099    }
1100
1101    if (kIOMemoryThreadSafe & _flags)
1102	UNLOCK;
1103
1104    assert(!remaining);
1105
1106    return length - remaining;
1107}
1108
1109// osfmk/device/iokit_rpc.c
1110extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1111
1112#ifndef __LP64__
1113void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1114{
1115    panic("IOGMD::setPosition deprecated");
1116}
1117#endif /* !__LP64__ */
1118
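// Global preparation-ID counter; each prepared descriptor that needs one draws
// a unique, monotonically increasing 64-bit value from here.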
1119static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1120
1121uint64_t
1122IOGeneralMemoryDescriptor::getPreparationID( void )
1123{
1124    ioGMDData *dataP;
1125
1126    if (!_wireCount)
1127	return (kIOPreparationIDUnprepared);
1128
1129    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1130      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1131    {
1132        IOMemoryDescriptor::setPreparationID();
1133        return (IOMemoryDescriptor::getPreparationID());
1134    }
1135
1136    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1137	return (kIOPreparationIDUnprepared);
1138
1139    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1140    {
1141	dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1142    }
1143    return (dataP->fPreparationID);
1144}
1145
1146IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1147{
1148    if (!reserved)
1149    {
1150        reserved = IONew(IOMemoryDescriptorReserved, 1);
1151        if (reserved)
1152            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1153    }
1154    return (reserved);
1155}
1156
1157void IOMemoryDescriptor::setPreparationID( void )
1158{
1159    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1160    {
1161#if defined(__ppc__ )
1162        reserved->preparationID = gIOMDPreparationID++;
1163#else
1164        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1165#endif
1166    }
1167}
1168
1169uint64_t IOMemoryDescriptor::getPreparationID( void )
1170{
1171    if (reserved)
1172        return (reserved->preparationID);
1173    else
1174        return (kIOPreparationIDUnsupported);
1175}
1176
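// Back end for IODMACommand: dispatches on the operation code to either
// establish or reuse a DMA mapping (kIOMDDMAMap), merge a mapping
// specification (kIOMDAddDMAMapSpec), report descriptor characteristics
// (kIOMDGetCharacteristics), track DMA activity (kIOMDDMAActive), or walk the
// descriptor segment by segment (kIOMDWalkSegments, handled below).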
1177IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1178{
1179    IOReturn err = kIOReturnSuccess;
1180    DMACommandOps params;
1181    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1182    ioGMDData *dataP;
1183
1184    params = (op & ~kIOMDDMACommandOperationMask & op);
1185    op &= kIOMDDMACommandOperationMask;
1186
1187    if (kIOMDDMAMap == op)
1188    {
1189	if (dataSize < sizeof(IOMDDMAMapArgs))
1190	    return kIOReturnUnderrun;
1191
1192	IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1193
1194	if (!_memoryEntries
1195	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1196
1197	if (_memoryEntries && data->fMapper)
1198	{
1199	    bool remap = false;
1200	    bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1201	    dataP = getDataP(_memoryEntries);
1202	    if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits)
1203	    {
1204	     	dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1205		remap = ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1206	    }
1207	    if (data->fMapSpec.alignment > dataP->fDMAMapAlignment)
1208	    {
1209	     	dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1210		remap |= (dataP->fDMAMapAlignment > page_size);
1211	    }
1212	    remap |= (!whole);
1213	    if (remap || !dataP->fMappedBase)
1214	    {
1215//		if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1216	    	err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1217		if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1218		{
1219		    dataP->fMappedBase = data->fAlloc;
1220		    data->fAllocCount = 0; 			// IOMD owns the alloc now
1221		}
1222	    }
1223	    else
1224	    {
1225	    	data->fAlloc = dataP->fMappedBase;
1226		data->fAllocCount = 0; 				// IOMD owns the alloc
1227	    }
1228	}
1229
1230	return (err);
1231    }
1232
1233    if (kIOMDAddDMAMapSpec == op)
1234    {
1235	if (dataSize < sizeof(IODMAMapSpecification))
1236	    return kIOReturnUnderrun;
1237
1238	IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1239
1240	if (!_memoryEntries
1241	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1242
1243	if (_memoryEntries)
1244	{
1245	    dataP = getDataP(_memoryEntries);
1246	    if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
1247	     	dataP->fDMAMapNumAddressBits = data->numAddressBits;
1248	    if (data->alignment > dataP->fDMAMapAlignment)
1249	     	dataP->fDMAMapAlignment = data->alignment;
1250	}
1251	return kIOReturnSuccess;
1252    }
1253
1254    if (kIOMDGetCharacteristics == op) {
1255
1256	if (dataSize < sizeof(IOMDDMACharacteristics))
1257	    return kIOReturnUnderrun;
1258
1259	IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1260	data->fLength = _length;
1261	data->fSGCount = _rangesCount;
1262	data->fPages = _pages;
1263	data->fDirection = getDirection();
1264	if (!_wireCount)
1265	    data->fIsPrepared = false;
1266	else {
1267	    data->fIsPrepared = true;
1268	    data->fHighestPage = _highestPage;
1269	    if (_memoryEntries)
1270	    {
1271		dataP = getDataP(_memoryEntries);
1272		ioPLBlock *ioplList = getIOPLList(dataP);
1273		UInt count = getNumIOPL(_memoryEntries, dataP);
1274		if (count == 1)
1275		    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1276	    }
1277	}
1278
1279	return kIOReturnSuccess;
1280
1281#if IOMD_DEBUG_DMAACTIVE
1282    } else if (kIOMDDMAActive == op) {
1283	if (params) OSIncrementAtomic(&md->__iomd_reservedA);
1284	else {
1285	    if (md->__iomd_reservedA)
1286		OSDecrementAtomic(&md->__iomd_reservedA);
1287	    else
1288		panic("kIOMDSetDMAInactive");
1289	}
1290#endif /* IOMD_DEBUG_DMAACTIVE */
1291
1292    } else if (kIOMDWalkSegments != op)
1293	return kIOReturnBadArgument;
1294
1295    // Get the next segment
1296    struct InternalState {
1297	IOMDDMAWalkSegmentArgs fIO;
1298	UInt fOffset2Index;
1299	UInt fIndex;
1300	UInt fNextOffset;
1301    } *isP;
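    // isP carries the walk state between calls: fIndex and fOffset2Index
    // remember where the previous segment ended, so a sequential walk resumes
    // there instead of rescanning from the first range.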
1302
1303    // Find the next segment
1304    if (dataSize < sizeof(*isP))
1305	return kIOReturnUnderrun;
1306
1307    isP = (InternalState *) vData;
1308    UInt offset = isP->fIO.fOffset;
1309    bool mapped = isP->fIO.fMapped;
1310
1311    if (IOMapper::gSystem && mapped
1312        && (!(kIOMemoryHostOnly & _flags))
1313	&& (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
1314//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
1315    {
1316	if (!_memoryEntries
1317	    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1318
1319	dataP = getDataP(_memoryEntries);
1320	if (dataP->fMapper)
1321	{
1322	    IODMAMapSpecification mapSpec;
1323	    bzero(&mapSpec, sizeof(mapSpec));
1324	    mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
1325	    mapSpec.alignment = dataP->fDMAMapAlignment;
1326	    err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
1327	    if (kIOReturnSuccess != err) return (err);
1328	}
1329    }
1330
1331    if (offset >= _length)
1332	return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1333
1334    // Validate the previous offset
1335    UInt ind, off2Ind = isP->fOffset2Index;
1336    if (!params
1337	&& offset
1338	&& (offset == isP->fNextOffset || off2Ind <= offset))
1339	ind = isP->fIndex;
1340    else
1341	ind = off2Ind = 0;	// Start from beginning
1342
1343    UInt length;
1344    UInt64 address;
1345
1346
1347    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1348
1349	// Physical address based memory descriptor
1350	const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1351
1352	// Find the range after the one that contains the offset
1353	mach_vm_size_t len;
1354	for (len = 0; off2Ind <= offset; ind++) {
1355	    len = physP[ind].length;
1356	    off2Ind += len;
1357	}
1358
1359	// Calculate length within range and starting address
1360	length   = off2Ind - offset;
1361	address  = physP[ind - 1].address + len - length;
1362
1363	if (true && mapped && _memoryEntries
1364		&& (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1365	{
1366	    address = dataP->fMappedBase + offset;
1367	}
1368	else
1369	{
1370	    // see how far we can coalesce ranges
1371	    while (ind < _rangesCount && address + length == physP[ind].address) {
1372		len = physP[ind].length;
1373		length += len;
1374		off2Ind += len;
1375		ind++;
1376	    }
1377	}
1378
1379	// correct contiguous check overshoot
1380	ind--;
1381	off2Ind -= len;
1382    }
1383#ifndef __LP64__
1384    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1385
1386	// Physical address based memory descriptor
1387	const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1388
1389	// Find the range after the one that contains the offset
1390	mach_vm_size_t len;
1391	for (len = 0; off2Ind <= offset; ind++) {
1392	    len = physP[ind].length;
1393	    off2Ind += len;
1394	}
1395
1396	// Calculate length within range and starting address
1397	length   = off2Ind - offset;
1398	address  = physP[ind - 1].address + len - length;
1399
1400	if (true && mapped && _memoryEntries
1401		&& (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1402	{
1403	    address = dataP->fMappedBase + offset;
1404	}
1405	else
1406	{
1407	    // see how far we can coalesce ranges
1408	    while (ind < _rangesCount && address + length == physP[ind].address) {
1409		len = physP[ind].length;
1410		length += len;
1411		off2Ind += len;
1412		ind++;
1413	    }
1414	}
1415	// correct contiguous check overshoot
1416	ind--;
1417	off2Ind -= len;
1418    }
1419#endif /* !__LP64__ */
1420    else do {
1421	if (!_wireCount)
1422	    panic("IOGMD: not wired for the IODMACommand");
1423
1424	assert(_memoryEntries);
1425
1426	dataP = getDataP(_memoryEntries);
1427	const ioPLBlock *ioplList = getIOPLList(dataP);
1428	UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1429	upl_page_info_t *pageList = getPageList(dataP);
1430
1431	assert(numIOPLs > 0);
1432
1433	// Scan through iopl info blocks looking for block containing offset
1434	while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1435	    ind++;
1436
1437	// Go back to actual range as search goes past it
1438	ioPLBlock ioplInfo = ioplList[ind - 1];
1439	off2Ind = ioplInfo.fIOMDOffset;
1440
1441	if (ind < numIOPLs)
1442	    length = ioplList[ind].fIOMDOffset;
1443	else
1444	    length = _length;
1445	length -= offset;			// Remainder within iopl
1446
	// Subtract this iopl's starting offset within the total list
1448	offset -= off2Ind;
1449
1450	// If a mapped address is requested and this is a pre-mapped IOPL
1451	// then just need to compute an offset relative to the mapped base.
1452	if (mapped && dataP->fMappedBase) {
1453	    offset += (ioplInfo.fPageOffset & PAGE_MASK);
1454	    address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
	    continue;	// Done; leave the do { } while (false) now
1456	}
1457
1458	// The offset is rebased into the current iopl.
1459	// Now add the iopl 1st page offset.
1460	offset += ioplInfo.fPageOffset;
1461
1462	// For external UPLs the fPageInfo field points directly to
1463	// the upl's upl_page_info_t array.
1464	if (ioplInfo.fFlags & kIOPLExternUPL)
1465	    pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1466	else
1467	    pageList = &pageList[ioplInfo.fPageInfo];
1468
1469	// Check for direct device non-paged memory
1470	if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1471	    address = ptoa_64(pageList->phys_addr) + offset;
	    continue;	// Done; leave the do { } while (false) now
1473	}
1474
	// Now we need to compute the index into the pageList
1476	UInt pageInd = atop_32(offset);
1477	offset &= PAGE_MASK;
1478
1479	// Compute the starting address of this segment
1480	IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1481	if (!pageAddr) {
1482	    panic("!pageList phys_addr");
1483	}
1484
1485	address = ptoa_64(pageAddr) + offset;
1486
	// length is currently set to the length of the remainder of the iopl.
	// We need to check that the remainder of the iopl is contiguous.
	// This is indicated by pageList[pageInd].phys_addr being sequential.
1490	IOByteCount contigLength = PAGE_SIZE - offset;
1491	while (contigLength < length
1492		&& ++pageAddr == pageList[++pageInd].phys_addr)
1493	{
1494	    contigLength += PAGE_SIZE;
1495	}
1496
1497	if (contigLength < length)
1498	    length = contigLength;
1499
1500
1501	assert(address);
1502	assert(length);
1503
1504    } while (false);
1505
1506    // Update return values and state
1507    isP->fIO.fIOVMAddr = address;
1508    isP->fIO.fLength   = length;
1509    isP->fIndex        = ind;
1510    isP->fOffset2Index = off2Ind;
1511    isP->fNextOffset   = isP->fIO.fOffset + length;
1512
1513    return kIOReturnSuccess;
1514}
1515
1516addr64_t
1517IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1518{
1519    IOReturn     ret;
1520    addr64_t     address = 0;
1521    IOByteCount  length  = 0;
1522    IOMapper *   mapper  = gIOSystemMapper;
1523    IOOptionBits type    = _flags & kIOMemoryTypeMask;
1524
1525    if (lengthOfSegment)
1526        *lengthOfSegment = 0;
1527
1528    if (offset >= _length)
1529        return 0;
1530
1531    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1532    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1533    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1534    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1535
1536    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1537    {
1538        unsigned rangesIndex = 0;
1539	Ranges vec = _ranges;
1540	user_addr_t addr;
1541
1542	// Find starting address within the vector of ranges
1543	for (;;) {
1544	    getAddrLenForInd(addr, length, type, vec, rangesIndex);
1545	    if (offset < length)
1546		break;
1547	    offset -= length; // (make offset relative)
1548	    rangesIndex++;
1549	}
1550
	// Now that we have the starting range,
	// let's find the last contiguous range
1553        addr   += offset;
1554        length -= offset;
1555
1556        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1557	    user_addr_t      newAddr;
1558	    IOPhysicalLength newLen;
1559
1560	    getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1561	    if (addr + length != newAddr)
1562		break;
1563	    length += newLen;
1564	}
1565        if (addr)
1566	    address = (IOPhysicalAddress) addr;	// Truncate address to 32bit
1567    }
1568    else
1569    {
1570	IOMDDMAWalkSegmentState _state;
1571	IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
1572
1573	state->fOffset = offset;
1574	state->fLength = _length - offset;
1575	state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
1576
1577	ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1578
1579	if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1580		DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1581					ret, this, state->fOffset,
1582					state->fIOVMAddr, state->fLength);
1583	if (kIOReturnSuccess == ret)
1584	{
1585	    address = state->fIOVMAddr;
1586	    length  = state->fLength;
1587	}
1588
1589	// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1590	// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1591
1592	if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1593	{
1594	    if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1595	    {
1596		addr64_t    origAddr = address;
1597		IOByteCount origLen  = length;
1598
1599		address = mapper->mapAddr(origAddr);
1600		length = page_size - (address & (page_size - 1));
1601		while ((length < origLen)
1602		    && ((address + length) == mapper->mapAddr(origAddr + length)))
1603		    length += page_size;
1604		if (length > origLen)
1605		    length = origLen;
1606	    }
1607	}
1608    }
1609
1610    if (!address)
1611        length = 0;
1612
1613    if (lengthOfSegment)
1614        *lengthOfSegment = length;
1615
1616    return (address);
1617}
1618
1619#ifndef __LP64__
1620addr64_t
1621IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1622{
1623    addr64_t address = 0;
1624
1625    if (options & _kIOMemorySourceSegment)
1626    {
1627        address = getSourceSegment(offset, lengthOfSegment);
1628    }
1629    else if (options & kIOMemoryMapperNone)
1630    {
1631        address = getPhysicalSegment64(offset, lengthOfSegment);
1632    }
1633    else
1634    {
1635        address = getPhysicalSegment(offset, lengthOfSegment);
1636    }
1637
1638    return (address);
1639}
1640
1641addr64_t
1642IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1643{
1644    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1645}
1646
1647IOPhysicalAddress
1648IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1649{
1650    addr64_t    address = 0;
1651    IOByteCount length  = 0;
1652
1653    address = getPhysicalSegment(offset, lengthOfSegment, 0);
1654
1655    if (lengthOfSegment)
1656	length = *lengthOfSegment;
1657
1658    if ((address + length) > 0x100000000ULL)
1659    {
1660	panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1661		    address, (long) length, (getMetaClass())->getClassName());
1662    }
1663
1664    return ((IOPhysicalAddress) address);
1665}
1666
1667addr64_t
1668IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1669{
1670    IOPhysicalAddress phys32;
1671    IOByteCount	      length;
1672    addr64_t 	      phys64;
1673    IOMapper *        mapper = 0;
1674
1675    phys32 = getPhysicalSegment(offset, lengthOfSegment);
1676    if (!phys32)
1677	return 0;
1678
1679    if (gIOSystemMapper)
1680	mapper = gIOSystemMapper;
1681
1682    if (mapper)
1683    {
1684	IOByteCount origLen;
1685
1686	phys64 = mapper->mapAddr(phys32);
1687	origLen = *lengthOfSegment;
1688	length = page_size - (phys64 & (page_size - 1));
1689	while ((length < origLen)
1690	    && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1691	    length += page_size;
1692	if (length > origLen)
1693	    length = origLen;
1694
1695	*lengthOfSegment = length;
1696    }
1697    else
1698	phys64 = (addr64_t) phys32;
1699
1700    return phys64;
1701}
1702
1703IOPhysicalAddress
1704IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1705{
1706    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1707}
1708
1709IOPhysicalAddress
1710IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1711{
1712    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1713}
1714
1715void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1716							IOByteCount * lengthOfSegment)
1717{
1718    if (_task == kernel_task)
1719        return (void *) getSourceSegment(offset, lengthOfSegment);
1720    else
1721        panic("IOGMD::getVirtualSegment deprecated");
1722
1723    return 0;
1724}
1725#endif /* !__LP64__ */
1726
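// Generic implementation for IOMemoryDescriptor subclasses that do not
// override it: characteristics come from getLength()/getDirection(), and
// segment walks fall back to getPhysicalSegment().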
1727IOReturn
1728IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1729{
1730    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
1731    DMACommandOps params;
1732    IOReturn err;
1733
1734    params = (op & ~kIOMDDMACommandOperationMask & op);
1735    op &= kIOMDDMACommandOperationMask;
1736
1737    if (kIOMDGetCharacteristics == op) {
1738	if (dataSize < sizeof(IOMDDMACharacteristics))
1739	    return kIOReturnUnderrun;
1740
1741	IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1742	data->fLength = getLength();
1743	data->fSGCount = 0;
1744	data->fDirection = getDirection();
1745	data->fIsPrepared = true;	// Assume prepared - fails safe
1746    }
1747    else if (kIOMDWalkSegments == op) {
1748	if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1749	    return kIOReturnUnderrun;
1750
1751	IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1752	IOByteCount offset  = (IOByteCount) data->fOffset;
1753
1754	IOPhysicalLength length;
1755	if (data->fMapped && IOMapper::gSystem)
1756	    data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
1757	else
1758	    data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1759	data->fLength = length;
1760    }
1761    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
1762    else if (kIOMDDMAMap == op)
1763    {
1764	if (dataSize < sizeof(IOMDDMAMapArgs))
1765	    return kIOReturnUnderrun;
1766	IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1767
1768	if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
1769
1770	err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1771	return (err);
1772    }
1773    else return kIOReturnBadArgument;
1774
1775    return kIOReturnSuccess;
1776}
1777
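// Translate IOKit purgeable-state constants into the Mach vm_purgable_control
// arguments (and back again, below, when reporting the previous state).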
1778static IOReturn
1779purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1780{
1781    IOReturn err = kIOReturnSuccess;
1782
1783    *control = VM_PURGABLE_SET_STATE;
1784    switch (newState)
1785    {
1786	case kIOMemoryPurgeableKeepCurrent:
1787	    *control = VM_PURGABLE_GET_STATE;
1788	    break;
1789
1790	case kIOMemoryPurgeableNonVolatile:
1791	    *state = VM_PURGABLE_NONVOLATILE;
1792	    break;
1793	case kIOMemoryPurgeableVolatile:
1794	    *state = VM_PURGABLE_VOLATILE;
1795	    break;
1796	case kIOMemoryPurgeableEmpty:
1797	    *state = VM_PURGABLE_EMPTY;
1798	    break;
1799	default:
1800	    err = kIOReturnBadArgument;
1801	    break;
1802    }
1803    return (err);
1804}
1805
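// Translate a VM_PURGABLE_* state returned by the VM layer back into the
// corresponding kIOMemoryPurgeable* constant.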
1806static IOReturn
1807purgeableStateBits(int * state)
1808{
1809    IOReturn err = kIOReturnSuccess;
1810
1811    switch (*state)
1812    {
1813	case VM_PURGABLE_NONVOLATILE:
1814	    *state = kIOMemoryPurgeableNonVolatile;
1815	    break;
1816	case VM_PURGABLE_VOLATILE:
1817	    *state = kIOMemoryPurgeableVolatile;
1818	    break;
1819	case VM_PURGABLE_EMPTY:
1820	    *state = kIOMemoryPurgeableEmpty;
1821	    break;
1822	default:
1823	    *state = kIOMemoryPurgeableNonVolatile;
1824	    err = kIOReturnNotReady;
1825	    break;
1826    }
1827    return (err);
1828}
1829
1830IOReturn
1831IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1832						   IOOptionBits * oldState )
1833{
1834    IOReturn	  err = kIOReturnSuccess;
1835    vm_purgable_t control;
1836    int           state;
1837
1838    if (_memEntry)
1839    {
1840	err = super::setPurgeable(newState, oldState);
1841    }
1842    else
1843    {
1844	if (kIOMemoryThreadSafe & _flags)
1845	    LOCK;
1846	do
1847	{
1848	    // Find the appropriate vm_map for the given task
1849	    vm_map_t curMap;
1850	    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1851	    {
1852		err = kIOReturnNotReady;
1853		break;
1854	    }
1855	    else
1856		curMap = get_task_map(_task);
1857
1858	    // can only do one range
1859	    Ranges vec = _ranges;
1860	    IOOptionBits type = _flags & kIOMemoryTypeMask;
1861	    user_addr_t addr;
1862	    IOByteCount len;
1863	    getAddrLenForInd(addr, len, type, vec, 0);
1864
1865	    err = purgeableControlBits(newState, &control, &state);
1866	    if (kIOReturnSuccess != err)
1867		break;
1868	    err = mach_vm_purgable_control(curMap, addr, control, &state);
1869	    if (oldState)
1870	    {
1871		if (kIOReturnSuccess == err)
1872		{
1873		    err = purgeableStateBits(&state);
1874		    *oldState = state;
1875		}
1876	    }
1877	}
1878	while (false);
1879	if (kIOMemoryThreadSafe & _flags)
1880	    UNLOCK;
1881    }
1882    return (err);
1883}
1884
1885IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1886                                           IOOptionBits * oldState )
1887{
1888    IOReturn	  err = kIOReturnSuccess;
1889    vm_purgable_t control;
1890    int           state;
1891
1892    if (kIOMemoryThreadSafe & _flags)
1893	LOCK;
1894
1895    do
1896    {
1897        if (!_memEntry)
1898        {
1899            err = kIOReturnNotReady;
1900            break;
1901        }
1902	err = purgeableControlBits(newState, &control, &state);
1903	if (kIOReturnSuccess != err)
1904	    break;
1905        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1906	if (oldState)
1907	{
1908	    if (kIOReturnSuccess == err)
1909	    {
1910		err = purgeableStateBits(&state);
1911		*oldState = state;
1912	    }
1913	}
1914    }
1915    while (false);
1916
1917    if (kIOMemoryThreadSafe & _flags)
1918	UNLOCK;
1919
1920    return (err);
1921}
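
/*
 * Illustrative sketch only (not part of this file): a client holding a
 * purgeable descriptor typically volatilizes it while idle and, when the
 * memory is needed again, checks whether the contents were reclaimed:
 *
 *	IOOptionBits oldState;
 *	md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *	// ... later ...
 *	md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *	if (kIOMemoryPurgeableEmpty == oldState) {
 *	    // contents were discarded while volatile; regenerate them
 *	}
 */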
1922
1923extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1924extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1925
1926static void SetEncryptOp(addr64_t pa, unsigned int count)
1927{
1928    ppnum_t page, end;
1929
1930    page = atop_64(round_page_64(pa));
1931    end  = atop_64(trunc_page_64(pa + count));
1932    for (; page < end; page++)
1933    {
1934        pmap_clear_noencrypt(page);
1935    }
1936}
1937
1938static void ClearEncryptOp(addr64_t pa, unsigned int count)
1939{
1940    ppnum_t page, end;
1941
1942    page = atop_64(round_page_64(pa));
1943    end  = atop_64(trunc_page_64(pa + count));
1944    for (; page < end; page++)
1945    {
1946        pmap_set_noencrypt(page);
1947    }
1948}
1949
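// Apply a cache maintenance or encryption-marking operation to the physical
// pages backing [offset, offset + length) of this descriptor.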
1950IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1951                                                IOByteCount offset, IOByteCount length )
1952{
1953    IOByteCount remaining;
1954    unsigned int res;
1955    void (*func)(addr64_t pa, unsigned int count) = 0;
1956
1957    switch (options)
1958    {
1959        case kIOMemoryIncoherentIOFlush:
1960            func = &dcache_incoherent_io_flush64;
1961            break;
1962        case kIOMemoryIncoherentIOStore:
1963            func = &dcache_incoherent_io_store64;
1964            break;
1965
1966        case kIOMemorySetEncrypted:
1967            func = &SetEncryptOp;
1968            break;
1969        case kIOMemoryClearEncrypted:
1970            func = &ClearEncryptOp;
1971            break;
1972    }
1973
1974    if (!func)
1975        return (kIOReturnUnsupported);
1976
1977    if (kIOMemoryThreadSafe & _flags)
1978	LOCK;
1979
1980    res = 0x0UL;
1981    remaining = length = min(length, getLength() - offset);
1982    while (remaining)
1983    // (process another target segment?)
1984    {
1985        addr64_t    dstAddr64;
1986        IOByteCount dstLen;
1987
1988        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1989        if (!dstAddr64)
1990            break;
1991
1992        // Clip segment length to remaining
1993        if (dstLen > remaining)
1994            dstLen = remaining;
1995
1996	(*func)(dstAddr64, dstLen);
1997
1998        offset    += dstLen;
1999        remaining -= dstLen;
2000    }
2001
2002    if (kIOMemoryThreadSafe & _flags)
2003	UNLOCK;
2004
2005    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2006}
2007
#if defined(__i386__) || defined(__x86_64__)
extern vm_offset_t		first_avail;
#define io_kernel_static_end	first_avail
#else
#error io_kernel_static_end is undefined for this architecture
#endif
2016
2017
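// Build a page list for wired kernel-static memory directly from the kernel
// pmap; no UPL is created (*upl is returned NULL) since these pages can
// never be paged out.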
2018static kern_return_t
2019io_get_kernel_static_upl(
2020	vm_map_t		/* map */,
2021	uintptr_t		offset,
2022	vm_size_t		*upl_size,
2023	upl_t			*upl,
2024	upl_page_info_array_t	page_list,
2025	unsigned int		*count,
2026	ppnum_t			*highest_page)
2027{
2028    unsigned int pageCount, page;
2029    ppnum_t phys;
2030    ppnum_t highestPage = 0;
2031
2032    pageCount = atop_32(*upl_size);
2033    if (pageCount > *count)
2034	pageCount = *count;
2035
2036    *upl = NULL;
2037
2038    for (page = 0; page < pageCount; page++)
2039    {
2040	phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2041	if (!phys)
2042	    break;
2043	page_list[page].phys_addr = phys;
2044	page_list[page].pageout	  = 0;
2045	page_list[page].absent	  = 0;
2046	page_list[page].dirty	  = 0;
2047	page_list[page].precious  = 0;
2048	page_list[page].device	  = 0;
2049	if (phys > highestPage)
2050	    highestPage = phys;
2051    }
2052
2053    *highest_page = highestPage;
2054
2055    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2056}
2057
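// Wire down this descriptor's virtual ranges by creating UPLs (or a static
// page list for kernel-static memory) and record them, together with their
// page lists, in _memoryEntries.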
2058IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2059{
2060    IOOptionBits type = _flags & kIOMemoryTypeMask;
2061    IOReturn error = kIOReturnCannotWire;
2062    ioGMDData *dataP;
2063    upl_page_info_array_t pageInfo;
2064    ppnum_t mapBase = 0;
2065    ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2066
2067    assert(!_wireCount);
2068    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2069
2070    if (_pages > gIOMaximumMappedIOPageCount)
2071	return kIOReturnNoResources;
2072
2073    dataP = getDataP(_memoryEntries);
2074    IOMapper *mapper;
2075    mapper = dataP->fMapper;
2076    dataP->fMappedBase = 0;
2077
2078    if (forDirection == kIODirectionNone)
2079        forDirection = getDirection();
2080
2081    int uplFlags;    // This Mem Desc's default flags for upl creation
2082    switch (kIODirectionOutIn & forDirection)
2083    {
2084    case kIODirectionOut:
2085        // Pages do not need to be marked as dirty on commit
2086        uplFlags = UPL_COPYOUT_FROM;
2087        _flags |= kIOMemoryPreparedReadOnly;
2088        break;
2089
2090    case kIODirectionIn:
2091    default:
2092        uplFlags = 0;	// i.e. ~UPL_COPYOUT_FROM
2093        break;
2094    }
2095    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2096
2097#ifdef UPL_NEED_32BIT_ADDR
2098    if (kIODirectionPrepareToPhys32 & forDirection)
2099    {
2100	if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2101	if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2102    }
2103#endif
2104
2105    // Note that appendBytes(NULL) zeros the data up to the desired length.
2106    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
2107    dataP = 0;
2108
2109    // Find the appropriate vm_map for the given task
2110    vm_map_t curMap;
2111    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2112        curMap = 0;
2113    else
2114        { curMap = get_task_map(_task); }
2115
2116    // Iterate over the vector of virtual ranges
2117    Ranges vec = _ranges;
2118    unsigned int pageIndex = 0;
2119    IOByteCount mdOffset = 0;
2120    ppnum_t highestPage = 0;
2121
2122    for (UInt range = 0; range < _rangesCount; range++) {
2123        ioPLBlock iopl;
2124	user_addr_t startPage;
2125        IOByteCount numBytes;
2126	ppnum_t highPage = 0;
2127
2128	// Get the startPage address and length of vec[range]
2129	getAddrLenForInd(startPage, numBytes, type, vec, range);
2130	iopl.fPageOffset = startPage & PAGE_MASK;
2131	numBytes += iopl.fPageOffset;
2132	startPage = trunc_page_64(startPage);
2133
2134	if (mapper)
2135	    iopl.fMappedPage = mapBase + pageIndex;
2136	else
2137	    iopl.fMappedPage = 0;
2138
2139	// Iterate over the current range, creating UPLs
2140        while (numBytes) {
2141	    vm_address_t kernelStart = (vm_address_t) startPage;
2142            vm_map_t theMap;
2143	    if (curMap)
2144		theMap = curMap;
2145	    else if (!sharedMem) {
2146		assert(_task == kernel_task);
2147		theMap = IOPageableMapForAddress(kernelStart);
2148	    }
2149	    else
2150		theMap = NULL;
2151
2152            int ioplFlags = uplFlags;
2153	    dataP = getDataP(_memoryEntries);
2154	    pageInfo = getPageList(dataP);
2155            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2156
2157            vm_size_t ioplSize = round_page(numBytes);
2158            unsigned int numPageInfo = atop_32(ioplSize);
2159
2160	    if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2161		error = io_get_kernel_static_upl(theMap,
2162						kernelStart,
2163						&ioplSize,
2164						&iopl.fIOPL,
2165						baseInfo,
2166						&numPageInfo,
2167						&highPage);
2168	    }
2169	    else if (sharedMem) {
2170		error = memory_object_iopl_request(sharedMem,
2171						ptoa_32(pageIndex),
2172						&ioplSize,
2173						&iopl.fIOPL,
2174						baseInfo,
2175						&numPageInfo,
2176						&ioplFlags);
2177	    }
2178	    else {
2179		assert(theMap);
2180		error = vm_map_create_upl(theMap,
2181						startPage,
2182						(upl_size_t*)&ioplSize,
2183						&iopl.fIOPL,
2184						baseInfo,
2185						&numPageInfo,
2186						&ioplFlags);
2187	    }
2188
2189            assert(ioplSize);
2190            if (error != KERN_SUCCESS)
2191                goto abortExit;
2192
2193	    if (iopl.fIOPL)
2194		highPage = upl_get_highest_page(iopl.fIOPL);
2195	    if (highPage > highestPage)
2196		highestPage = highPage;
2197
2198            error = kIOReturnCannotWire;
2199
2200            if (baseInfo->device) {
2201                numPageInfo = 1;
2202                iopl.fFlags  = kIOPLOnDevice;
2203            }
2204            else {
2205                iopl.fFlags = 0;
2206            }
2207
2208            iopl.fIOMDOffset = mdOffset;
2209            iopl.fPageInfo = pageIndex;
2210
2211#if 0
	    // used to remove the upl for auto prepares here, for some errant code
	    // that freed memory before freeing the descriptor pointing at it
2214	    if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2215	    {
2216		upl_commit(iopl.fIOPL, 0, 0);
2217		upl_deallocate(iopl.fIOPL);
2218		iopl.fIOPL = 0;
2219	    }
2220#endif
2221
2222            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up the partially created and unsaved iopl
2224                if (iopl.fIOPL) {
2225                    upl_abort(iopl.fIOPL, 0);
2226                    upl_deallocate(iopl.fIOPL);
2227                }
2228                goto abortExit;
2229            }
2230	    dataP = 0;
2231
            // Check for multiple iopls in one virtual range
2233            pageIndex += numPageInfo;
2234            mdOffset -= iopl.fPageOffset;
2235            if (ioplSize < numBytes) {
2236                numBytes -= ioplSize;
2237                startPage += ioplSize;
2238                mdOffset += ioplSize;
2239                iopl.fPageOffset = 0;
2240		if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2241            }
2242            else {
2243                mdOffset += numBytes;
2244                break;
2245            }
2246        }
2247    }
2248
2249    _highestPage = highestPage;
2250
2251    return kIOReturnSuccess;
2252
2253abortExit:
2254    {
2255        dataP = getDataP(_memoryEntries);
2256        UInt done = getNumIOPL(_memoryEntries, dataP);
2257        ioPLBlock *ioplList = getIOPLList(dataP);
2258
2259        for (UInt range = 0; range < done; range++)
2260	{
2261	    if (ioplList[range].fIOPL) {
2262             upl_abort(ioplList[range].fIOPL, 0);
2263             upl_deallocate(ioplList[range].fIOPL);
2264	    }
2265	}
2266	(void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2267    }
2268
2269    if (error == KERN_FAILURE)
2270        error = kIOReturnCannotWire;
2271
2272    return error;
2273}
2274
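// (Re)initialize the _memoryEntries OSData that carries the ioGMDData
// header, the ioPLBlock list and the UPL page lists, and resolve which
// IOMapper this descriptor will use.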
2275bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
2276{
2277    ioGMDData * dataP;
2278    unsigned    dataSize = size;
2279
2280    if (!_memoryEntries) {
2281	_memoryEntries = OSData::withCapacity(dataSize);
2282	if (!_memoryEntries)
2283	    return false;
2284    }
2285    else if (!_memoryEntries->initWithCapacity(dataSize))
2286	return false;
2287
2288    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
2289    dataP = getDataP(_memoryEntries);
2290
2291    if (mapper == kIOMapperWaitSystem) {
2292        IOMapper::checkForSystemMapper();
2293        mapper = IOMapper::gSystem;
2294    }
2295    dataP->fMapper               = mapper;
2296    dataP->fPageCnt              = 0;
2297    dataP->fMappedBase           = 0;
2298    dataP->fDMAMapNumAddressBits = 64;
2299    dataP->fDMAMapAlignment      = 0;
2300    dataP->fPreparationID        = kIOPreparationIDUnprepared;
2301
2302    return (true);
2303}
2304
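// Generic DMA mapping path: walk the physical segments twice, first to size
// the I/O virtual allocation and then to enter each physical page into the
// mapper at the allocated base.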
2305IOReturn IOMemoryDescriptor::dmaMap(
2306    IOMapper                    * mapper,
2307    const IODMAMapSpecification * mapSpec,
2308    uint64_t                      offset,
2309    uint64_t                      length,
2310    uint64_t                    * address,
2311    ppnum_t                     * mapPages)
2312{
2313    IOMDDMAWalkSegmentState  walkState;
2314    IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
2315    IOOptionBits             mdOp;
2316    IOReturn                 ret;
2317    IOPhysicalLength         segLen;
2318    addr64_t                 phys, align, pageOffset;
2319    ppnum_t                  base, pageIndex, pageCount;
2320    uint64_t                 index;
2321    uint32_t                 mapOptions = 0;
2322
2323    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2324
2325    walkArgs->fMapped = false;
2326    mdOp = kIOMDFirstSegment;
2327    pageCount = 0;
2328    for (index = 0; index < length; )
2329    {
2330	if (index && (page_mask & (index + pageOffset))) break;
2331
2332	walkArgs->fOffset = offset + index;
2333	ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2334	mdOp = kIOMDWalkSegments;
2335	if (ret != kIOReturnSuccess) break;
2336	phys = walkArgs->fIOVMAddr;
2337	segLen = walkArgs->fLength;
2338
2339	align = (phys & page_mask);
2340	if (!index) pageOffset = align;
2341	else if (align) break;
2342	pageCount += atop_64(round_page_64(align + segLen));
2343	index += segLen;
2344    }
2345
2346    if (index < length) return (kIOReturnVMError);
2347
2348    base = mapper->iovmMapMemory(this, offset, pageCount,
2349				 mapOptions, NULL, mapSpec);
2350
2351    if (!base) return (kIOReturnNoResources);
2352
2353    mdOp = kIOMDFirstSegment;
2354    for (pageIndex = 0, index = 0; index < length; )
2355    {
2356	walkArgs->fOffset = offset + index;
2357	ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2358	mdOp = kIOMDWalkSegments;
2359	if (ret != kIOReturnSuccess) break;
2360	phys = walkArgs->fIOVMAddr;
2361	segLen = walkArgs->fLength;
2362
2363    	ppnum_t page = atop_64(phys);
2364    	ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
2365	while (count--)
2366	{
2367	    mapper->iovmInsert(base, pageIndex, page);
2368	    page++;
2369	    pageIndex++;
2370	}
2371	index += segLen;
2372    }
2373    if (pageIndex != pageCount) panic("pageIndex");
2374
2375    *address = ptoa_64(base) + pageOffset;
2376    if (mapPages) *mapPages = pageCount;
2377
2378    return (kIOReturnSuccess);
2379}
2380
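// Optimized DMA mapping path: when the whole descriptor is being mapped and
// wired page lists are already present in _memoryEntries, hand the page list
// to the mapper in one call rather than walking segments.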
2381IOReturn IOGeneralMemoryDescriptor::dmaMap(
2382    IOMapper                    * mapper,
2383    const IODMAMapSpecification * mapSpec,
2384    uint64_t                      offset,
2385    uint64_t                      length,
2386    uint64_t                    * address,
2387    ppnum_t                     * mapPages)
2388{
2389    IOReturn          err = kIOReturnSuccess;
2390    ioGMDData *       dataP;
2391    IOOptionBits      type = _flags & kIOMemoryTypeMask;
2392
2393    *address = 0;
2394    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
2395
2396    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
2397     || offset || (length != _length))
2398    {
2399	err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
2400    }
2401    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
2402    {
2403	const ioPLBlock * ioplList = getIOPLList(dataP);
2404	upl_page_info_t * pageList;
2405	uint32_t          mapOptions = 0;
2406	ppnum_t           base;
2407
2408	IODMAMapSpecification mapSpec;
2409	bzero(&mapSpec, sizeof(mapSpec));
2410	mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2411	mapSpec.alignment = dataP->fDMAMapAlignment;
2412
2413	// For external UPLs the fPageInfo field points directly to
2414	// the upl's upl_page_info_t array.
2415	if (ioplList->fFlags & kIOPLExternUPL)
2416	{
2417	    pageList = (upl_page_info_t *) ioplList->fPageInfo;
2418	    mapOptions |= kIODMAMapPagingPath;
2419	}
2420	else
2421	    pageList = getPageList(dataP);
2422
2423    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2424
2425	// Check for direct device non-paged memory
2426	if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
2427
2428	base = mapper->iovmMapMemory(
2429			this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
2430	*address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
2431	if (mapPages) *mapPages = _pages;
2432    }
2433
2434    return (err);
2435}
2436
2437/*
2438 * prepare
2439 *
2440 * Prepare the memory for an I/O transfer.  This involves paging in
2441 * the memory, if necessary, and wiring it down for the duration of
2442 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method need not be
 * called for non-pageable memory.
2445 */
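
/*
 * Illustrative usage sketch only (error handling elided): every prepare()
 * must be balanced by a complete() once the transfer has finished.
 *
 *	if (kIOReturnSuccess == md->prepare()) {
 *	    // ... perform the I/O or DMA against the wired memory ...
 *	    md->complete();
 *	}
 */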
2446
2447IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2448{
2449    IOReturn error    = kIOReturnSuccess;
2450    IOOptionBits type = _flags & kIOMemoryTypeMask;
2451
2452    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2453	return kIOReturnSuccess;
2454
2455    if (_prepareLock)
2456	IOLockLock(_prepareLock);
2457
2458    if (!_wireCount
2459    && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2460        error = wireVirtual(forDirection);
2461    }
2462
2463    if (kIOReturnSuccess == error)
2464    {
2465	if (1 == ++_wireCount)
2466	{
2467	    if (kIOMemoryClearEncrypt & _flags)
2468	    {
2469		performOperation(kIOMemoryClearEncrypted, 0, _length);
2470	    }
2471	}
2472    }
2473
2474    if (_prepareLock)
2475	IOLockUnlock(_prepareLock);
2476
2477    return error;
2478}
2479
2480/*
2481 * complete
2482 *
2483 * Complete processing of the memory after an I/O transfer finishes.
2484 * This method should not be called unless a prepare was previously
 * issued; prepare() and complete() must occur in pairs, before and
 * after an I/O transfer involving pageable memory.
2487 */
2488
2489IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2490{
2491    IOOptionBits type = _flags & kIOMemoryTypeMask;
2492
2493    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2494	return kIOReturnSuccess;
2495
2496    if (_prepareLock)
2497	IOLockLock(_prepareLock);
2498
2499    assert(_wireCount);
2500
2501    if (_wireCount)
2502    {
2503        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2504        {
2505            performOperation(kIOMemorySetEncrypted, 0, _length);
2506        }
2507
2508	_wireCount--;
2509	if (!_wireCount)
2510	{
2511	    IOOptionBits type = _flags & kIOMemoryTypeMask;
2512	    ioGMDData * dataP = getDataP(_memoryEntries);
2513	    ioPLBlock *ioplList = getIOPLList(dataP);
2514	    UInt count = getNumIOPL(_memoryEntries, dataP);
2515
2516#if IOMD_DEBUG_DMAACTIVE
2517	    if (__iomd_reservedA) panic("complete() while dma active");
2518#endif /* IOMD_DEBUG_DMAACTIVE */
2519
2520	    if (dataP->fMappedBase) {
2521		dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
2522	        dataP->fMappedBase = 0;
2523            }
	    // Only complete the iopls that we created, i.e. those for the virtual types
2525	    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2526		for (UInt ind = 0; ind < count; ind++)
2527		    if (ioplList[ind].fIOPL) {
2528			 upl_commit(ioplList[ind].fIOPL, 0, 0);
2529			 upl_deallocate(ioplList[ind].fIOPL);
2530		    }
2531	    } else if (kIOMemoryTypeUPL == type) {
2532		upl_set_referenced(ioplList[0].fIOPL, false);
2533	    }
2534
2535	    (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2536
2537	    dataP->fPreparationID = kIOPreparationIDUnprepared;
2538	}
2539    }
2540
2541    if (_prepareLock)
2542	IOLockUnlock(_prepareLock);
2543
2544    return kIOReturnSuccess;
2545}
2546
2547IOReturn IOGeneralMemoryDescriptor::doMap(
2548	vm_map_t		__addressMap,
2549	IOVirtualAddress *	__address,
2550	IOOptionBits		options,
2551	IOByteCount		__offset,
2552	IOByteCount		__length )
2553
2554{
2555#ifndef __LP64__
2556    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2557#endif /* !__LP64__ */
2558
2559    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
2560    mach_vm_size_t offset  = mapping->fOffset + __offset;
2561    mach_vm_size_t length  = mapping->fLength;
2562
2563    kern_return_t kr = kIOReturnVMError;
2564    ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2565
2566    IOOptionBits type = _flags & kIOMemoryTypeMask;
2567    Ranges vec = _ranges;
2568
2569    user_addr_t range0Addr = 0;
2570    IOByteCount range0Len = 0;
2571
2572    if ((offset >= _length) || ((offset + length) > _length))
2573	return( kIOReturnBadArgument );
2574
2575    if (vec.v)
2576	getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2577
2578    // mapping source == dest? (could be much better)
2579    if( _task
2580     && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2581     && (1 == _rangesCount) && (0 == offset)
2582     && range0Addr && (length <= range0Len) )
2583    {
2584	mapping->fAddress = range0Addr;
2585	mapping->fOptions |= kIOMapStatic;
2586
2587	return( kIOReturnSuccess );
2588    }
2589
2590    if( 0 == sharedMem) {
2591
2592        vm_size_t size = ptoa_32(_pages);
2593
2594        if( _task) {
2595
2596            memory_object_size_t actualSize = size;
2597	    vm_prot_t            prot       = VM_PROT_READ;
2598	    if (!(kIOMapReadOnly & options))
2599		prot |= VM_PROT_WRITE;
2600	    else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2601		prot |= VM_PROT_WRITE;
2602
2603            if (_rangesCount == 1)
2604            {
2605                kr = mach_make_memory_entry_64(get_task_map(_task),
2606                                                &actualSize, range0Addr,
2607                                                prot, &sharedMem,
2608                                                NULL);
2609            }
2610            if( (_rangesCount != 1)
2611                || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2612            do
2613	    {
2614#if IOASSERT
2615                IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2616		      _rangesCount, (UInt64)actualSize, (UInt64)size);
2617#endif
2618                kr = kIOReturnVMError;
2619                if (sharedMem)
2620                {
2621                    ipc_port_release_send(sharedMem);
2622                    sharedMem = MACH_PORT_NULL;
2623                }
2624
2625		mach_vm_address_t address, segDestAddr;
2626                mach_vm_size_t    mapLength;
2627                unsigned          rangesIndex;
2628                IOOptionBits      type = _flags & kIOMemoryTypeMask;
2629                user_addr_t       srcAddr;
2630                IOPhysicalLength  segLen = 0;
2631
2632                // Find starting address within the vector of ranges
2633                for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2634                    getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2635                    if (offset < segLen)
2636                        break;
2637                    offset -= segLen; // (make offset relative)
2638                }
2639
2640		mach_vm_size_t    pageOffset = (srcAddr & PAGE_MASK);
2641		address = trunc_page_64(mapping->fAddress);
2642
2643		if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2644		{
2645		    vm_map_t map = mapping->fAddressMap;
2646		    kr = IOMemoryDescriptorMapCopy(&map,
2647						    options,
2648						    offset, &address, round_page_64(length + pageOffset));
2649                    if (kr == KERN_SUCCESS)
2650                    {
2651                        segDestAddr  = address;
2652                        segLen      -= offset;
2653                        srcAddr     += offset;
2654                        mapLength    = length;
2655
2656                        while (true)
2657                        {
2658                            vm_prot_t cur_prot, max_prot;
2659
2660                            if (segLen > length) segLen = length;
2661                            kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2662                                                    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2663                                                    get_task_map(_task), trunc_page_64(srcAddr),
2664                                                    FALSE /* copy */,
2665                                                    &cur_prot,
2666                                                    &max_prot,
2667                                                    VM_INHERIT_NONE);
2668                            if (KERN_SUCCESS == kr)
2669                            {
2670                                if ((!(VM_PROT_READ & cur_prot))
2671                                    || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2672                                {
2673                                    kr = KERN_PROTECTION_FAILURE;
2674                                }
2675                            }
2676                            if (KERN_SUCCESS != kr)
2677                                break;
2678                            segDestAddr += segLen;
2679                            mapLength   -= segLen;
2680                            if (!mapLength)
2681                                break;
2682                            rangesIndex++;
2683                            if (rangesIndex >= _rangesCount)
2684                            {
2685                                kr = kIOReturnBadArgument;
2686                                break;
2687                            }
2688                            getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2689                            if (srcAddr & PAGE_MASK)
2690                            {
2691                                kr = kIOReturnBadArgument;
2692                                break;
2693                            }
2694                            if (segLen > mapLength)
2695                                segLen = mapLength;
2696                        }
2697                        if (KERN_SUCCESS != kr)
2698                        {
2699                            mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2700                        }
2701                    }
2702
2703		    if (KERN_SUCCESS == kr)
2704			mapping->fAddress = address + pageOffset;
2705		    else
2706			mapping->fAddress = NULL;
2707		}
2708            }
2709            while (false);
2710        }
2711	else do
2712	{	// _task == 0, must be physical
2713
2714            memory_object_t 	pager;
2715	    unsigned int    	flags = 0;
2716    	    addr64_t		pa;
2717    	    IOPhysicalLength	segLen;
2718
2719	    pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2720
2721            if( !getKernelReserved())
2722                continue;
2723            reserved->dp.pagerContig = (1 == _rangesCount);
2724	    reserved->dp.memory      = this;
2725
	    /* What cache mode do we need? */
2727            switch(options & kIOMapCacheMask ) {
2728
2729		case kIOMapDefaultCache:
2730		default:
2731		    flags = IODefaultCacheBits(pa);
2732		    if (DEVICE_PAGER_CACHE_INHIB & flags)
2733		    {
2734			if (DEVICE_PAGER_GUARDED & flags)
2735			    mapping->fOptions |= kIOMapInhibitCache;
2736			else
2737			    mapping->fOptions |= kIOMapWriteCombineCache;
2738		    }
2739		    else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2740			mapping->fOptions |= kIOMapWriteThruCache;
2741		    else
2742			mapping->fOptions |= kIOMapCopybackCache;
2743		    break;
2744
2745		case kIOMapInhibitCache:
2746		    flags = DEVICE_PAGER_CACHE_INHIB |
2747				    DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2748		    break;
2749
2750		case kIOMapWriteThruCache:
2751		    flags = DEVICE_PAGER_WRITE_THROUGH |
2752				    DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2753		    break;
2754
2755		case kIOMapCopybackCache:
2756		    flags = DEVICE_PAGER_COHERENT;
2757		    break;
2758
2759		case kIOMapWriteCombineCache:
2760		    flags = DEVICE_PAGER_CACHE_INHIB |
2761				    DEVICE_PAGER_COHERENT;
2762		    break;
2763            }
2764
2765	    flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2766
2767            pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2768								size, flags);
2769            assert( pager );
2770
2771            if( pager) {
2772                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2773                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2774
2775                assert( KERN_SUCCESS == kr );
2776                if( KERN_SUCCESS != kr)
2777		{
2778		    device_pager_deallocate( pager );
2779                    pager = MACH_PORT_NULL;
2780                    sharedMem = MACH_PORT_NULL;
2781                }
2782            }
2783	    if( pager && sharedMem)
2784		reserved->dp.devicePager    = pager;
2785
2786        } while( false );
2787
2788        _memEntry = (void *) sharedMem;
2789    }
2790
2791    IOReturn result;
2792    if (0 == sharedMem)
2793      result = kr;
2794    else
2795      result = super::doMap( __addressMap, __address,
2796					options, __offset, __length );
2797
2798    return( result );
2799}
2800
2801IOReturn IOGeneralMemoryDescriptor::doUnmap(
2802	vm_map_t		addressMap,
2803	IOVirtualAddress	__address,
2804	IOByteCount		__length )
2805{
2806    return (super::doUnmap(addressMap, __address, __length));
2807}
2808
2809/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2810
2811#undef super
2812#define super OSObject
2813
2814OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2815
2816OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2817OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2818OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2819OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2820OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2821OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2822OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2823OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2824
2825/* ex-inline function implementation */
2826IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2827    { return( getPhysicalSegment( 0, 0 )); }
2828
2829/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2830
2831bool IOMemoryMap::init(
2832        task_t			intoTask,
2833        mach_vm_address_t	toAddress,
2834        IOOptionBits		_options,
2835        mach_vm_size_t		_offset,
2836        mach_vm_size_t		_length )
2837{
2838    if (!intoTask)
2839	return( false);
2840
2841    if (!super::init())
2842	return(false);
2843
2844    fAddressMap  = get_task_map(intoTask);
2845    if (!fAddressMap)
2846	return(false);
2847    vm_map_reference(fAddressMap);
2848
2849    fAddressTask = intoTask;
2850    fOptions     = _options;
2851    fLength      = _length;
2852    fOffset	 = _offset;
2853    fAddress     = toAddress;
2854
2855    return (true);
2856}
2857
2858bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2859{
2860    if (!_memory)
2861	return(false);
2862
2863    if (!fSuperMap)
2864    {
2865	if( (_offset + fLength) > _memory->getLength())
2866	    return( false);
2867	fOffset = _offset;
2868    }
2869
2870    _memory->retain();
2871    if (fMemory)
2872    {
2873	if (fMemory != _memory)
2874	    fMemory->removeMapping(this);
2875	fMemory->release();
2876    }
2877    fMemory = _memory;
2878
2879    return( true );
2880}
2881
2882struct IOMemoryDescriptorMapAllocRef
2883{
2884    ipc_port_t		sharedMem;
2885    vm_map_t            map;
2886    mach_vm_address_t	mapped;
2887    mach_vm_size_t	size;
2888    mach_vm_size_t	sourceOffset;
2889    IOOptionBits	options;
2890};
2891
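// Map the ref's shared memory entry into (or, with no entry, allocate fresh
// pageable memory from) the given vm_map, honoring the kIOMap cache and
// placement options.  Also used as the callback for IOIteratePageableMaps().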
2892static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2893{
2894    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2895    IOReturn			    err;
2896
2897    do {
2898        if( ref->sharedMem)
2899	{
2900            vm_prot_t prot = VM_PROT_READ
2901                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2902
2903	    // VM system requires write access to change cache mode
2904	    if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2905		prot |= VM_PROT_WRITE;
2906
2907            // set memory entry cache
2908            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2909            switch (ref->options & kIOMapCacheMask)
2910            {
2911		case kIOMapInhibitCache:
2912                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2913                    break;
2914
2915		case kIOMapWriteThruCache:
2916                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2917                    break;
2918
2919		case kIOMapWriteCombineCache:
2920                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2921                    break;
2922
2923		case kIOMapCopybackCache:
2924                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2925                    break;
2926
2927		case kIOMapCopybackInnerCache:
2928                    SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2929                    break;
2930
2931		case kIOMapDefaultCache:
2932		default:
2933                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2934                    break;
2935            }
2936
2937            vm_size_t unused = 0;
2938
2939            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2940                                            memEntryCacheMode, NULL, ref->sharedMem );
2941            if (KERN_SUCCESS != err)
2942                IOLog("MAP_MEM_ONLY failed %d\n", err);
2943
2944            err = mach_vm_map( map,
2945                            &ref->mapped,
2946                            ref->size, 0 /* mask */,
2947                            (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2948                            | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2949                            ref->sharedMem, ref->sourceOffset,
2950                            false, // copy
2951                            prot, // cur
2952                            prot, // max
2953                            VM_INHERIT_NONE);
2954
2955            if( KERN_SUCCESS != err) {
2956                ref->mapped = 0;
2957                continue;
2958            }
2959            ref->map = map;
2960        }
2961	else
2962	{
2963            err = mach_vm_allocate(map, &ref->mapped, ref->size,
2964                            ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2965                            | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2966            if( KERN_SUCCESS != err) {
2967                ref->mapped = 0;
2968                continue;
2969            }
2970            ref->map = map;
            // make sure these pages don't get copied if the task forks.
2972            err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
2973            assert( KERN_SUCCESS == err );
2974        }
2975    }
2976    while( false );
2977
2978    return( err );
2979}
2980
2981kern_return_t
2982IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2983				mach_vm_size_t offset,
2984				mach_vm_address_t * address, mach_vm_size_t length)
2985{
2986    IOReturn err;
2987    IOMemoryDescriptorMapAllocRef ref;
2988
2989    ref.map          = *map;
2990    ref.sharedMem    = entry;
2991    ref.sourceOffset = trunc_page_64(offset);
2992    ref.options	     = options;
2993    ref.size         = length;
2994
2995    if (options & kIOMapAnywhere)
2996	// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2997	ref.mapped = 0;
2998    else
2999	ref.mapped = *address;
3000
3001    if( ref.sharedMem && (ref.map == kernel_map) && pageable)
3002	err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
3003    else
3004	err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
3005
3006    *address = ref.mapped;
3007    *map     = ref.map;
3008
3009    return (err);
3010}
3011
3012kern_return_t
3013IOMemoryDescriptorMapCopy(vm_map_t * map,
3014				IOOptionBits options,
3015				mach_vm_size_t offset,
3016				mach_vm_address_t * address, mach_vm_size_t length)
3017{
3018    IOReturn err;
3019    IOMemoryDescriptorMapAllocRef ref;
3020
3021    ref.map          = *map;
3022    ref.sharedMem    = NULL;
3023    ref.sourceOffset = trunc_page_64(offset);
3024    ref.options	     = options;
3025    ref.size         = length;
3026
3027    if (options & kIOMapAnywhere)
3028	// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3029	ref.mapped = 0;
3030    else
3031	ref.mapped = *address;
3032
3033    if (ref.map == kernel_map)
3034	err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
3035    else
3036	err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
3037
3038    *address = ref.mapped;
3039    *map     = ref.map;
3040
3041    return (err);
3042}
3043
3044IOReturn IOMemoryDescriptor::doMap(
3045	vm_map_t		__addressMap,
3046	IOVirtualAddress *	__address,
3047	IOOptionBits		options,
3048	IOByteCount		__offset,
3049	IOByteCount		__length )
3050{
3051#ifndef __LP64__
3052    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
3053#endif /* !__LP64__ */
3054
3055    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
3056    mach_vm_size_t offset  = mapping->fOffset + __offset;
3057    mach_vm_size_t length  = mapping->fLength;
3058
3059    IOReturn	      err = kIOReturnSuccess;
3060    memory_object_t   pager;
3061    mach_vm_size_t    pageOffset;
3062    IOPhysicalAddress sourceAddr;
3063    unsigned int lock_count;
3064
3065    do
3066    {
3067	sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
3068	pageOffset = sourceAddr - trunc_page( sourceAddr );
3069
3070	if( reserved)
3071	    pager = (memory_object_t) reserved->dp.devicePager;
3072	else
3073	    pager = MACH_PORT_NULL;
3074
3075	if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3076	{
3077	    upl_t	   redirUPL2;
3078	    vm_size_t      size;
3079	    int		   flags;
3080
3081	    if (!_memEntry)
3082	    {
3083		err = kIOReturnNotReadable;
3084		continue;
3085	    }
3086
3087	    size = round_page(mapping->fLength + pageOffset);
3088	    flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3089			| UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3090
3091	    if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
3092					    NULL, NULL,
3093					    &flags))
3094		redirUPL2 = NULL;
3095
3096	    for (lock_count = 0;
3097		 IORecursiveLockHaveLock(gIOMemoryLock);
3098		 lock_count++) {
3099	      UNLOCK;
3100	    }
3101	    err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3102	    for (;
3103		 lock_count;
3104		 lock_count--) {
3105	      LOCK;
3106	    }
3107
3108	    if (kIOReturnSuccess != err)
3109	    {
3110		IOLog("upl_transpose(%x)\n", err);
3111		err = kIOReturnSuccess;
3112	    }
3113
3114	    if (redirUPL2)
3115	    {
3116		upl_commit(redirUPL2, NULL, 0);
3117		upl_deallocate(redirUPL2);
3118		redirUPL2 = 0;
3119	    }
3120	    {
3121		// swap the memEntries since they now refer to different vm_objects
3122		void * me = _memEntry;
3123		_memEntry = mapping->fMemory->_memEntry;
3124		mapping->fMemory->_memEntry = me;
3125	    }
3126	    if (pager)
3127		err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3128	}
3129	else
3130	{
3131	    mach_vm_address_t address;
3132
3133	    if (!(options & kIOMapAnywhere))
3134	    {
3135		address = trunc_page_64(mapping->fAddress);
3136		if( (mapping->fAddress - address) != pageOffset)
3137		{
3138		    err = kIOReturnVMError;
3139		    continue;
3140		}
3141	    }
3142
3143            vm_map_t map = mapping->fAddressMap;
3144	    err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
3145						    options, (kIOMemoryBufferPageable & _flags),
3146						    offset, &address, round_page_64(length + pageOffset));
3147	    if( err != KERN_SUCCESS)
3148		continue;
3149
3150	    if (!_memEntry || pager)
3151	    {
3152		err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
3153		if (err != KERN_SUCCESS)
3154		    doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
3155	    }
3156
3157#if DEBUG
3158	if (kIOLogMapping & gIOKitDebug)
3159	    IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
3160		  err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
3161#endif
3162
3163	    if (err == KERN_SUCCESS)
3164		mapping->fAddress = address + pageOffset;
3165	    else
3166		mapping->fAddress = NULL;
3167	}
3168    }
3169    while( false );
3170
3171    return (err);
3172}
3173
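// Populate the device pager (if any) and pre-fault the pages backing a new
// mapping, so that kernel mappings in particular do not have to take a VM
// fault later, which cannot be done from interrupt level.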
3174IOReturn IOMemoryDescriptor::handleFault(
3175        void *			_pager,
3176	vm_map_t		addressMap,
3177	mach_vm_address_t	address,
3178	mach_vm_size_t		sourceOffset,
3179	mach_vm_size_t		length,
3180        IOOptionBits		options )
3181{
3182    IOReturn		err = kIOReturnSuccess;
3183    memory_object_t	pager = (memory_object_t) _pager;
3184    mach_vm_size_t	size;
3185    mach_vm_size_t	bytes;
3186    mach_vm_size_t	page;
3187    mach_vm_size_t	pageOffset;
3188    mach_vm_size_t	pagerOffset;
3189    IOPhysicalLength	segLen;
3190    addr64_t		physAddr;
3191
3192    if( !addressMap)
3193    {
3194        if( kIOMemoryRedirected & _flags)
3195	{
3196#if DEBUG
3197            IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3198#endif
3199            do {
3200	    	SLEEP;
3201            } while( kIOMemoryRedirected & _flags );
3202        }
3203
3204        return( kIOReturnSuccess );
3205    }
3206
3207    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3208    assert( physAddr );
3209    pageOffset = physAddr - trunc_page_64( physAddr );
3210    pagerOffset = sourceOffset;
3211
3212    size = length + pageOffset;
3213    physAddr -= pageOffset;
3214
3215    segLen += pageOffset;
3216    bytes = size;
3217    do
3218    {
3219	// in the middle of the loop only map whole pages
3220	if( segLen >= bytes)
3221	    segLen = bytes;
3222	else if( segLen != trunc_page( segLen))
3223	    err = kIOReturnVMError;
3224        if( physAddr != trunc_page_64( physAddr))
3225	    err = kIOReturnBadArgument;
3226	if (kIOReturnSuccess != err)
3227	    break;
3228
3229#if DEBUG
3230	if( kIOLogMapping & gIOKitDebug)
3231	    IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
3232                addressMap, address + pageOffset, physAddr + pageOffset,
3233		segLen - pageOffset);
3234#endif
3235
3236
3237        if( pager) {
3238            if( reserved && reserved->dp.pagerContig) {
3239                IOPhysicalLength	allLen;
3240                addr64_t		allPhys;
3241
3242                allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
3243                assert( allPhys );
3244		err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
3245            }
3246	    else
3247	    {
3248
3249		for( page = 0;
3250                     (page < segLen) && (KERN_SUCCESS == err);
3251                     page += page_size)
3252		{
3253		    err = device_pager_populate_object(pager, pagerOffset,
3254			    (ppnum_t)(atop_64(physAddr + page)), page_size);
3255		    pagerOffset += page_size;
3256                }
3257            }
3258            assert( KERN_SUCCESS == err );
3259            if( err)
3260                break;
3261        }
3262
	// For kernel mappings, this call to vm_fault forces an early pmap-level
	// resolution of the mappings created above, because the fault cannot be
	// taken later from interrupt level.
3266	/*  *** ALERT *** */
3267	/*  *** Temporary Workaround *** */
3268
3269	if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3270	{
3271		vm_fault(addressMap,
3272			 (vm_map_offset_t)address,
3273			 VM_PROT_READ|VM_PROT_WRITE,
3274			 FALSE, THREAD_UNINT, NULL,
3275			 (vm_map_offset_t)0);
3276	}
3277
3278	/*  *** Temporary Workaround *** */
3279	/*  *** ALERT *** */
3280
3281	sourceOffset += segLen - pageOffset;
3282	address += segLen;
3283	bytes -= segLen;
3284	pageOffset = 0;
3285
3286    }
3287    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3288
3289    if (bytes)
3290        err = kIOReturnBadArgument;
3291
3292    return (err);
3293}
3294
3295IOReturn IOMemoryDescriptor::doUnmap(
3296	vm_map_t		addressMap,
3297	IOVirtualAddress	__address,
3298	IOByteCount		__length )
3299{
3300    IOReturn	      err;
3301    mach_vm_address_t address;
3302    mach_vm_size_t    length;
3303
3304    if (__length)
3305    {
3306	address = __address;
3307	length  = __length;
3308    }
3309    else
3310    {
3311	addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3312	address    = ((IOMemoryMap *) __address)->fAddress;
3313	length     = ((IOMemoryMap *) __address)->fLength;
3314    }
3315
3316    if ((addressMap == kernel_map)
3317        && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3318	addressMap = IOPageableMapForAddress( address );
3319
3320#if DEBUG
3321    if( kIOLogMapping & gIOKitDebug)
3322	IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3323		addressMap, address, length );
3324#endif
3325
3326    err = mach_vm_deallocate( addressMap, address, length );
3327
3328    return (err);
3329}
3330
3331IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3332{
3333    IOReturn		err = kIOReturnSuccess;
3334    IOMemoryMap *	mapping = 0;
3335    OSIterator *	iter;
3336
3337    LOCK;
3338
3339    if( doRedirect)
3340        _flags |= kIOMemoryRedirected;
3341    else
3342        _flags &= ~kIOMemoryRedirected;
3343
3344    do {
3345	if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3346	    while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3347		mapping->redirect( safeTask, doRedirect );
3348
3349	    iter->release();
3350	}
3351    } while( false );
3352
3353    if (!doRedirect)
3354    {
3355        WAKEUP;
3356    }
3357
3358    UNLOCK;
3359
3360#ifndef __LP64__
3361    // temporary binary compatibility
3362    IOSubMemoryDescriptor * subMem;
3363    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3364	err = subMem->redirect( safeTask, doRedirect );
3365    else
3366	err = kIOReturnSuccess;
3367#endif /* !__LP64__ */
3368
3369    return( err );
3370}
3371
3372IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3373{
3374    IOReturn err = kIOReturnSuccess;
3375
3376    if( fSuperMap) {
3377//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3378    } else {
3379
3380        LOCK;
3381
3382	do
3383	{
3384	    if (!fAddress)
3385		break;
3386	    if (!fAddressMap)
3387		break;
3388
3389	    if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3390	      && (0 == (fOptions & kIOMapStatic)))
3391	    {
3392		IOUnmapPages( fAddressMap, fAddress, fLength );
3393		err = kIOReturnSuccess;
3394#if DEBUG
3395		IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3396#endif
3397	    }
3398	    else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3399	    {
3400		IOOptionBits newMode;
3401		newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3402		IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3403	    }
3404	}
3405	while (false);
3406	UNLOCK;
3407    }
3408
3409    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3410	 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3411     && safeTask
3412     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3413	fMemory->redirect(safeTask, doRedirect);
3414
3415    return( err );
3416}
3417
3418IOReturn IOMemoryMap::unmap( void )
3419{
3420    IOReturn	err;
3421
3422    LOCK;
3423
3424    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3425	&& (0 == (fOptions & kIOMapStatic))) {
3426
3427        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3428
3429    } else
3430	err = kIOReturnSuccess;
3431
3432    if (fAddressMap)
3433    {
3434        vm_map_deallocate(fAddressMap);
3435        fAddressMap = 0;
3436    }
3437
3438    fAddress = 0;
3439
3440    UNLOCK;
3441
3442    return( err );
3443}
3444
3445void IOMemoryMap::taskDied( void )
3446{
3447    LOCK;
3448    if (fUserClientUnmap)
3449	unmap();
3450    if( fAddressMap) {
3451        vm_map_deallocate(fAddressMap);
3452        fAddressMap = 0;
3453    }
3454    fAddressTask = 0;
3455    fAddress	 = 0;
3456    UNLOCK;
3457}
3458
3459IOReturn IOMemoryMap::userClientUnmap( void )
3460{
3461    fUserClientUnmap = true;
3462    return (kIOReturnSuccess);
3463}
3464
// Overload the release mechanism.  Every mapping must be a member of its
// memory descriptor's _mappings set, so a mapping always carries two
// references.  When either of those references is released we need to
// free ourselves.
3469void IOMemoryMap::taggedRelease(const void *tag) const
3470{
3471    LOCK;
3472    super::taggedRelease(tag, 2);
3473    UNLOCK;
3474}
3475
3476void IOMemoryMap::free()
3477{
3478    unmap();
3479
3480    if (fMemory)
3481    {
3482        LOCK;
3483	fMemory->removeMapping(this);
3484	UNLOCK;
3485	fMemory->release();
3486    }
3487
3488    if (fOwner && (fOwner != fMemory))
3489    {
3490        LOCK;
3491	fOwner->removeMapping(this);
3492	UNLOCK;
3493    }
3494
3495    if (fSuperMap)
3496	fSuperMap->release();
3497
3498    if (fRedirUPL) {
3499	upl_commit(fRedirUPL, NULL, 0);
3500	upl_deallocate(fRedirUPL);
3501    }
3502
3503    super::free();
3504}
3505
3506IOByteCount IOMemoryMap::getLength()
3507{
3508    return( fLength );
3509}
3510
3511IOVirtualAddress IOMemoryMap::getVirtualAddress()
3512{
3513#ifndef __LP64__
3514    if (fSuperMap)
3515	fSuperMap->getVirtualAddress();
3516    else if (fAddressMap
3517		&& vm_map_is_64bit(fAddressMap)
3518		&& (sizeof(IOVirtualAddress) < 8))
3519    {
3520	OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3521    }
3522#endif /* !__LP64__ */
3523
3524    return (fAddress);
3525}
3526
3527#ifndef __LP64__
3528mach_vm_address_t 	IOMemoryMap::getAddress()
3529{
3530    return( fAddress);
3531}
3532
3533mach_vm_size_t 	IOMemoryMap::getSize()
3534{
3535    return( fLength );
3536}
3537#endif /* !__LP64__ */
3538
3539
3540task_t IOMemoryMap::getAddressTask()
3541{
3542    if( fSuperMap)
3543	return( fSuperMap->getAddressTask());
3544    else
3545        return( fAddressTask);
3546}
3547
3548IOOptionBits IOMemoryMap::getMapOptions()
3549{
3550    return( fOptions);
3551}
3552
3553IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3554{
3555    return( fMemory );
3556}
3557
3558IOMemoryMap * IOMemoryMap::copyCompatible(
3559		IOMemoryMap * newMapping )
3560{
3561    task_t		task      = newMapping->getAddressTask();
3562    mach_vm_address_t	toAddress = newMapping->fAddress;
3563    IOOptionBits	_options  = newMapping->fOptions;
3564    mach_vm_size_t	_offset   = newMapping->fOffset;
3565    mach_vm_size_t	_length   = newMapping->fLength;
3566
3567    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3568	return( 0 );
3569    if( (fOptions ^ _options) & kIOMapReadOnly)
3570	return( 0 );
3571    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3572     && ((fOptions ^ _options) & kIOMapCacheMask))
3573	return( 0 );
3574
3575    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3576	return( 0 );
3577
3578    if( _offset < fOffset)
3579	return( 0 );
3580
3581    _offset -= fOffset;
3582
3583    if( (_offset + _length) > fLength)
3584	return( 0 );
3585
3586    retain();
3587    if( (fLength == _length) && (!_offset))
3588    {
3589	newMapping = this;
3590    }
3591    else
3592    {
3593	newMapping->fSuperMap = this;
3594	newMapping->fOffset   = fOffset + _offset;
3595	newMapping->fAddress  = fAddress + _offset;
3596    }
3597
3598    return( newMapping );
3599}
3600
3601IOReturn IOMemoryMap::wireRange(
3602    	uint32_t		options,
3603        mach_vm_size_t		offset,
3604        mach_vm_size_t		length)
3605{
3606    IOReturn kr;
3607    mach_vm_address_t start = trunc_page_64(fAddress + offset);
3608    mach_vm_address_t end   = round_page_64(fAddress + offset + length);
3609
3610    if (kIODirectionOutIn & options)
3611    {
3612	kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3613    }
3614    else
3615    {
3616	kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3617    }
3618
3619    return (kr);
3620}
3621
3622
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress address;

    LOCK;
#ifdef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

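/*
 * One-time class initialization: allocate the global recursive memory lock if
 * it does not already exist, publish the mapped-I/O byte limit in the registry
 * root, record the last physical page number, and set up the page-allocation
 * lock and queue.
 */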
void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
    gIOLastPage = IOGetLastPageNumber();

    gIOPageAllocLock = IOSimpleLockAlloc();
    queue_init(&gIOPageAllocList);
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t           intoTask,
        IOVirtualAddress mapAddress,
        IOOptionBits     options )
{
    return (createMappingInTask( intoTask, mapAddress,
                                 options | kIOMapStatic,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits options )
{
    return (createMappingInTask( kernel_task, 0,
                                 options | kIOMapAnywhere,
                                 0, getLength() ));
}

#ifndef __LP64__
IOMemoryMap * IOMemoryDescriptor::map(
        task_t           intoTask,
        IOVirtualAddress atAddress,
        IOOptionBits     options,
        IOByteCount      offset,
        IOByteCount      length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
        OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
        return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
                                options, offset, length));
}
#endif /* !__LP64__ */

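/*
 * Create a mapping of this descriptor in the given task.  A length of zero
 * means "map the whole descriptor".  The IOMemoryMap is allocated and
 * initialized first, then handed to makeMapping() with kIOMap64Bit set; in
 * that calling convention the map object itself is passed through the
 * IOVirtualAddress parameter.
 */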
IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
        task_t            intoTask,
        mach_vm_address_t atAddress,
        IOOptionBits      options,
        mach_vm_size_t    offset,
        mach_vm_size_t    length)
{
    IOMemoryMap * result;
    IOMemoryMap * mapping;

    if (0 == length)
        length = getLength();

    mapping = new IOMemoryMap;

    if( mapping
     && !mapping->init( intoTask, atAddress,
                        options, offset, length )) {
        mapping->release();
        mapping = 0;
    }

    if (mapping)
        result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
        result = 0;

#if DEBUG
    if (!result)
        IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
                this, atAddress, (uint32_t) options, offset, length);
#endif

    return (result);
}

#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif

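/*
 * Repoint this mapping at different backing memory, or quiesce it while new
 * backing is pending.  On the first call an access-blocking UPL is requested
 * via memory_object_iopl_request() (and, for physical-type descriptors, the
 * current pages are unmapped), which appears intended to hold off access until
 * new backing memory arrives; the mapping is then rebuilt with
 * kIOMapUnique | kIOMapReference and the UPL is committed and released.  The
 * "if (false)" calls into physMem->redirect() are disabled in this code.
 */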
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL)
        {
            vm_size_t size = round_page(fLength);
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
                                            NULL, NULL,
                                            &flags))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                if (false)
                    physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                fOffset = 0;
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                            options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                            offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if (false && physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}

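/*
 * Central mapping factory.  kIOMap64Bit is expected (and enforced with a panic
 * on 32-bit kernels); in that calling convention __address carries the
 * pre-built IOMemoryMap rather than a raw virtual address.  Static mappings
 * are simply recorded; kIOMapUnique requests against physical descriptors are
 * retargeted at a freshly created physical-range descriptor; otherwise the
 * existing mappings are searched via copyCompatible() before falling back to
 * doMap() on the chosen descriptor.
 */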
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor * owner,
        task_t               __intoTask,
        IOVirtualAddress     __address,
        IOOptionBits         options,
        IOByteCount          __offset,
        IOByteCount          __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor * mapDesc = 0;
    IOMemoryMap *        result  = 0;
    OSIterator *         iter;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            addr64_t    phys;
            IOByteCount physLen;

//          if (owner != this)          continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withAddressRange(
                                phys, length, getDirection() | kIOMemoryMapperNone, NULL);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if( (iter = OSCollectionIterator::withCollection(_mappings)))
            {
                IOMemoryMap * lookMapping;
                while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
                {
                    if ((result = lookMapping->copyCompatible(mapping)))
                    {
                        addMapping(result);
                        result->setMemoryDescriptor(this, offset);
                        break;
                    }
                }
                iter->release();
            }
            if (result || (options & kIOMapReference))
            {
                if (result != mapping)
                {
                    mapping->release();
                    mapping = NULL;
                }
                continue;
            }
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        IOReturn kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return (result);
}

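/*
 * Track the descriptor's active mappings: addMapping() lazily creates the
 * _mappings set and retains the map by inserting it; removeMapping() drops it
 * again.  Callers in this file take the global memory lock around both.
 */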
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings )
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}

#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount      length,
                                    IODirection      direction,
                                    task_t           task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       length,
                                 IODirection       direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(
                                    IOVirtualRange * ranges,
                                    UInt32           withCount,
                                    IODirection      direction,
                                    task_t           task,
                                    bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                            UInt32            withCount,
                                            IODirection       direction,
                                            bool              asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                             IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

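/*
 * Serialize the descriptor as an XML array of { "address", "length" }
 * dictionaries, one per range.  The range list is copied under the global lock
 * first so the OSNumber/OSDictionary allocations happen unlocked; if the range
 * count changes in the meantime the attempt is abandoned.
 */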
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on, errors jump to the bail label for cleanup.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);
    return result;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }