// IOMemoryDescriptor.cpp
#include <sys/cdefs.h>
#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOKitDebug.h>
#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <sys/cdefs.h>
__BEGIN_DECLS
#include <vm/pmap.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>
#ifndef i386
struct phys_entry *pmap_find_physentry(ppnum_t pa);
#endif
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
memory_object_t
device_pager_setup(
memory_object_t pager,
int device_handle,
vm_size_t size,
int flags);
void
device_pager_deallocate(
memory_object_t);
kern_return_t
device_pager_populate_object(
memory_object_t pager,
vm_object_offset_t offset,
ppnum_t phys_addr,
vm_size_t size);
kern_return_t
memory_object_iopl_request(
ipc_port_t port,
memory_object_offset_t offset,
vm_size_t *upl_size,
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
int *flags);
extern kern_return_t vm_fault(
vm_map_t map,
vm_offset_t vaddr,
vm_prot_t fault_type,
boolean_t change_wiring,
int interruptible,
pmap_t caller_pmap,
vm_offset_t caller_pmap_addr);
unsigned int IOTranslateCacheBits(struct phys_entry *pp);
vm_map_t IOPageableMapForAddress( vm_address_t address );
typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
kern_return_t IOIteratePageableMaps(vm_size_t size,
IOIteratePageableMapsCallback callback, void * ref);
__END_DECLS
#define kIOMaximumMappedIOByteCount (512*1024*1024)
static IOMapper * gIOSystemMapper;
static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
static IORecursiveLock * gIOMemoryLock;
#define LOCK IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP \
IORecursiveLockWakeup( gIOMemoryLock, (void *)this, false)
#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
extern "C" {
// Device-pager upcall from the VM system: supply pages for a device-backed
// mapping.  device_handle is really a pointer to the owning descriptor's
// private ExpansionData; the layout is re-declared locally because the real
// definition is private to IOMemoryDescriptor.
// The global memory lock is held while reading ref->memory so the descriptor
// cannot be freed mid-fault (IOGeneralMemoryDescriptor::free clears
// reserved->memory under the same lock).
kern_return_t device_data_action(
int device_handle,
ipc_port_t device_pager,
vm_prot_t protection,
vm_object_offset_t offset,
vm_size_t size)
{
// NOTE(review): must stay in sync with IOMemoryDescriptor::ExpansionData.
struct ExpansionData {
void * devicePager;
unsigned int pagerContig:1;
unsigned int unused:31;
IOMemoryDescriptor * memory;
};
kern_return_t kr;
ExpansionData * ref = (ExpansionData *) device_handle;
IOMemoryDescriptor * memDesc;
LOCK;
memDesc = ref->memory;
if( memDesc)
kr = memDesc->handleFault( device_pager, 0, 0,
offset, size, kIOMapDefaultCache );
else
// Descriptor already torn down; tell the VM system the fault is aborted.
kr = KERN_ABORTED;
UNLOCK;
return( kr );
}
// Device-pager teardown upcall: free the ExpansionData that was handed to
// device_pager_setup() as the device handle.  Layout re-declared locally;
// NOTE(review): must stay in sync with IOMemoryDescriptor::ExpansionData.
kern_return_t device_close(
int device_handle)
{
struct ExpansionData {
void * devicePager;
unsigned int pagerContig:1;
unsigned int unused:31;
IOMemoryDescriptor * memory;
};
ExpansionData * ref = (ExpansionData *) device_handle;
IODelete( ref, ExpansionData, 1 );
return( kIOReturnSuccess );
}
}
/*
 * Create a descriptor for a kernel-virtual buffer.
 * Convenience wrapper: forwards to the task-qualified variant with
 * kernel_task as the owning task.  Returns 0 on failure.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void * address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::withAddress((vm_address_t) address,
                                           length, direction, kernel_task);
}
/*
 * Create a descriptor for a single virtual range in an arbitrary task.
 * Allocates an IOGeneralMemoryDescriptor and initializes it; on any
 * failure the partially-built object is released and 0 is returned.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount length,
                                IODirection direction,
                                task_t task)
{
    IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
    if (md && !md->initWithAddress(address, length, direction, task)) {
        md->release();
        md = 0;
    }
    return md;
}
/*
 * Create a descriptor for a single physical address range.
 * Returns 0 if allocation or initialization fails.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount length,
	IODirection direction )
{
    IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
    if (md) {
        if (md->initWithPhysicalAddress(address, length, direction))
            return md;
        md->release();
    }
    return 0;
}
/*
 * Create a descriptor over an array of virtual ranges in a task.
 * asReference == true keeps a pointer to the caller's array instead of
 * copying it (the caller must keep it alive).  Returns 0 on failure.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32 withCount,
                                IODirection direction,
                                task_t task,
                                bool asReference)
{
    IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
    if (md && !md->initWithRanges(ranges, withCount, direction, task, asReference)) {
        md->release();
        md = 0;
    }
    return md;
}
/*
 * General factory: create a descriptor from an options word describing the
 * buffer type (virtual / physical / UPL).  See initWithOptions for the
 * interpretation of the arguments.  Returns 0 on failure.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
    if (md) {
        if (md->initWithOptions(buffers, count, offset, task, opts, mapper))
            return md;
        md->release();
    }
    return 0;
}
// Base-class stub: initWithOptions must be implemented by a concrete
// subclass (IOGeneralMemoryDescriptor overrides it).  Reaching this
// implementation is a programming error, hence the panic.
bool IOMemoryDescriptor::initWithOptions(void * buffers,
UInt32 count,
UInt32 offset,
task_t task,
IOOptionBits options,
IOMapper * mapper)
{
panic("IOMD::initWithOptions called\n");
return 0;
}
/*
 * Create a descriptor over an array of physical ranges.
 * asReference == true aliases the caller's array rather than copying it.
 * Returns 0 on failure.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32 withCount,
                                        IODirection direction,
                                        bool asReference)
{
    IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
    if (md && !md->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
        md->release();
        md = 0;
    }
    return md;
}
/*
 * Create a descriptor that exposes a sub-range of an existing descriptor.
 * Returns 0 if allocation or initSubRange fails.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount offset,
                                 IOByteCount length,
                                 IODirection direction)
{
    IOSubMemoryDescriptor * sub = new IOSubMemoryDescriptor;
    if (sub) {
        if (sub->initSubRange(of, offset, length, direction))
            return sub;
        sub->release();
    }
    return 0;
}
bool
IOGeneralMemoryDescriptor::initWithAddress(void * address,
IOByteCount withLength,
IODirection withDirection)
{
_singleRange.v.address = (vm_address_t) address;
_singleRange.v.length = withLength;
return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
/*
 * Initialize from a single virtual range in an arbitrary task.
 * Records the range in the embedded _singleRange and delegates to
 * initWithRanges (as-reference, single range).
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    IOVirtualRange & range = _singleRange.v;
    range.address = address;
    range.length  = withLength;
    return initWithRanges(&range, 1, withDirection, withTask, true);
}
/*
 * Initialize from a single physical range.  Records it in the embedded
 * _singleRange and delegates to initWithPhysicalRanges (as-reference).
 */
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount       withLength,
	IODirection       withDirection )
{
    IOPhysicalRange & range = _singleRange.p;
    range.address = address;
    range.length  = withLength;
    return initWithPhysicalRanges(&range, 1, withDirection, true);
}
/*
 * Initialize from physical ranges: build the options word
 * (physical type + direction, optionally as-reference) and delegate
 * to initWithOptions with no task and no mapper override.
 */
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                        IOPhysicalRange * ranges,
                                        UInt32            count,
                                        IODirection       direction,
                                        bool              reference)
{
    IOOptionBits opts = kIOMemoryTypePhysical | direction;
    if (reference)
        opts |= kIOMemoryAsReference;
    return initWithOptions(ranges, count, 0, 0, opts, 0);
}
/*
 * Initialize from virtual ranges: build the options word and delegate to
 * initWithOptions.  A null task means the "ranges" are actually physical;
 * kernel-task memory is auto-prepared (wired at init).
 */
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits opts = direction;
    if (!task) {
        opts |= kIOMemoryTypePhysical;
    } else {
        opts |= kIOMemoryTypeVirtual;
        // Kernel memory is wired immediately so it is always safe to DMA.
        if (kernel_task == task)
            opts |= kIOMemoryAutoPrepare;
    }
    if (reference)
        opts |= kIOMemoryAsReference;
    return initWithOptions(ranges, count, 0, task, opts, 0);
}
// Flags stored in ioPLBlock::fFlags.
enum ioPLBlockFlags {
kIOPLOnDevice = 0x00000001,
kIOPLExternUPL = 0x00000002,
};
// Bookkeeping for one UPL created while wiring a descriptor:
//   fIOPL       - the UPL itself (0 once committed)
//   fIOMDOffset - byte offset of this UPL within the descriptor
//   fPageInfo   - index into the shared page list, or (for external UPLs)
//                 a pointer to the UPL's own page list
//   fMappedBase - first IOVM page when a mapper is in use
//   fPageOffset - offset of the data within the first page
struct ioPLBlock {
upl_t fIOPL;
vm_address_t fIOMDOffset; vm_offset_t fPageInfo; ppnum_t fMappedBase; unsigned int fPageOffset; unsigned int fFlags; };
// Variable-length blob stored inside _memoryEntries (an OSData):
// header, then fPageCnt upl_page_info_t entries, then the ioPLBlock array.
// Both trailing arrays are declared [0] and sized by the OSData length.
struct ioGMDData {
IOMapper *fMapper;
unsigned int fPageCnt;
upl_page_info_t fPageList[0]; ioPLBlock fBlocks[0];
};
// Accessors into the blob; getNumIOPL derives the IOPL count from the
// OSData's current length.
#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(d,len) \
((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d) (&(d->fPageList[0]))
#define computeDataSize(p, u) \
(sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
// Master initializer.  Interprets "buffers" according to the type bits in
// options: an IOVirtualRange/IOPhysicalRange array, or a single upl_t.
// May be called on an already-initialized object to re-target it, in which
// case the old state is torn down first.
bool
IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
UInt32 count,
UInt32 offset,
task_t task,
IOOptionBits options,
IOMapper * mapper)
{
switch (options & kIOMemoryTypeMask) {
case kIOMemoryTypeVirtual:
assert(task);
if (!task)
return false;
else
break;
// Intentional fall-through: physical descriptors never use a system
// mapper (kIOMapperNone) and, like UPLs, must not carry a task.
case kIOMemoryTypePhysical: mapper = kIOMapperNone;
case kIOMemoryTypeUPL:
assert(!task);
break;
default:
panic("IOGMD::iWO(): bad type"); return false;
}
assert(buffers);
assert(count);
// Re-initialization: unwind any outstanding prepare()s and free the old
// range array before adopting the new description.
if (_initialized) {
while (_wireCount)
complete();
if (_kernPtrAligned)
unmapFromKernel();
if (_ranges.v && _rangesIsAllocated)
IODelete(_ranges.v, IOVirtualRange, _rangesCount);
}
else {
if (!super::init())
return false;
_initialized = true;
}
// Mapper selection: kIOMapperNone means "explicitly no mapper";
// a null mapper means "use the system mapper if one exists".
if (mapper == kIOMapperNone)
mapper = 0; else if (!mapper) {
IOMapper::checkForSystemMapper();
gIOSystemMapper = mapper = IOMapper::gSystem;
}
_flags = options;
_task = task;
_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
_position = 0;
_kernPtrAligned = 0;
_cachedPhysicalAddress = 0;
_cachedVirtualAddress = 0;
if ( (options & kIOMemoryTypeMask) == kIOMemoryTypeUPL) {
// UPL case: wrap an externally-created UPL.  Here "count" is the byte
// length and "offset" the starting offset within the UPL.
ioGMDData *dataP;
unsigned int dataSize = computeDataSize( 0, 1);
if (!_memoryEntries) {
_memoryEntries = OSData::withCapacity(dataSize);
if (!_memoryEntries)
return false;
}
else if (!_memoryEntries->initWithCapacity(dataSize))
return false;
_memoryEntries->appendBytes(0, sizeof(ioGMDData));
dataP = getDataP(_memoryEntries);
dataP->fMapper = mapper;
dataP->fPageCnt = 0;
// The UPL's pages are already wired, so the descriptor starts prepared.
_wireCount++;
_length = count;
// NOTE(review): _pages is accumulated, not assigned — on re-init this
// adds to the previous value; confirm that is intended.
_pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
ioPLBlock iopl;
upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
iopl.fIOPL = (upl_t) buffers;
// "device" UPLs have no per-page list worth indexing into.
iopl.fFlags = pageList->device | kIOPLExternUPL;
iopl.fIOMDOffset = 0;
if (!pageList->device) {
pageList = &pageList[atop_32(offset)];
offset &= PAGE_MASK;
if (mapper) {
iopl.fMappedBase = mapper->iovmAlloc(_pages);
mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
}
else
iopl.fMappedBase = 0;
}
else
iopl.fMappedBase = 0;
iopl.fPageInfo = (vm_address_t) pageList;
iopl.fPageOffset = offset;
_memoryEntries->appendBytes(&iopl, sizeof(iopl));
}
else {
// Range case: IOVirtualRange and IOPhysicalRange share a layout, so the
// physical case reuses this path too.
IOVirtualRange *ranges = (IOVirtualRange *) buffers;
_length = 0;
_pages = 0;
for (unsigned ind = 0; ind < count; ind++) {
IOVirtualRange cur = ranges[ind];
_length += cur.length;
_pages += atop_32(cur.address + cur.length + PAGE_MASK)
- atop_32(cur.address);
}
_ranges.v = 0;
_rangesIsAllocated = !(options & kIOMemoryAsReference);
_rangesCount = count;
if (options & kIOMemoryAsReference)
_ranges.v = ranges;
else {
_ranges.v = IONew(IOVirtualRange, count);
if (!_ranges.v)
return false;
bcopy( ranges, _ranges.v,
count * sizeof(IOVirtualRange));
}
if ( (options & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
// Physical memory needs no wiring; mark it permanently prepared.
_wireCount++; else {
// Virtual memory: pre-size the ioGMDData blob for the page list plus
// up to two IOPLs per range.
ioGMDData *dataP;
unsigned int dataSize =
computeDataSize(_pages, _rangesCount * 2);
if (!_memoryEntries) {
_memoryEntries = OSData::withCapacity(dataSize);
if (!_memoryEntries)
return false;
}
else if (!_memoryEntries->initWithCapacity(dataSize))
return false;
_memoryEntries->appendBytes(0, sizeof(ioGMDData));
dataP = getDataP(_memoryEntries);
dataP->fMapper = mapper;
dataP->fPageCnt = _pages;
// Persistent descriptors take a Mach memory-entry reference on the
// backing object so it survives and can be re-wired later.
if (kIOMemoryPersistent & _flags)
{
kern_return_t error;
ipc_port_t sharedMem;
vm_size_t size = _pages << PAGE_SHIFT;
vm_address_t startPage;
startPage = trunc_page_32(_ranges.v[0].address);
vm_map_t theMap = ((_task == kernel_task) && (kIOMemoryBufferPageable & _flags))
? IOPageableMapForAddress(startPage)
: get_task_map(_task);
vm_size_t actualSize = size;
error = mach_make_memory_entry( theMap,
&actualSize, startPage,
VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
NULL );
if (KERN_SUCCESS == error) {
if (actualSize == round_page_32(size)) {
_memEntry = (void *) sharedMem;
} else {
// Entry covers less than the descriptor; drop it rather than
// keep a partial reference.
#if IOASSERT
IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
startPage, (UInt32)actualSize, size);
#endif
ipc_port_release_send( sharedMem );
}
}
}
if ((_flags & kIOMemoryAutoPrepare)
&& prepare() != kIOReturnSuccess)
return false;
}
}
return true;
}
// Destructor-equivalent.  Order matters:
//  1. Null reserved->memory under the global lock so an in-flight
//     device_data_action() sees the descriptor as gone (KERN_ABORTED).
//  2. Unwind outstanding prepare()s.
//  3. Release the IOPL bookkeeping, ranges, pager and memory entry.
// "reserved" itself is freed later by device_close(), not here.
void IOGeneralMemoryDescriptor::free()
{
LOCK;
if( reserved)
reserved->memory = 0;
UNLOCK;
while (_wireCount)
complete();
if (_memoryEntries)
_memoryEntries->release();
if (_kernPtrAligned)
unmapFromKernel();
if (_ranges.v && _rangesIsAllocated)
IODelete(_ranges.v, IOVirtualRange, _rangesCount);
if (reserved && reserved->devicePager)
device_pager_deallocate( (memory_object_t) reserved->devicePager );
if (_memEntry)
ipc_port_release_send( (ipc_port_t) _memEntry );
super::free();
}
// Deprecated entry point kept only for binary compatibility; any call is a bug.
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
panic("IOGMD::unmapFromKernel deprecated");
}
// Deprecated entry point kept only for binary compatibility; any call is a bug.
void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
panic("IOGMD::mapIntoKernel deprecated");
}
// Accessor: the transfer direction this descriptor was created with.
IODirection IOMemoryDescriptor::getDirection() const
{
    return (_direction);
}
// Accessor: total byte length described by this descriptor.
IOByteCount IOMemoryDescriptor::getLength() const
{
    return (_length);
}
// Store a client-defined tag on the descriptor.
void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}
// Retrieve the client-defined tag.
IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return _tag;
}
// Base implementation: temporarily prepare the memory, look up the
// physical segment at "offset", then complete.  Returns 0 (and leaves
// *length untouched) if prepare() fails.
IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress segAddr = 0;
    if (kIOReturnSuccess == prepare()) {
        segAddr = getPhysicalSegment(offset, length);
        complete();
    }
    return segAddr;
}
// Copy up to "length" bytes out of the descriptor starting at "offset"
// into the kernel buffer "bytes".  Walks physical segments and uses
// copypv for each.  Returns the number of bytes actually copied (short
// only if a segment lookup fails).
IOByteCount IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
addr64_t dstAddr = (addr64_t) (UInt32) bytes;
IOByteCount remaining;
assert(offset < _length);
assert(offset + length <= _length);
if (offset >= _length) {
IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); return 0;
}
// Clamp the request to the descriptor's end.
remaining = length = min(length, _length - offset);
while (remaining) { addr64_t srcAddr64;
IOByteCount srcLen;
srcAddr64 = getPhysicalSegment64(offset, &srcLen);
if (!srcAddr64)
break;
if (srcLen > remaining)
srcLen = remaining;
// Physical source, kernel-mapped sink (see copypv flag bits).
copypv(srcAddr64, dstAddr, srcLen,
cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
dstAddr += srcLen;
offset += srcLen;
remaining -= srcLen;
}
assert(!remaining);
return length - remaining;
}
// Copy up to "length" bytes from the kernel buffer "bytes" into the
// descriptor starting at "offset".  Refused (returns 0) if the memory was
// prepared read-only.  Returns the number of bytes actually copied.
IOByteCount IOMemoryDescriptor::writeBytes
(IOByteCount offset, const void *bytes, IOByteCount length)
{
addr64_t srcAddr = (addr64_t) (UInt32) bytes;
IOByteCount remaining;
assert(offset < _length);
assert(offset + length <= _length);
assert( !(kIOMemoryPreparedReadOnly & _flags) );
if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); return 0;
}
// Clamp the request to the descriptor's end.
remaining = length = min(length, _length - offset);
while (remaining) { addr64_t dstAddr64;
IOByteCount dstLen;
dstAddr64 = getPhysicalSegment64(offset, &dstLen);
if (!dstAddr64)
break;
if (dstLen > remaining)
dstLen = remaining;
// Physical sink, kernel-mapped source.
// NOTE(review): flag set differs from readBytes (cppvNoModSnk here);
// confirm the reference-bit semantics against the copypv definition.
copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
srcAddr += dstLen;
offset += dstLen;
remaining -= dstLen;
}
assert(!remaining);
return length - remaining;
}
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
// Deprecated entry point kept only for binary compatibility; any call is a bug.
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
panic("IOGMD::setPosition deprecated");
}
// Return the physical address at "offset" and (optionally) the length of
// the physically-contiguous segment starting there.
// Two paths: physical descriptors coalesce adjacent ranges directly;
// virtual/UPL descriptors must be wired and walk the IOPL list built by
// wireVirtual().  The do{...}while(0) with "continue" is used as a
// structured early-exit within the wired path.
IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
(IOByteCount offset, IOByteCount *lengthOfSegment)
{
IOPhysicalAddress address = 0;
IOPhysicalLength length = 0;
if (offset < _length) {
if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
// Physical case: find the range containing "offset"...
unsigned int ind;
for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
offset -= _ranges.p[ind].length;
IOPhysicalRange cur = _ranges.p[ind];
address = cur.address + offset;
length = cur.length - offset;
// ...then extend the segment across physically-adjacent ranges.
for (++ind; ind < _rangesCount; ind++) {
cur = _ranges.p[ind];
if (address + length != cur.address)
break;
length += cur.length;
}
{
assert(address || (!_ranges.p[0].address && 1 == _rangesCount));
}
assert(length);
}
else do {
// Wired case: requires a prior prepare().
assert(_wireCount);
if (!_wireCount)
{
panic("IOGMD: not wired for getPhysicalSegment()");
continue;
}
assert(_memoryEntries);
ioGMDData * dataP = getDataP(_memoryEntries);
const ioPLBlock *ioplList = getIOPLList(dataP);
UInt ind, numIOPLs = getNumIOPL(dataP, _memoryEntries->getLength());
upl_page_info_t *pageList = getPageList(dataP);
assert(numIOPLs > 0);
// Linear search for the IOPL containing "offset" (fIOMDOffset is
// monotonically increasing).
for (ind = 1; ind < numIOPLs; ind++) {
if (offset < ioplList[ind].fIOMDOffset)
break;
}
ioPLBlock ioplInfo = ioplList[ind - 1];
// Segment can run at most to the start of the next IOPL.
if (ind < numIOPLs)
length = ioplList[ind].fIOMDOffset;
else
length = _length;
length -= offset;
offset -= ioplInfo.fIOMDOffset;
// With a mapper the whole IOPL is IOVM-contiguous.
if (ioplInfo.fMappedBase) {
offset += (ioplInfo.fPageOffset & PAGE_MASK);
address = ptoa_32(ioplInfo.fMappedBase) + offset;
continue;
}
offset += ioplInfo.fPageOffset;
// External UPLs carry their own page list; internal ones index into
// the shared list by fPageInfo.
if (ioplInfo.fFlags & kIOPLExternUPL)
pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
else
pageList = &pageList[ioplInfo.fPageInfo];
// Device memory is physically contiguous by construction.
if ( ioplInfo.fFlags & kIOPLOnDevice ) {
address = ptoa_32(pageList->phys_addr) + offset;
continue;
}
ind = atop_32(offset);
offset &= PAGE_MASK;
IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
address = ptoa_32(pageAddr) + offset;
// Extend across consecutive physical pages.
if (length > PAGE_SIZE - offset) {
IOByteCount contigLength = PAGE_SIZE - offset;
while (contigLength < length
&& ++pageAddr == pageList[++ind].phys_addr) {
contigLength += PAGE_SIZE;
}
if (length > contigLength)
length = contigLength;
}
assert(address);
assert(length);
} while (0);
if (!address)
length = 0;
}
if (lengthOfSegment)
*lengthOfSegment = length;
return address;
}
// 64-bit segment lookup.  When a system mapper is active, getPhysicalSegment
// returns IOVM addresses; translate through the mapper and re-coalesce the
// segment page-by-page in real physical space.
//
// Fix: the original dereferenced "lengthOfSegment" unconditionally on the
// mapper path even though getPhysicalSegment (and the non-mapper path here)
// treat a NULL pointer as "caller doesn't want the length".  Use a local
// length throughout and only store through the pointer if it is non-NULL.
addr64_t IOMemoryDescriptor::getPhysicalSegment64
(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount origLen = 0;
    IOByteCount length;
    addr64_t phys64;

    phys32 = getPhysicalSegment(offset, &origLen);
    if (!phys32) {
        if (lengthOfSegment)
            *lengthOfSegment = origLen;	// zero-length segment
        return 0;
    }
    if (gIOSystemMapper)
    {
        // Translate the IOVM address and grow the segment while successive
        // pages remain physically consecutive after translation.
        phys64 = gIOSystemMapper->mapAddr(phys32);
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;
    }
    else
    {
        // No mapper: the 32-bit address is already physical.
        phys64 = (addr64_t) phys32;
        length = origLen;
    }
    if (lengthOfSegment)
        *lengthOfSegment = length;
    return phys64;
}
// Identify the VM object backing this descriptor by walking the task's map
// with vm_region_64.  Returns the object_id only if the entire span
// [basePage, basePage + size) is covered by regions backed by the SAME
// object with no holes; otherwise 0.  Requires a persistent _memEntry.
void * IOGeneralMemoryDescriptor::getBackingID() const
{
if (!_memEntry) return 0;
vm_size_t size = _pages << PAGE_SHIFT;
vm_size_t seenSize = 0;
vm_address_t basePage = trunc_page_32(_ranges.v[0].address);
void *retObjID = 0;
vm_map_t theMap =
((_task == kernel_task) && (kIOMemoryBufferPageable & _flags))
? IOPageableMapForAddress(basePage)
: get_task_map(_task);
for (;;) {
vm_region_object_info_data_64_t objInfo;
vm_address_t actualPage = basePage;
vm_size_t actualSize;
mach_msg_type_number_t objInfoSize;
kern_return_t error;
objInfoSize = VM_REGION_OBJECT_INFO_COUNT_64;
error = vm_region_64(theMap,
&actualPage,
&actualSize,
VM_REGION_OBJECT_INFO_64,
(vm_region_info_t) &objInfo,
&objInfoSize,
0);
// actualPage > basePage means a hole; a differing object_id means the
// range is not backed by a single object.  Either way, fail.
if (KERN_SUCCESS != error || actualSize == 0 || actualPage > basePage
|| (retObjID && retObjID != (void *) objInfo.object_id))
return 0;
actualPage += actualSize; seenSize += actualPage - basePage; basePage = actualPage; if (seenSize >= size)
return (void *) objInfo.object_id;
if (!retObjID)
retObjID = (void *) objInfo.object_id;
}
}
// Return the (task-virtual) source address at "offset" and the length of
// the contiguous run starting there, coalescing adjacent ranges.
// UPL-backed descriptors defer to the base class (prepare + physical
// lookup); note the returned "address" here is a virtual address despite
// the IOPhysicalAddress return type.
IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment
(IOByteCount offset, IOByteCount *lengthOfSegment)
{
IOPhysicalAddress address = 0;
IOPhysicalLength length = 0;
assert(offset <= _length);
if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeUPL)
return super::getSourceSegment( offset, lengthOfSegment );
if ( offset < _length ) {
// Find the range containing "offset".
unsigned rangesIndex = 0;
for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
{
offset -= _ranges.v[rangesIndex].length; }
address = _ranges.v[rangesIndex].address + offset;
length = _ranges.v[rangesIndex].length - offset;
// Extend across virtually-adjacent ranges.
for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
{
if ( address + length != _ranges.v[rangesIndex].address ) break;
length += _ranges.v[rangesIndex].length; }
assert(address);
if ( address == 0 ) length = 0;
}
if ( lengthOfSegment ) *lengthOfSegment = length;
return address;
}
// Kernel-task descriptors can return their source address directly as a
// usable pointer; for user tasks this entry point is deprecated and panics
// (the trailing return 0 is unreachable, kept to satisfy the compiler).
void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
IOByteCount * lengthOfSegment)
{
if (_task == kernel_task)
return (void *) getSourceSegment(offset, lengthOfSegment);
else
panic("IOGMD::getVirtualSegment deprecated");
return 0;
}
#ifdef __ppc__
extern vm_offset_t static_memory_end;
#define io_kernel_static_end static_memory_end
#else
extern vm_offset_t first_avail;
#define io_kernel_static_end first_avail
#endif
/*
 * Build a fake "UPL" page list for kernel static memory (below
 * io_kernel_static_end), which is permanently wired and therefore needs no
 * real UPL.  Fills page_list by translating each page through the kernel
 * pmap; *upl is always set to NULL.  Returns kIOReturnVMError if any page
 * in the requested span has no physical translation.
 */
static kern_return_t
io_get_kernel_static_upl(
    vm_map_t		map,
    vm_address_t	offset,
    vm_size_t		*upl_size,
    upl_t		*upl,
    upl_page_info_array_t page_list,
    unsigned int	*count,
    int			*flags,
    int			force_data_sync)
{
    unsigned int maxPages = atop_32(*upl_size);
    if (maxPages > *count)
        maxPages = *count;

    *upl = NULL;

    unsigned int done;
    for (done = 0; done < maxPages; done++)
    {
        ppnum_t phys = pmap_find_phys(kernel_pmap,
                                      ((addr64_t) offset) + ptoa_64(done));
        if (!phys)
            break;	// no translation — static memory should never fault
        upl_page_info_t & info = page_list[done];
        info.phys_addr = phys;
        info.pageout   = 0;
        info.absent    = 0;
        info.dirty     = 0;
        info.precious  = 0;
        info.device    = 0;
    }

    return (done >= maxPages) ? kIOReturnSuccess : kIOReturnVMError;
}
// Wire down a virtual descriptor's pages, building one or more UPLs per
// range and recording an ioPLBlock for each in _memoryEntries.  Three
// acquisition paths: kernel static memory (fake UPL), persistent memory
// (IOPL against the Mach memory entry), and ordinary task memory
// (vm_map_get_upl).  On failure all created UPLs are aborted and any
// IOVM allocation is released.
IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
IOReturn error = kIOReturnNoMemory;
ioGMDData *dataP;
ppnum_t mapBase = 0;
IOMapper *mapper;
ipc_port_t sharedMem = (ipc_port_t) _memEntry;
assert(!_wireCount);
if (_pages >= gIOMaximumMappedIOPageCount)
return kIOReturnNoResources;
dataP = getDataP(_memoryEntries);
mapper = dataP->fMapper;
if (mapper && _pages)
mapBase = mapper->iovmAlloc(_pages);
// Reserve room for the page list; this can reallocate the OSData, so
// dataP is refetched inside the loop below.
_memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
dataP = 0;
if (forDirection == kIODirectionNone)
forDirection = _direction;
int uplFlags; switch (forDirection)
{
case kIODirectionOut:
// Pages will only be read for I/O; mark the prepare read-only so
// writeBytes refuses to modify them.
uplFlags = UPL_COPYOUT_FROM;
_flags |= kIOMemoryPreparedReadOnly;
break;
case kIODirectionIn:
default:
uplFlags = 0; break;
}
uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
unsigned int pageIndex = 0;
IOByteCount mdOffset = 0;
vm_map_t curMap;
// Pageable kernel buffers live in one of the pageable maps, selected
// per-address below; everything else uses the owning task's map.
if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
curMap = 0;
else
{ curMap = get_task_map(_task); }
for (UInt range = 0; range < _rangesCount; range++) {
ioPLBlock iopl;
IOVirtualRange curRange = _ranges.v[range];
vm_address_t startPage;
IOByteCount numBytes;
startPage = trunc_page_32(curRange.address);
iopl.fPageOffset = (short) curRange.address & PAGE_MASK;
if (mapper)
iopl.fMappedBase = mapBase + pageIndex;
else
iopl.fMappedBase = 0;
numBytes = iopl.fPageOffset + curRange.length;
// A single range may need several UPLs if the VM system returns a
// shorter UPL than requested.
while (numBytes) {
dataP = getDataP(_memoryEntries);
vm_map_t theMap =
(curMap)? curMap
: IOPageableMapForAddress(startPage);
upl_page_info_array_t pageInfo = getPageList(dataP);
int ioplFlags = uplFlags;
upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
vm_size_t ioplSize = round_page_32(numBytes);
unsigned int numPageInfo = atop_32(ioplSize);
if ((theMap == kernel_map) && (startPage < io_kernel_static_end))
{
// Kernel static memory: permanently wired, synthesize the list.
error = io_get_kernel_static_upl(theMap,
startPage,
&ioplSize,
&iopl.fIOPL,
baseInfo,
&numPageInfo,
&ioplFlags,
false);
} else if (sharedMem && (kIOMemoryPersistent & _flags)) {
// Persistent memory: wire via the Mach memory entry so the same
// backing object is used across prepare/complete cycles.
error = memory_object_iopl_request(sharedMem,
ptoa_32(pageIndex),
&ioplSize,
&iopl.fIOPL,
baseInfo,
&numPageInfo,
&ioplFlags);
} else {
error = vm_map_get_upl(theMap,
startPage,
&ioplSize,
&iopl.fIOPL,
baseInfo,
&numPageInfo,
&ioplFlags,
false);
}
assert(ioplSize);
if (error != KERN_SUCCESS)
goto abortExit;
error = kIOReturnNoMemory;
if (baseInfo->device) {
// Device memory: one "page" entry, and no IOVM mapping is kept.
numPageInfo = 1;
iopl.fFlags = kIOPLOnDevice;
if (mapper && mapBase) {
mapper->iovmFree(mapBase, _pages);
mapBase = 0;
iopl.fMappedBase = 0;
}
}
else {
iopl.fFlags = 0;
if (mapper)
mapper->iovmInsert(mapBase, pageIndex,
baseInfo, numPageInfo);
}
iopl.fIOMDOffset = mdOffset;
iopl.fPageInfo = pageIndex;
// Auto-prepared descriptors stay wired for life, so the UPL itself
// can be committed immediately and only the page list retained.
if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
{
kernel_upl_commit(iopl.fIOPL, 0, 0);
iopl.fIOPL = 0;
}
if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
if (iopl.fIOPL)
kernel_upl_abort(iopl.fIOPL, 0);
goto abortExit;
}
pageIndex += numPageInfo;
// fIOMDOffset tracks data offsets, so back out the first-page offset
// before advancing by the page-rounded UPL size.
mdOffset -= iopl.fPageOffset;
if (ioplSize < numBytes) {
numBytes -= ioplSize;
startPage += ioplSize;
mdOffset += ioplSize;
iopl.fPageOffset = 0;
if (mapper)
iopl.fMappedBase = mapBase + pageIndex;
}
else {
mdOffset += numBytes;
break;
}
}
}
return kIOReturnSuccess;
abortExit:
{
// Undo: abort every UPL recorded so far and release the IOVM block.
dataP = getDataP(_memoryEntries);
UInt done = getNumIOPL(dataP, _memoryEntries->getLength());
ioPLBlock *ioplList = getIOPLList(dataP);
for (UInt range = 0; range < done; range++)
{
if (ioplList[range].fIOPL)
kernel_upl_abort(ioplList[range].fIOPL, 0);
}
if (mapper && mapBase)
mapper->iovmFree(mapBase, _pages);
}
return error;
}
/*
 * Prepare the memory for an I/O transfer.  The first prepare() of a
 * virtual descriptor wires its pages via wireVirtual(); subsequent calls
 * (and physical/UPL descriptors, which are always "wired") just bump the
 * wire count.  Each successful prepare must be balanced by a complete().
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    if ((0 == _wireCount)
     && (kIOMemoryTypeVirtual == (_flags & kIOMemoryTypeMask)))
    {
        IOReturn err = wireVirtual(forDirection);
        if (kIOReturnSuccess != err)
            return err;
    }
    _wireCount++;
    return kIOReturnSuccess;
}
// Balance a prepare().  When the wire count drops to zero, release the
// IOVM mapping and commit all outstanding UPLs, then truncate the
// bookkeeping blob back to just its header.
IOReturn IOGeneralMemoryDescriptor::complete(IODirection )
{
assert(_wireCount);
if (!_wireCount)
return kIOReturnSuccess;
_wireCount--;
if (!_wireCount) {
if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
// Physical memory was never wired; nothing to undo.
}
else {
ioGMDData * dataP = getDataP(_memoryEntries);
ioPLBlock *ioplList = getIOPLList(dataP);
UInt count = getNumIOPL(dataP, _memoryEntries->getLength());
if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
// fIOPL is 0 for auto-prepared (already committed) and kernel
// static entries, so only real UPLs are committed here.
for (UInt ind = 0; ind < count; ind++)
if (ioplList[ind].fIOPL)
kernel_upl_commit(ioplList[ind].fIOPL, 0, 0);
}
// Reset the blob to just the header.
// NOTE(review): dataP aliases _memoryEntries' own buffer during this
// initWithBytes — relies on OSData copying from its own storage safely.
(void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); }
}
return kIOReturnSuccess;
}
// Map the descriptor into "addressMap".  Fast path: mapping a single-range
// descriptor back into its own task is a no-op (the memory is already
// there).  Otherwise ensure a Mach memory entry exists — made directly for
// task-backed memory, or via a device pager for physical memory — then let
// the base class do the actual vm_map.
IOReturn IOGeneralMemoryDescriptor::doMap(
vm_map_t addressMap,
IOVirtualAddress * atAddress,
IOOptionBits options,
IOByteCount sourceOffset,
IOByteCount length )
{
kern_return_t kr;
ipc_port_t sharedMem = (ipc_port_t) _memEntry;
// Fast path: map-into-self.
if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
&& (1 == _rangesCount) && (0 == sourceOffset)
&& (length <= _ranges.v[0].length) ) {
*atAddress = _ranges.v[0].address;
return( kIOReturnSuccess );
}
if( 0 == sharedMem) {
vm_size_t size = _pages << PAGE_SHIFT;
if( _task) {
#ifndef i386
// Task-backed memory: create a memory entry covering the first range.
vm_size_t actualSize = size;
kr = mach_make_memory_entry( get_task_map(_task),
&actualSize, _ranges.v[0].address,
VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
NULL );
if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
// Entry shorter than the descriptor — unusable, drop it.
#if IOASSERT
IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
_ranges.v[0].address, (UInt32)actualSize, size);
#endif
kr = kIOReturnVMError;
ipc_port_release_send( sharedMem );
}
if( KERN_SUCCESS != kr)
#endif
sharedMem = MACH_PORT_NULL;
} else do {
// Physical memory: back the mapping with a device pager so faults
// are serviced by device_data_action() above.
memory_object_t pager;
unsigned int flags = 0;
addr64_t pa;
IOPhysicalLength segLen;
pa = getPhysicalSegment64( sourceOffset, &segLen );
if( !reserved) {
reserved = IONew( ExpansionData, 1 );
if( !reserved)
continue;
}
reserved->pagerContig = (1 == _rangesCount);
reserved->memory = this;
// Translate the IOKit cache mode into device-pager flags.
switch(options & kIOMapCacheMask ) {
case kIOMapDefaultCache:
default:
flags = IODefaultCacheBits(pa);
break;
case kIOMapInhibitCache:
flags = DEVICE_PAGER_CACHE_INHIB |
DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
break;
case kIOMapWriteThruCache:
flags = DEVICE_PAGER_WRITE_THROUGH |
DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
break;
case kIOMapCopybackCache:
flags = DEVICE_PAGER_COHERENT;
break;
case kIOMapWriteCombineCache:
flags = DEVICE_PAGER_CACHE_INHIB |
DEVICE_PAGER_COHERENT;
break;
}
flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
// "reserved" doubles as the pager's device handle; see
// device_data_action / device_close.
pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
size, flags);
assert( pager );
if( pager) {
kr = mach_memory_object_memory_entry_64( (host_t) 1, false ,
size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
assert( KERN_SUCCESS == kr );
if( KERN_SUCCESS != kr) {
device_pager_deallocate( pager );
pager = MACH_PORT_NULL;
sharedMem = MACH_PORT_NULL;
}
}
if( pager && sharedMem)
reserved->devicePager = pager;
else {
IODelete( reserved, ExpansionData, 1 );
reserved = 0;
}
} while( false );
// Cache the entry (possibly NULL on failure) for subsequent maps.
_memEntry = (void *) sharedMem;
}
#ifndef i386
if( 0 == sharedMem)
kr = kIOReturnVMError;
else
#endif
kr = super::doMap( addressMap, atAddress,
options, sourceOffset, length );
return( kr );
}
/*
 * Undo a doMap().  Mapping a single-range descriptor into its own task was
 * a no-op (doMap just returned the range's address), so unmapping that
 * case is a no-op too; everything else defers to the base class.
 */
IOReturn IOGeneralMemoryDescriptor::doUnmap(
	vm_map_t addressMap,
	IOVirtualAddress logical,
	IOByteCount length )
{
    bool selfMapped = _task
        && (get_task_map(_task) == addressMap)
        && (1 == _rangesCount)
        && (_ranges.v[0].address == logical)
        && (length <= _ranges.v[0].length);
    if (selfMapped)
        return kIOReturnSuccess;

    return super::doUnmap(addressMap, logical, length);
}
extern "C" {
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
vm_size_t length, unsigned int mapFlags);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
};
OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

// Convenience: physical address at offset zero of the mapping.
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
{
    return getPhysicalSegment(0, 0);
}
// Concrete IOMemoryMap: records one mapping of a descriptor (or of a
// sub-range of another mapping, via superMap) into a task's address space.
class _IOMemoryMap : public IOMemoryMap
{
OSDeclareDefaultStructors(_IOMemoryMap)
IOMemoryDescriptor * memory;	// retained descriptor being mapped
IOMemoryMap * superMap;	// retained parent map when this is a sub-mapping
IOByteCount offset;	// offset into the descriptor
IOByteCount length;	// mapped length
IOVirtualAddress logical;	// address of the mapping in addressMap
task_t addressTask;	// task the memory is mapped into
vm_map_t addressMap;	// that task's vm_map (referenced)
IOOptionBits options;	// kIOMap* options used to create the mapping
protected:
virtual void taggedRelease(const void *tag = 0) const;
virtual void free();
public:
virtual IOVirtualAddress getVirtualAddress();
virtual IOByteCount getLength();
virtual task_t getAddressTask();
virtual IOMemoryDescriptor * getMemoryDescriptor();
virtual IOOptionBits getMapOptions();
virtual IOReturn unmap();
virtual void taskDied();
virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
IOByteCount * length);
// Return a compatible existing mapping (possibly this one) for the given
// request, or 0 if none.
_IOMemoryMap * copyCompatible(
IOMemoryDescriptor * owner,
task_t intoTask,
IOVirtualAddress toAddress,
IOOptionBits options,
IOByteCount offset,
IOByteCount length );
// Initialize as a sub-range view of an existing mapping.
bool initCompatible(
IOMemoryDescriptor * memory,
IOMemoryMap * superMap,
IOByteCount offset,
IOByteCount length );
// Initialize by creating a new mapping of "memory" into "intoTask".
bool initWithDescriptor(
IOMemoryDescriptor * memory,
task_t intoTask,
IOVirtualAddress toAddress,
IOOptionBits options,
IOByteCount offset,
IOByteCount length );
// Suspend or restore this mapping (e.g. while the backing changes).
IOReturn redirect(
task_t intoTask, bool redirect );
};
#undef super
#define super IOMemoryMap
OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
// Initialize as a view onto an existing mapping: retains both the
// descriptor and the parent map, and derives address/options from the
// parent.  A zero _length means "the whole descriptor".
bool _IOMemoryMap::initCompatible(
IOMemoryDescriptor * _memory,
IOMemoryMap * _superMap,
IOByteCount _offset,
IOByteCount _length )
{
if( !super::init())
return( false);
// The requested window must lie inside the parent mapping.
if( (_offset + _length) > _superMap->getLength())
return( false);
_memory->retain();
memory = _memory;
_superMap->retain();
superMap = _superMap;
offset = _offset;
if( _length)
length = _length;
else
length = _memory->getLength();
options = superMap->getMapOptions();
logical = superMap->getVirtualAddress() + offset;
return( true );
}
// Initialize by creating a fresh mapping of "_memory" into "intoTask".
// Takes a vm_map reference and a descriptor retain up front; if the
// underlying doMap fails, both are released and the object is left inert.
// kIOMapStatic means the mapping already exists (toAddress is it), so no
// doMap is performed.
bool _IOMemoryMap::initWithDescriptor(
IOMemoryDescriptor * _memory,
task_t intoTask,
IOVirtualAddress toAddress,
IOOptionBits _options,
IOByteCount _offset,
IOByteCount _length )
{
bool ok;
if( (!_memory) || (!intoTask) || !super::init())
return( false);
if( (_offset + _length) > _memory->getLength())
return( false);
addressMap = get_task_map(intoTask);
if( !addressMap)
return( false);
vm_map_reference(addressMap);
_memory->retain();
memory = _memory;
offset = _offset;
if( _length)
length = _length;
else
length = _memory->getLength();
addressTask = intoTask;
logical = toAddress;
options = _options;
if( options & kIOMapStatic)
ok = true;
else
ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
options, offset, length ));
if( !ok) {
// Roll back the retain and the map reference taken above.
logical = 0;
memory->release();
memory = 0;
vm_map_deallocate(addressMap);
addressMap = 0;
}
return( ok );
}
// Parameter block for IOMemoryDescriptorMapAlloc (used as the "ref" of an
// IOIteratePageableMaps callback): input entry/size/options, output the
// chosen "mapped" address (0 on failure).
struct IOMemoryDescriptorMapAllocRef
{
ipc_port_t sharedMem;	// memory entry to map, or 0 to just allocate VM
vm_size_t size;
vm_offset_t mapped;	// in: requested address (unless kIOMapAnywhere); out: result
IOByteCount sourceOffset;
IOOptionBits options;
};
// Allocate or map ref->size bytes in 'map'.  Used both directly and as
// the callback for IOIteratePageableMaps().  With a memory entry
// (ref->sharedMem) the entry is vm_map()ed after its cache mode is
// updated; otherwise anonymous memory is vm_allocate()d.  On success
// ref->mapped holds the address; on failure it is zeroed.
static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
IOReturn err;
do {
if( ref->sharedMem) {
vm_prot_t prot = VM_PROT_READ
| ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
// MAP_MEM_ONLY: mach_make_memory_entry only updates the cache
// mode of the existing entry instead of creating a new one.
vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
// Translate the kIOMap cache option into the Mach cache mode.
switch (ref->options & kIOMapCacheMask)
{
case kIOMapInhibitCache:
SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
break;
case kIOMapWriteThruCache:
SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
break;
case kIOMapWriteCombineCache:
SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
break;
case kIOMapCopybackCache:
SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
break;
case kIOMapDefaultCache:
default:
SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
break;
}
vm_size_t unused = 0;
err = mach_make_memory_entry( NULL , &unused, 0 ,
memEntryCacheMode, NULL, ref->sharedMem );
// Failure to change the cache mode is logged but not fatal; the
// vm_map below proceeds regardless.
if (KERN_SUCCESS != err)
IOLog("MAP_MEM_ONLY failed %d\n", err);
err = vm_map( map,
&ref->mapped,
ref->size, 0 ,
(( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
| VM_MAKE_TAG(VM_MEMORY_IOKIT),
ref->sharedMem, ref->sourceOffset,
false, prot, prot, VM_INHERIT_NONE);
if( KERN_SUCCESS != err) {
ref->mapped = 0;
continue;
}
} else {
// No backing memory entry: plain anonymous allocation, tagged as
// IOKit memory and excluded from inheritance by child tasks.
err = vm_allocate( map, &ref->mapped, ref->size,
((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
| VM_MAKE_TAG(VM_MEMORY_IOKIT) );
if( KERN_SUCCESS != err) {
ref->mapped = 0;
continue;
}
err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
assert( KERN_SUCCESS == err );
}
} while( false );
return( err );
}
// Enter a mapping of this descriptor into addressMap.
//
// atAddress: in/out — on input the requested virtual address (ignored
//            when kIOMapAnywhere is set); on output the mapped address
//            including the sub-page offset of the source.
// length:    bytes to map; 0 means "the whole descriptor".
//
// Pageable buffer memory destined for the kernel map is placed in one
// of IOKit's pageable submaps via IOIteratePageableMaps(); everything
// else is allocated/mapped directly.  Physical-only descriptors and
// device-pager-backed memory additionally have their pages entered by
// handleFault().
IOReturn IOMemoryDescriptor::doMap(
vm_map_t addressMap,
IOVirtualAddress * atAddress,
IOOptionBits options,
IOByteCount sourceOffset,
IOByteCount length )
{
    IOReturn err = kIOReturnSuccess;
    memory_object_t pager;
    vm_address_t logical;
    IOByteCount pageOffset;
    IOPhysicalAddress sourceAddr;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options = options;
    // Keep ref.mapped zero until an allocation actually succeeds so the
    // error path below never deallocates a region we do not own.
    ref.mapped = 0;

    do {
        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        assert( sourceAddr );
        pageOffset = sourceAddr - trunc_page_32( sourceAddr );

        ref.size = round_page_32( length + pageOffset );

        logical = *atAddress;
        if( options & kIOMapAnywhere)
            // vm_map/vm_allocate pick a free region when mapped == 0.
            ref.mapped = 0;
        else {
            ref.mapped = trunc_page_32( logical );
            // A fixed-address request must share the source's offset
            // within the page.
            if( (logical - ref.mapped) != pageOffset) {
                // BUGFIX: nothing has been allocated yet — clear
                // ref.mapped so the cleanup below does not doUnmap() a
                // caller-owned region that this call never mapped.
                ref.mapped = 0;
                err = kIOReturnVMError;
                continue;
            }
        }

        if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc( addressMap, &ref );

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        // No memory entry (physical memory) or a device pager present:
        // the pages must be entered/populated explicitly.
        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        // Undo any successful allocation before reporting failure.
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}
// Private bit held in _flags while this descriptor's mappings are
// redirected (see IOMemoryDescriptor::redirect); handleFault() sleeps
// while it is set and redirect(false) wakes the sleepers.
enum {
kIOMemoryRedirected = 0x00010000
};
// Enter/populate the pages backing [sourceOffset, sourceOffset+length)
// at 'address' in addressMap, walking the descriptor's physical
// segments.  With addressMap == 0 (pager fault path) it only blocks
// while the descriptor is redirected and then returns success.
IOReturn IOMemoryDescriptor::handleFault(
void * _pager,
vm_map_t addressMap,
IOVirtualAddress address,
IOByteCount sourceOffset,
IOByteCount length,
IOOptionBits options )
{
IOReturn err = kIOReturnSuccess;
memory_object_t pager = (memory_object_t) _pager;
vm_size_t size;
vm_size_t bytes;
vm_size_t page;
IOByteCount pageOffset;
IOByteCount pagerOffset;
IOPhysicalLength segLen;
addr64_t physAddr;
if( !addressMap) {
// No map to enter pages into: just wait out any redirect.
if( kIOMemoryRedirected & _flags) {
#ifdef DEBUG
IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
#endif
do {
SLEEP;
} while( kIOMemoryRedirected & _flags );
}
return( kIOReturnSuccess );
}
physAddr = getPhysicalSegment64( sourceOffset, &segLen );
assert( physAddr );
// Align the first segment down to a page boundary; pageOffset is the
// sub-page offset, folded back into size/segLen below.
pageOffset = physAddr - trunc_page_64( physAddr );
pagerOffset = sourceOffset;
size = length + pageOffset;
physAddr -= pageOffset;
segLen += pageOffset;
bytes = size;
do {
// Clip the final segment to the remaining bytes; any intermediate
// segment must end on a page boundary.
if( segLen >= bytes)
segLen = bytes;
else if( segLen != trunc_page_32( segLen))
err = kIOReturnVMError;
// After the first iteration physAddr must be page aligned.
if( physAddr != trunc_page_64( physAddr))
err = kIOReturnBadArgument;
#ifdef DEBUG
if( kIOLogMapping & gIOKitDebug)
IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
addressMap, address + pageOffset, physAddr + pageOffset,
segLen - pageOffset);
#endif
#ifdef i386
// On i386 the pages are entered into the pmap directly here.
if( addressMap && (kIOReturnSuccess == err))
err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
assert( KERN_SUCCESS == err );
if( err)
break;
#endif
if( pager) {
if( reserved && reserved->pagerContig) {
// Physically contiguous object: populate the whole pager range
// with a single call.
IOPhysicalLength allLen;
addr64_t allPhys;
allPhys = getPhysicalSegment64( 0, &allLen );
assert( allPhys );
err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
} else {
// Otherwise populate the pager one page at a time.
for( page = 0;
(page < segLen) && (KERN_SUCCESS == err);
page += page_size) {
err = device_pager_populate_object(pager, pagerOffset,
(ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
pagerOffset += page_size;
}
}
assert( KERN_SUCCESS == err );
if( err)
break;
}
#ifndef i386
// Pre-fault the pages into the target map unless redirected.
// NOTE(review): fault type 3 is presumably VM_PROT_READ|VM_PROT_WRITE
// — confirm against the vm_fault prototype.
if(!(kIOMemoryRedirected & _flags)) {
vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
}
#endif
// Advance to the next physical segment.
sourceOffset += segLen - pageOffset;
address += segLen;
bytes -= segLen;
pageOffset = 0;
} while( bytes
&& (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
// Bytes left over means the descriptor ran out of segments early.
if( bytes)
err = kIOReturnBadArgument;
return( err );
}
// Remove a previously entered mapping.  Pageable buffer mappings made
// in the kernel task actually live in one of IOKit's pageable submaps,
// so the correct map is looked up from the logical address before
// deallocating.
IOReturn IOMemoryDescriptor::doUnmap(
vm_map_t addressMap,
IOVirtualAddress logical,
IOByteCount length )
{
#ifdef DEBUG
    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );
#endif

    if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
        addressMap = IOPageableMapForAddress( logical );

    return( vm_deallocate( addressMap, logical, length ));
}
// Redirect (redirect == true) or restore (redirect == false) every
// mapping of this descriptor, except mappings belonging to safeTask.
// While redirected, kIOMemoryRedirected is set in _flags and faulting
// threads sleep in handleFault(); clearing it wakes them.
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
IOReturn err;
_IOMemoryMap * mapping = 0;
OSIterator * iter;
LOCK;
do {
// Forward the request to each individual mapping.
if( (iter = OSCollectionIterator::withCollection( _mappings))) {
while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
mapping->redirect( safeTask, redirect );
iter->release();
}
} while( false );
if( redirect)
_flags |= kIOMemoryRedirected;
else {
_flags &= ~kIOMemoryRedirected;
// Wake any threads sleeping in handleFault().
WAKEUP;
}
UNLOCK;
// Sub-descriptors also propagate the redirect to their parent.
IOSubMemoryDescriptor * subMem;
if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
err = subMem->redirect( safeTask, redirect );
else
err = kIOReturnSuccess;
return( err );
}
// Sub-range descriptors delegate redirection to the parent descriptor.
IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
return( _parent->redirect( safeTask, redirect ));
}
// Tear down (redirect == true) or re-establish (redirect == false)
// this mapping's page tables, unless the mapping belongs to safeTask
// or was created with kIOMapStatic.
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
{
IOReturn err = kIOReturnSuccess;
if( superMap) {
// Submap: nothing to do here — presumably the top-level mapping is
// redirected instead.
} else {
LOCK;
if( logical && addressMap
&& (get_task_map( safeTask) != addressMap)
&& (0 == (options & kIOMapStatic))) {
IOUnmapPages( addressMap, logical, length );
if( !redirect) {
// Restore: release the region and re-map it at the same fixed
// address (kIOMapAnywhere stripped).
err = vm_deallocate( addressMap, logical, length );
err = memory->doMap( addressMap, &logical,
(options & ~kIOMapAnywhere) ,
offset, length );
} else
err = kIOReturnSuccess;
#ifdef DEBUG
IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
#endif
}
UNLOCK;
}
return( err );
}
// Remove the mapping from its task and drop the vm_map reference.
// Submaps and static mappings are left alone (they are not ours to
// deallocate); the logical address is cleared in every case.
IOReturn _IOMemoryMap::unmap( void )
{
IOReturn err;
LOCK;
if( logical && addressMap && (0 == superMap)
&& (0 == (options & kIOMapStatic))) {
err = memory->doUnmap( addressMap, logical, length );
vm_map_deallocate(addressMap);
addressMap = 0;
} else
err = kIOReturnSuccess;
logical = 0;
UNLOCK;
return( err );
}
// Called when the mapping's owning task terminates: drop our reference
// on the task's vm_map and forget the now-stale task and address.
void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate( addressMap );
        addressMap = 0;
    }
    addressTask = 0;
    logical     = 0;
    UNLOCK;
}
// Release under the memory lock so teardown is serialized with other
// descriptor operations.  NOTE(review): the freeWhen count of 2
// presumably accounts for the owning descriptor's reference in its
// _mappings set in addition to the client's — confirm against
// OSObject::taggedRelease semantics.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
LOCK;
super::taggedRelease(tag, 2);
UNLOCK;
}
// Destructor path: unmap, detach from the owning descriptor's mapping
// set, and drop the references taken at init time.
void _IOMemoryMap::free()
{
unmap();
if( memory) {
LOCK;
memory->removeMapping( this);
UNLOCK;
memory->release();
}
if( superMap)
superMap->release();
super::free();
}
// Accessor: size in bytes of the mapped range.
IOByteCount _IOMemoryMap::getLength()
{
    return length;
}
// Accessor: starting virtual address of the mapping in its task.
IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return logical;
}
// Task whose address space holds this mapping; submaps defer to the
// parent mapping.
task_t _IOMemoryMap::getAddressTask()
{
    return superMap ? superMap->getAddressTask() : addressTask;
}
// Accessor: the kIOMap* option bits this mapping was created with.
IOOptionBits _IOMemoryMap::getMapOptions()
{
    return options;
}
// Accessor: the descriptor this map was created from (not retained for
// the caller).
IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return memory;
}
// If this existing mapping can satisfy a new mapping request, return
// either this map (retained) when the request matches exactly, or a
// new sub-map covering the requested range.  Returns 0 when the
// request is incompatible (different task, protection, cache mode,
// fixed address, or range).
_IOMemoryMap * _IOMemoryMap::copyCompatible(
IOMemoryDescriptor * owner,
task_t task,
IOVirtualAddress toAddress,
IOOptionBits _options,
IOByteCount _offset,
IOByteCount _length )
{
_IOMemoryMap * mapping;
// Must target the same task address space.
if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
return( 0 );
// Read-only vs read-write must agree.
if( (options ^ _options) & kIOMapReadOnly)
return( 0 );
// A specific cache mode request must agree with ours.
if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
&& ((options ^ _options) & kIOMapCacheMask))
return( 0 );
// A fixed-address request must match our address exactly.
if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
return( 0 );
// The requested range, rebased to this map's offset, must fit.
if( _offset < offset)
return( 0 );
_offset -= offset;
if( (_offset + _length) > length)
return( 0 );
if( (length == _length) && (!_offset)) {
// Exact match: hand out a new reference to this map.
retain();
mapping = this;
} else {
// Partial match: wrap the range in a sub-map.
mapping = new _IOMemoryMap;
if( mapping
&& !mapping->initCompatible( owner, this, _offset, _length )) {
mapping->release();
mapping = 0;
}
}
return( mapping );
}
// Translate an offset within this mapping to a physical address by
// delegating to the descriptor, biased by the map's own offset.
IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
IOPhysicalLength * length)
{
    IOPhysicalAddress phys;

    LOCK;
    phys = memory->getPhysicalSegment( offset + _offset, length );
    UNLOCK;

    return phys;
}
#undef super
#define super OSObject
// One-time class initialization: create the global recursive memory
// lock and publish the mapped-IO byte limit in the registry root.
void IOMemoryDescriptor::initialize( void )
{
if( 0 == gIOMemoryLock)
gIOMemoryLock = IORecursiveLockAlloc();
IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
ptoa_64(gIOMaximumMappedIOPageCount), 64);
}
// Release the set of mappings, if one was ever created, then tear down
// the base object.
void IOMemoryDescriptor::free( void )
{
    if (_mappings)
        _mappings->release();

    super::free();
}
// Record a mapping of this descriptor that already exists in intoTask
// at mapAddress.  kIOMapStatic makes initWithDescriptor skip doMap(),
// so no new pages are entered and unmap() will not tear it down.
IOMemoryMap * IOMemoryDescriptor::setMapping(
task_t intoTask,
IOVirtualAddress mapAddress,
IOOptionBits options )
{
_IOMemoryMap * map;
map = new _IOMemoryMap;
LOCK;
if( map
&& !map->initWithDescriptor( this, intoTask, mapAddress,
options | kIOMapStatic, 0, getLength() )) {
map->release();
map = 0;
}
// addMapping tolerates a NULL map.
addMapping( map);
UNLOCK;
return( map);
}
// Map the whole descriptor into the kernel task at an address chosen
// by the VM system.
IOMemoryMap * IOMemoryDescriptor::map(
IOOptionBits options )
{
    return makeMapping( this, kernel_task, 0,
                        options | kIOMapAnywhere,
                        0, getLength() );
}
// Map a range of the descriptor into an arbitrary task; a zero length
// maps everything from 'offset' onward.
IOMemoryMap * IOMemoryDescriptor::map(
task_t intoTask,
IOVirtualAddress toAddress,
IOOptionBits options,
IOByteCount offset,
IOByteCount length )
{
    if( !length)
        length = getLength();

    return makeMapping( this, intoTask, toAddress, options, offset, length );
}
// Find or create a mapping of 'owner' for intoTask.  Existing mappings
// are tried first via copyCompatible(), which may hand back a retained
// existing map or a new sub-map.  With kIOMapReference set only reuse
// is permitted; otherwise a fresh mapping is created on miss.
IOMemoryMap * IOMemoryDescriptor::makeMapping(
IOMemoryDescriptor * owner,
task_t intoTask,
IOVirtualAddress toAddress,
IOOptionBits options,
IOByteCount offset,
IOByteCount length )
{
_IOMemoryMap * mapping = 0;
OSIterator * iter;
LOCK;
do {
// Look for a compatible existing mapping first.
if( (iter = OSCollectionIterator::withCollection( _mappings))) {
while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
if( (mapping = mapping->copyCompatible(
owner, intoTask, toAddress,
options | kIOMapReference,
offset, length )))
break;
}
iter->release();
if( mapping)
continue;
}
// Reference-only requests must not create a new mapping.
if( mapping || (options & kIOMapReference))
continue;
owner = this;
mapping = new _IOMemoryMap;
if( mapping
&& !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
offset, length )) {
#ifdef DEBUG
IOLog("Didn't make map %08lx : %08lx\n", offset, length );
#endif
mapping->release();
mapping = 0;
}
} while( false );
// Record the (possibly NULL) result on the owning descriptor.
owner->addMapping( mapping);
UNLOCK;
return( mapping);
}
// Record a mapping in this descriptor's mapping set, creating the set
// lazily on first use.  NULL mappings are ignored.
void IOMemoryDescriptor::addMapping(
IOMemoryMap * mapping )
{
    if (!mapping)
        return;

    if (!_mappings)
        _mappings = OSSet::withCapacity(1);
    if (_mappings)
        _mappings->setObject( mapping );
}
// Forget a mapping previously recorded with addMapping().
void IOMemoryDescriptor::removeMapping(
IOMemoryMap * mapping )
{
    if (!_mappings)
        return;

    _mappings->removeObject( mapping );
}
#undef super
#define super IOMemoryDescriptor
// IOSubMemoryDescriptor exposes a byte sub-range [_start, _start+_length)
// of a parent IOMemoryDescriptor.
OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
// Initialize (or re-initialize) this descriptor as the sub-range
// [offset, offset+length) of 'parent'.  The parent is retained for the
// lifetime of this object; the range must fit within the parent.
// Returns false on a NULL parent, an out-of-range request, or base
// init failure.
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
IOByteCount offset, IOByteCount length,
IODirection direction )
{
    if( !parent)
        return( false);
    if( (offset + length) > parent->getLength())
        return( false);

    // BUGFIX: retain the new parent BEFORE releasing any old one, so
    // re-initializing with the same parent cannot release the last
    // reference and then retain a freed object.
    parent->retain();

    if( !_parent) {
        // First initialization: run the base class init.
        if( !super::init()) {
            parent->release();
            return( false );
        }
    } else {
        // Re-initialization: drop the previous parent.
        _parent->release();
    }

    _parent = parent;
    _start = offset;
    _length = length;
    _direction = direction;
    _tag = parent->getTag();

    return( true );
}
// Drop the reference on the parent descriptor taken in initSubRange().
void IOSubMemoryDescriptor::free( void )
{
    if (_parent)
        _parent->release();

    super::free();
}
// Delegate to the parent, rebasing the offset by _start and clipping
// the returned segment length to this sub-range.  Returns 0 (with
// *length zeroed) for offsets at or past the end of the range.
IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
IOByteCount * length )
{
    IOPhysicalAddress phys;
    IOByteCount segLen;

    assert(offset <= _length);

    if( length)
        *length = 0;
    if( offset >= _length)
        return( 0 );

    phys = _parent->getPhysicalSegment( offset + _start, &segLen );
    if( phys && length)
        *length = min( _length - offset, segLen );

    return phys;
}
// Delegate to the parent, rebasing the offset by _start and clipping
// the returned segment length to this sub-range.  Returns 0 (with
// *length zeroed) for offsets at or past the end of the range.
IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
IOByteCount * length )
{
    IOPhysicalAddress src;
    IOByteCount segLen;

    assert(offset <= _length);

    if( length)
        *length = 0;
    if( offset >= _length)
        return( 0 );

    src = _parent->getSourceSegment( offset + _start, &segLen );
    if( src && length)
        *length = min( _length - offset, segLen );

    return src;
}
// Virtual segments are not supported on sub-range descriptors; always
// returns 0.
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
IOByteCount * lengthOfSegment)
{
return( 0 );
}
// Read from the sub-range into 'bytes', clipping the count to the
// range's end.  Returns the number of bytes actually read (0 if the
// offset is at or past the end).
IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
void * bytes, IOByteCount length)
{
    IOByteCount copied;

    assert(offset <= _length);
    if( offset >= _length)
        return( 0 );

    LOCK;
    copied = _parent->readBytes( _start + offset, bytes,
                                 min(length, _length - offset) );
    UNLOCK;

    return copied;
}
// Write from 'bytes' into the sub-range, clipping the count to the
// range's end.  Returns the number of bytes actually written (0 if the
// offset is at or past the end).
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
const void* bytes, IOByteCount length)
{
    IOByteCount copied;

    assert(offset <= _length);
    if( offset >= _length)
        return( 0 );

    LOCK;
    copied = _parent->writeBytes( _start + offset, bytes,
                                  min(length, _length - offset) );
    UNLOCK;

    return copied;
}
// Wire the backing memory by preparing the parent descriptor.
IOReturn IOSubMemoryDescriptor::prepare(
IODirection forDirection)
{
    IOReturn result;

    LOCK;
    result = _parent->prepare( forDirection);
    UNLOCK;

    return result;
}
// Undo a prepare() by completing the parent descriptor.
IOReturn IOSubMemoryDescriptor::complete(
IODirection forDirection)
{
    IOReturn result;

    LOCK;
    result = _parent->complete( forDirection);
    UNLOCK;

    return result;
}
// Map a sub-range by mapping through the parent descriptor.  Three
// attempts, in order: (1) reuse an existing parent mapping
// (kIOMapReference) — toAddress is rebased so the parent's start lands
// where our sub-range appears at toAddress; (2) a fresh parent mapping
// of just the rebased range; (3) fall back to the generic path.
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
IOMemoryDescriptor * owner,
task_t intoTask,
IOVirtualAddress toAddress,
IOOptionBits options,
IOByteCount offset,
IOByteCount length )
{
IOMemoryMap * mapping;
mapping = (IOMemoryMap *) _parent->makeMapping(
_parent, intoTask,
toAddress - (_start + offset),
options | kIOMapReference,
_start + offset, length );
if( !mapping)
mapping = (IOMemoryMap *) _parent->makeMapping(
_parent, intoTask,
toAddress,
options, _start + offset, length );
if( !mapping)
mapping = super::makeMapping( owner, intoTask, toAddress, options,
offset, length );
return( mapping );
}
// Generic initializers are disabled for sub-range descriptors; use
// initSubRange() instead.
bool
IOSubMemoryDescriptor::initWithAddress(void * address,
IOByteCount length,
IODirection direction)
{
return( false );
}
// Disabled; use initSubRange().
bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
IOByteCount length,
IODirection direction,
task_t task)
{
return( false );
}
// Disabled; use initSubRange().
bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
IOPhysicalAddress address,
IOByteCount length,
IODirection direction )
{
return( false );
}
// Disabled; use initSubRange().
bool
IOSubMemoryDescriptor::initWithRanges(
IOVirtualRange * ranges,
UInt32 withCount,
IODirection direction,
task_t task,
bool asReference)
{
return( false );
}
// Disabled; use initSubRange().
bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
UInt32 withCount,
IODirection direction,
bool asReference)
{
return( false );
}
// Serialize the descriptor as an XML array of {address, length}
// dictionaries, one per virtual range.  The ranges are snapshotted
// into vcopy under the lock so serialization does not race concurrent
// mutation of _ranges.
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    IOVirtualRange *vcopy;
    unsigned int index, nRanges;
    bool result;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // Snapshot the ranges under the lock.
    LOCK;
    if (nRanges == _rangesCount) {
        for (index = 0; index < nRanges; index++) {
            vcopy[index] = _ranges.v[index];
        }
    } else {
        // The range count changed underneath us; bail out.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    // BUGFIX (robustness): symbol creation can fail under memory
    // pressure; the keys are dereferenced below.
    if ((keys[0] == 0) || (keys[1] == 0)) {
        result = false;
        goto bail;
    }

    for (index = 0; index < nRanges; index++)
    {
        // BUGFIX: serialize from the vcopy snapshot — the original read
        // the live _ranges.v array here, outside the lock, defeating
        // the purpose of the copy above.
        values[0] = OSNumber::withNumber(vcopy[index].address, sizeof(vcopy[index].address) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(vcopy[index].length, sizeof(vcopy[index].length) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

 bail:
    // Release whatever was still outstanding on any exit path.
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
    return result;
}
// Serialize as an XML dict of {offset, length, parent}; the parent
// descriptor serializes itself as the third value.
// NOTE(review): if addString/serialize fails mid-loop the opening
// "dict" tag is left unbalanced — confirm OSSerialize tolerates this.
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
if (!s) {
return (false);
}
if (s->previouslySerialized(this)) return true;
if (!s->addXMLStartTag(this, "dict")) return false;
char const *keys[3] = {"offset", "length", "parent"};
OSObject *values[3];
values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
if (values[0] == 0)
return false;
values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
if (values[1] == 0) {
values[0]->release();
return false;
}
// _parent is not retained here: the descriptor holds it alive for
// this object's lifetime, and it is not released below.
values[2] = _parent;
bool result = true;
for (int i=0; i<3; i++) {
if (!s->addString("<key>") ||
!s->addString(keys[i]) ||
!s->addXMLEndTag("key") ||
!values[i]->serialize(s)) {
result = false;
break;
}
}
// Only the two OSNumbers we created are released.
values[0]->release();
values[1]->release();
if (!result) {
return false;
}
return s->addXMLEndTag("dict");
}
// Reserved vtable padding slots for binary-compatible future
// expansion; slots 0-2 are already in use.
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
// Convenience: physical address of the descriptor's first segment.
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
{
    return getPhysicalSegment( 0, 0 );
}