#include <zone_debug.h>
#include <cpus.h>
#include <debug.h>
#include <mach_kgdb.h>
#include <mach_vm_debug.h>
#include <db_machine_commands.h>
#include <kern/thread.h>
#include <kern/simple_lock.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/pmap_internals.h>
#include <ppc/mem.h>
#include <ppc/mappings.h>
#include <ppc/new_screen.h>
#include <ppc/Firmware.h>
#include <ppc/savearea.h>
#include <ppc/exception.h>
#include <ddb/db_output.h>
#if DB_MACHINE_COMMANDS
/* Optionally enable traces of pmap operations in the post-mortem trace table */
/* #define PMAP_LOWTRACE 1 */
#define PMAP_LOWTRACE 0
#else /* DB_MACHINE_COMMANDS */
/* Cannot trace at all without the debugger machine commands */
#define PMAP_LOWTRACE 0
#endif /* DB_MACHINE_COMMANDS */
#define PERFTIMES 0
#if PERFTIMES && DEBUG
#define debugLog2(a, b, c) dbgLog2(a, b, c)
#else
#define debugLog2(a, b, c)
#endif
extern unsigned int avail_remaining;
extern unsigned int mappingdeb0;
extern struct Saveanchor saveanchor;
extern int real_ncpus;
unsigned int debugbackpocket;
vm_offset_t avail_next;
vm_offset_t first_free_virt;
int current_free_region;
void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount);
#if MACH_VM_DEBUG
int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space);
#endif
#if DEBUG
#define PDB_USER 0x01
#define PDB_MAPPING 0x02
#define PDB_ENTER 0x04
#define PDB_COPY 0x08
#define PDB_ZERO 0x10
#define PDB_WIRED 0x20
#define PDB_PTEG 0x40
#define PDB_LOCK 0x100
#define PDB_IO 0x200
int pmdebug=0;
#endif
extern struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
pmap_t cursor_pmap;
struct zone *pmap_zone;
boolean_t pmap_initialized = FALSE;
struct phys_entry *phys_table;
lock_t pmap_system_lock;
decl_simple_lock_data(,tlb_system_lock)
int free_pmap_max = 32;
int free_pmap_count;
pmap_t free_pmap_list;
decl_simple_lock_data(,free_pmap_lock)
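/*
 * pmap_find_physentry(pa)
 *
 * Returns the phys_entry for physical address "pa", or PHYS_NULL if the
 * address does not fall in any managed memory region.  Regions are scanned
 * from highest to lowest; the entry is touched in with a dcbt prefetch
 * because the caller is about to use it.
 */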
struct phys_entry *pmap_find_physentry(vm_offset_t pa)
{
int i;
struct phys_entry *entry;
for (i = pmap_mem_regions_count-1; i >= 0; i--) {
if (pa < pmap_mem_regions[i].start)
continue;
if (pa >= pmap_mem_regions[i].end)
return PHYS_NULL;
entry = &pmap_mem_regions[i].phys_table[(pa - pmap_mem_regions[i].start) >> PPC_PGSHIFT];
__asm__ volatile("dcbt 0,%0" : : "r" (entry));
return entry;
}
kprintf("DEBUG : pmap_find_physentry 0x%08x out of range\n",pa);
return PHYS_NULL;
}
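/*
 * pmap_add_physical_memory(spa, epa, available, attr)
 *
 * Adds the physical range [spa, epa) as a new pmap memory region with the
 * given cache attributes.  Currently disabled: the function panics on entry,
 * since memory cannot be added once the phys_table layout is fixed.
 */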
kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
boolean_t available, unsigned int attr)
{
int i,j;
spl_t s;
panic("Forget it! You can't map no more memory, you greedy puke!\n");
spa = trunc_page(spa);
epa = round_page(epa);
assert (epa >= spa);
for (i = 0; i < pmap_mem_regions_count; i++) {
if (epa < pmap_mem_regions[i].start)
break;
if (spa < pmap_mem_regions[i].end) {
#if DEBUG
kprintf("pmap_add_physical_memory(0x%08x,0x%08x,0x%08x) - memory already present\n",spa,epa,attr);
#endif
return KERN_NO_SPACE;
}
}
#if DEBUG
kprintf("pmap_add_physical_memory; region insert spot: %d out of %d\n", i, pmap_mem_regions_count);
#endif
if (pmap_mem_regions_count == PMAP_MEM_REGION_MAX)
return KERN_RESOURCE_SHORTAGE;
#if DEBUG
kprintf("pmap_add_physical_memory; kalloc\n");
#endif
phys_table = (struct phys_entry *)
kalloc(sizeof(struct phys_entry) * atop(epa-spa));
#if DEBUG
kprintf("pmap_add_physical_memory; new phys_table: %08X\n", phys_table);
#endif
for (j = 0; j < atop(epa-spa); j++) {
phys_table[j].phys_link = MAPPING_NULL;
mapping_phys_init(&phys_table[j], spa+(j*PAGE_SIZE), attr);
}
s = splhigh();
for (j = pmap_mem_regions_count; j > i ; j--)
pmap_mem_regions[j] = pmap_mem_regions[j-1];
pmap_mem_regions[i].start = spa;
pmap_mem_regions[i].end = epa;
pmap_mem_regions[i].phys_table = phys_table;
pmap_mem_regions_count++;
splx(s);
#if DEBUG
for(i=0; i<pmap_mem_regions_count; i++) {
kprintf("region %d: %08X %08X %08X\n", i, pmap_mem_regions[i].start,
pmap_mem_regions[i].end, pmap_mem_regions[i].phys_table);
}
#endif
if (available) {
kprintf("warning : pmap_add_physical_mem() "
"available not yet supported\n");
}
return KERN_SUCCESS;
}
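/*
 * pmap_map(va, spa, epa, prot)
 *
 * Maps the physical range [spa, epa) at virtual address "va" in the kernel
 * pmap as a permanent block mapping with default (cached) WIMG attributes.
 * Returns "va" unchanged.
 */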
vm_offset_t
pmap_map(
vm_offset_t va,
vm_offset_t spa,
vm_offset_t epa,
vm_prot_t prot)
{
if (spa == epa)
return(va);
assert(epa > spa);
debugLog2(40, va, spa);
pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_DEFAULT, blkPerm);
debugLog2(41, epa, prot);
return(va);
}
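/*
 * pmap_map_bd(va, spa, epa, prot)
 *
 * Same as pmap_map, but intended for device space: the block mapping is
 * made with I/O (cache-inhibited, guarded) WIMG attributes.
 */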
vm_offset_t
pmap_map_bd(
vm_offset_t va,
vm_offset_t spa,
vm_offset_t epa,
vm_prot_t prot)
{
register struct mapping *mp;
register struct phys_entry *pp;
if (spa == epa)
return(va);
assert(epa > spa);
debugLog2(42, va, epa);
pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_IO, blkPerm);
debugLog2(43, epa, prot);
return(va);
}
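/*
 * pmap_bootstrap(mem_size, first_avail, first_phys_avail, kmapsize)
 *
 * Bootstraps the pmap system at startup: initializes the kernel pmap and
 * its locks, sizes and allocates the hash table, PCA, save areas, and
 * phys_table, seeds the initial pool of mapping blocks, maps the memory
 * just carved out V=R, and records the remaining physical memory as free
 * regions for pmap_next_page to hand out.
 */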
void
pmap_bootstrap(unsigned int mem_size, vm_offset_t *first_avail, vm_offset_t *first_phys_avail, unsigned int kmapsize)
{
register struct mapping *mp;
vm_offset_t addr;
vm_size_t size;
int i, num, j, rsize, mapsize, vmpagesz, vmmapsz;
unsigned int mask;
vm_offset_t first_used_addr;
PCA *pcaptr;
*first_avail = round_page(*first_avail);
#if DEBUG
kprintf("first_avail=%08X; first_phys_avail=%08X; avail_remaining=%d\n",
*first_avail, *first_phys_avail, avail_remaining);
#endif
assert(PAGE_SIZE == PPC_PGBYTES);
kernel_pmap = &kernel_pmap_store;
cursor_pmap = &kernel_pmap_store;
lock_init(&pmap_system_lock,
FALSE,
ETAP_VM_PMAP_SYS,
ETAP_VM_PMAP_SYS_I);
simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
kernel_pmap->pmap_link.next = (queue_t)kernel_pmap;
kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap;
kernel_pmap->ref_count = 1;
kernel_pmap->space = PPC_SID_KERNEL;
kernel_pmap->pmapvr = 0;
kernel_pmap->bmaps = 0;
for(i=0; i < 128; i++) {
kernel_pmap->pmapUsage[i] = 0;
}
for(i=0; i < 16; i++) {
kernel_pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | PPC_SID_KERNEL;
}
num = sizeof(pte_t) * (mem_size >> 10);
for (hash_table_size = 64 * 1024;
hash_table_size < num;
hash_table_size *= 2)
continue;
if (num > (sizeof(pte_t) * 524288))
hash_table_size = hash_table_size/2;
do {
num = atop(mem_size);
size = (vm_size_t) (
(InitialSaveBloks * PAGE_SIZE) +
((InitialSaveBloks / 2) * PAGE_SIZE) +
hash_table_size +
hash_table_size +
(num * sizeof(struct phys_entry))
);
mapsize = size = round_page(size);
mapsize = mapsize + kmapsize;
vmpagesz = round_page(num * sizeof(struct vm_page));
vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry));
mapsize = mapsize + vmpagesz + vmmapsz;
mapsize = mapsize + (4 * 1024 * 1024);
mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK;
mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK);
#if DEBUG
kprintf("pmap_bootstrap: initial vm_pages = %08X\n", vmpagesz);
kprintf("pmap_bootstrap: initial vm_maps = %08X\n", vmmapsz);
kprintf("pmap_bootstrap: size before mappings = %08X\n", size);
kprintf("pmap_bootstrap: kernel map size = %08X\n", kmapsize);
kprintf("pmap_bootstrap: mapping blocks rqrd = %08X\n", mapsize);
#endif
size = size + (mapsize * PAGE_SIZE);
addr = (*first_avail +
(hash_table_size-1)) & ~(hash_table_size-1);
if (addr + size > pmap_mem_regions[0].end) {
hash_table_size /= 2;
} else {
break;
}
if (hash_table_size == 32 * 1024)
panic("cannot lay out pmap memory map correctly");
} while (1);
#if DEBUG
kprintf("hash table size=%08X, total size of area=%08X, addr=%08X\n",
hash_table_size, size, addr);
#endif
if (round_page(*first_phys_avail) < trunc_page(addr)) {
free_regions[free_regions_count].start = round_page(*first_phys_avail);
free_regions[free_regions_count].end = trunc_page(addr);
avail_remaining += (free_regions[free_regions_count].end -
free_regions[free_regions_count].start) /
PPC_PGBYTES;
#if DEBUG
kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
free_regions[free_regions_count].start,free_regions[free_regions_count].end,
avail_remaining);
#endif
free_regions_count++;
}
bzero((char *)addr, size);
first_used_addr = addr;
hash_table_base = addr;
addr += hash_table_size;
addr += hash_table_size;
assert((hash_table_base & (hash_table_size-1)) == 0);
pcaptr = (PCA *)(hash_table_base+hash_table_size);
mapCtl.mapcflush.pcaptr = pcaptr;
for(i=0; i < (hash_table_size/64) ; i++) {
pcaptr[i].flgs.PCAalflgs.PCAfree=0xFF;
pcaptr[i].flgs.PCAalflgs.PCAsteal=0x01;
}
savearea_init(&addr);
phys_table = (struct phys_entry *) addr;
#if DEBUG
kprintf("hash_table_base =%08X\n", hash_table_base);
kprintf("phys_table =%08X\n", phys_table);
kprintf("pmap_mem_regions_count =%08X\n", pmap_mem_regions_count);
#endif
for (i = 0; i < pmap_mem_regions_count; i++) {
pmap_mem_regions[i].phys_table = phys_table;
rsize = (pmap_mem_regions[i].end - (unsigned int)pmap_mem_regions[i].start)/PAGE_SIZE;
#if DEBUG
kprintf("Initializing physical table for region %d\n", i);
kprintf(" table=%08X, size=%08X, start=%08X, end=%08X\n",
phys_table, rsize, pmap_mem_regions[i].start,
(unsigned int)pmap_mem_regions[i].end);
#endif
for (j = 0; j < rsize; j++) {
phys_table[j].phys_link = MAPPING_NULL;
mapping_phys_init(&phys_table[j], (unsigned int)pmap_mem_regions[i].start+(j*PAGE_SIZE),
PTE_WIMG_DEFAULT);
}
phys_table = phys_table +
atop(pmap_mem_regions[i].end - pmap_mem_regions[i].start);
}
phys_table = (struct phys_entry *) addr;
addr += sizeof(struct phys_entry) * num;
simple_lock_init(&tlb_system_lock, ETAP_VM_PMAP_TLB);
#if DEBUG
kprintf("*** hash_table_init: base=%08X, size=%08X\n", hash_table_base, hash_table_size);
#endif
hash_table_init(hash_table_base, hash_table_size);
mapping_init();
for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) {
mapping_free_init(i, 1, 0);
}
mapCtl.mapcmin = MAPPERBLOK;
#if DEBUG
kprintf("mapping kernel memory from 0x%08x to 0x%08x, to address 0x%08x\n",
first_used_addr, round_page(first_used_addr+size),
first_used_addr);
#endif
pmap_map(first_used_addr, first_used_addr,
round_page(first_used_addr+size), VM_PROT_READ | VM_PROT_WRITE);
#if DEBUG
for(i=first_used_addr; i < round_page(first_used_addr+size); i+=PAGE_SIZE) {
if(i != (j = kvtophys(i))) {
kprintf("*** V=R mapping failed to verify: V=%08X; R=%08X\n", i, j);
}
}
#endif
*first_avail = round_page(first_used_addr + size);
first_free_virt = round_page(first_used_addr + size);
free_regions[free_regions_count].start = *first_avail;
free_regions[free_regions_count].end = pmap_mem_regions[0].end;
avail_remaining += (free_regions[free_regions_count].end -
free_regions[free_regions_count].start) /
PPC_PGBYTES;
#if DEBUG
kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
free_regions[free_regions_count].start,free_regions[free_regions_count].end,
avail_remaining);
#endif
free_regions_count++;
current_free_region = 0;
avail_next = free_regions[current_free_region].start;
#if DEBUG
kprintf("Number of free regions=%d\n",free_regions_count);
kprintf("Current free region=%d\n",current_free_region);
for(i=0;i<free_regions_count; i++) {
kprintf("Free region %3d - from %08X to %08X\n", i, free_regions[i].start,
free_regions[i].end);
}
for (i = 0; i < pmap_mem_regions_count; i++) {
kprintf("PMAP region %3d - from %08X to %08X; phys=%08X\n", i,
pmap_mem_regions[i].start,
pmap_mem_regions[i].end,
pmap_mem_regions[i].phys_table);
}
#endif
}
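/*
 * pmap_init()
 *
 * Finishes pmap initialization once zones are available: creates the pmap
 * zone and the free-pmap cache.  Called by the VM system.
 */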
void
pmap_init(void)
{
pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
#if ZONE_DEBUG
zone_debug_disable(pmap_zone);
#endif
pmap_initialized = TRUE;
free_pmap_list = 0;
free_pmap_count = 0;
simple_lock_init(&free_pmap_lock, ETAP_VM_PMAP_CACHE);
}
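/*
 * pmap_free_pages()
 *
 * Returns the number of physical pages still available for stealing.
 */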
unsigned int pmap_free_pages(void)
{
return avail_remaining;
}
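/*
 * pmap_next_page(addrp)
 *
 * Hands out the next free physical page during VM startup.  Pages come
 * first from the free_regions table (holes inside pmap_mem_regions[0]) and
 * then from the remaining pmap_mem_regions.  Returns FALSE when nothing is
 * left.
 */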
boolean_t pmap_next_page(vm_offset_t *addrp)
{
if (current_free_region >= free_regions_count) {
int current_pmap_mem_region = current_free_region -
free_regions_count + 1;
if (current_pmap_mem_region >= pmap_mem_regions_count)
return FALSE;
*addrp = avail_next;
avail_next += PAGE_SIZE;
avail_remaining--;
if (avail_next >= pmap_mem_regions[current_pmap_mem_region].end) {
current_free_region++;
current_pmap_mem_region++;
avail_next = pmap_mem_regions[current_pmap_mem_region].start;
#if DEBUG
kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
#endif
}
return TRUE;
}
*addrp = avail_next;
avail_next += PAGE_SIZE;
avail_remaining--;
if (avail_next >= free_regions[current_free_region].end) {
current_free_region++;
if (current_free_region < free_regions_count)
avail_next = free_regions[current_free_region].start;
else
avail_next = pmap_mem_regions[current_free_region -
free_regions_count + 1].start;
#if DEBUG
kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
#endif
}
return TRUE;
}
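/*
 * pmap_virtual_space(startp, endp)
 *
 * Reports the range of kernel virtual addresses still available after
 * bootstrap.
 */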
void pmap_virtual_space(
vm_offset_t *startp,
vm_offset_t *endp)
{
*startp = round_page(first_free_virt);
*endp = VM_MAX_KERNEL_ADDRESS;
}
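/*
 * pmap_create(size)
 *
 * Creates and returns a physical map; "size" must be zero (nonzero sizes
 * denote software-only maps, which this layer does not support).  A pmap is
 * recycled from the free list when possible; otherwise one is allocated
 * from the zone and assigned the next unused space ID from the circular
 * list of active pmaps.
 */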
pmap_t
pmap_create(vm_size_t size)
{
pmap_t pmap, ckpmap, fore, aft;
int s, i;
space_t sid;
unsigned int currSID;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00001, size, 0);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_create(size=%x)%c", size, size ? '\n' : ' ');
#endif
if (size)
return(PMAP_NULL);
s = splhigh();
simple_lock(&free_pmap_lock);
if(free_pmap_list) {
pmap = free_pmap_list;
free_pmap_list = (pmap_t)pmap->bmaps;
free_pmap_count--;
}
else {
simple_unlock(&free_pmap_lock);
splx(s);
pmap = (pmap_t) zalloc(pmap_zone);
if (pmap == PMAP_NULL) return(PMAP_NULL);
bzero((char *)pmap, pmapSize);
s = splhigh();
simple_lock(&free_pmap_lock);
ckpmap = cursor_pmap;
currSID = ckpmap->spaceNum;
while(1) {
currSID = (currSID + 1) & SID_MAX;
ckpmap = (pmap_t)ckpmap->pmap_link.next;
if(ckpmap->spaceNum != currSID) break;
if(ckpmap == cursor_pmap) {
panic("pmap_create: Maximum number (2^20) active address spaces reached\n");
}
}
pmap->space = (currSID * incrVSID) & SID_MAX;
pmap->spaceNum = currSID;
fore = (pmap_t)ckpmap->pmap_link.prev;
pmap->pmap_link.next = (queue_t)ckpmap;
fore->pmap_link.next = (queue_t)pmap;
pmap->pmap_link.prev = (queue_t)fore;
ckpmap->pmap_link.prev = (queue_t)pmap;
simple_lock_init(&pmap->lock, ETAP_VM_PMAP);
pmap->pmapvr = (unsigned int)pmap ^ (unsigned int)pmap_extract(kernel_pmap, (vm_offset_t)pmap);
}
pmap->ref_count = 1;
pmap->stats.resident_count = 0;
pmap->stats.wired_count = 0;
pmap->bmaps = 0;
pmap->vflags = 0;
for(i=0; i < 128; i++) {
pmap->pmapUsage[i] = 0;
}
for(i=0; i < 16; i++) {
pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | pmap->space;
}
#if PMAP_LOWTRACE
dbgTrace(0xF1D00002, (unsigned int)pmap, (unsigned int)pmap->space);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("-> %x, space id = %d\n", pmap, pmap->space);
#endif
simple_unlock(&free_pmap_lock);
splx(s);
return(pmap);
}
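/*
 * pmap_destroy(pmap)
 *
 * Drops a reference; when the count reaches zero the pmap is emptied if it
 * still holds mappings and is then either cached on the free list or
 * unlinked from the active list and returned to the zone.
 */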
void
pmap_destroy(pmap_t pmap)
{
int ref_count;
spl_t s;
pmap_t fore, aft;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00003, (unsigned int)pmap, 0);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_destroy(pmap=%x)\n", pmap);
#endif
if (pmap == PMAP_NULL)
return;
ref_count=hw_atomic_sub(&pmap->ref_count, 1);
if(ref_count>0) return;
if(ref_count < 0)
panic("pmap_destroy(): ref_count < 0");
#ifdef notdef
if(pmap->stats.resident_count != 0)
panic("PMAP_DESTROY: pmap not empty");
#else
if(pmap->stats.resident_count != 0) {
pmap_remove(pmap, 0, 0xFFFFF000);
}
#endif
s = splhigh();
simple_lock(&free_pmap_lock);
if (free_pmap_count <= free_pmap_max) {
pmap->bmaps = (struct blokmap *)free_pmap_list;
free_pmap_list = pmap;
free_pmap_count++;
simple_unlock(&free_pmap_lock);
} else {
if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev;
fore = (pmap_t)pmap->pmap_link.prev;
aft = (pmap_t)pmap->pmap_link.next;
fore->pmap_link.next = pmap->pmap_link.next;
aft->pmap_link.prev = pmap->pmap_link.prev;
simple_unlock(&free_pmap_lock);
zfree(pmap_zone, (vm_offset_t) pmap);
}
splx(s);
}
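/*
 * pmap_reference(pmap)
 *
 * Atomically gains another reference on the given pmap.
 */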
void
pmap_reference(pmap_t pmap)
{
spl_t s;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00004, (unsigned int)pmap, 0);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_reference(pmap=%x)\n", pmap);
#endif
if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1);
}
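/*
 * pmap_remove_some_phys(pmap, pa)
 *
 * Removes mappings of the physical page "pa": all of them if the pmap is a
 * VM host, otherwise only those belonging to the specified pmap.
 */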
void pmap_remove_some_phys(
pmap_t pmap,
vm_offset_t pa)
{
register struct phys_entry *pp;
register struct mapping *mp, *mpv;
if (pmap == PMAP_NULL) return;
pp = pmap_find_physentry(pa);
if (pp == PHYS_NULL) return;
if (pmap->vflags & pmapVMhost)
mapping_purge(pp);
else
mapping_purge_pmap(pp, pmap);
return;
}
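/*
 * pmap_remove(pmap, sva, eva)
 *
 * Removes all mappings in the virtual range [sva, eva).  Block mappings are
 * pulled first (a permanent block causes a panic); the per-page loop then
 * walks backwards, using the pmapUsage counts to skip regions that hold no
 * mappings.
 */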
void
pmap_remove(
pmap_t pmap,
vm_offset_t sva,
vm_offset_t eva)
{
spl_t spl;
struct mapping *mp, *blm;
vm_offset_t lpage;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00005, (unsigned int)pmap, sva|((eva-sva)>>12));
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_remove(pmap=%x, sva=%x, eva=%x)\n",
pmap, sva, eva);
#endif
if (pmap == PMAP_NULL)
return;
assert(eva >= sva);
assert((sva == trunc_page(sva)) && (eva == trunc_page(eva)));
debugLog2(44, sva, eva);
sva = trunc_page(sva);
lpage = trunc_page(eva) - PAGE_SIZE;
while(mp = (mapping *)hw_rem_blk(pmap, sva, lpage)) {
if((unsigned int)mp & 1) {
blm = (struct mapping *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC));
panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
pmap, sva, blm);
}
if (!((unsigned int)mp & 2))
mapping_free(hw_cpv(mp));
}
while (pmap->stats.resident_count && (eva > sva)) {
eva -= PAGE_SIZE;
#if 1
if((0x00008000 >> (sva >> 28)) & pmap->vflags)
panic("pmap_remove: attempt to remove nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva);
#endif
if(!(pmap->pmapUsage[(eva >> pmapUsageShft) & pmapUsageMask])) {
eva = eva & (-pmapUsageSize);
continue;
}
mapping_remove(pmap, eva);
}
debugLog2(45, 0, 0);
}
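/*
 * pmap_page_protect(pa, prot)
 *
 * Lowers the protection on all mappings of the physical page "pa".
 * Requests that leave the page readable merely restrict access; anything
 * else removes the mappings entirely.  VM_PROT_ALL is a no-op.
 */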
void
pmap_page_protect(
vm_offset_t pa,
vm_prot_t prot)
{
register struct phys_entry *pp;
boolean_t remove;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00006, (unsigned int)pa, (unsigned int)prot);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_page_protect(pa=%x, prot=%x)\n", pa, prot);
#endif
debugLog2(46, pa, prot);
switch (prot) {
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
remove = FALSE;
break;
case VM_PROT_ALL:
return;
default:
remove = TRUE;
break;
}
pp = pmap_find_physentry(pa);
if (pp == PHYS_NULL) return;
if (remove) {
mapping_purge(pp);
debugLog2(47, 0, 0);
return;
}
mapping_protect_phys(pp, prot, 0);
debugLog2(47, 1, 0);
}
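/*
 * pmap_protect(pmap, sva, eva, prot)
 *
 * Changes the protection on all mapped pages in the virtual range.
 * VM_PROT_NONE is turned into pmap_remove; otherwise each page is
 * reprotected in place, with pmapUsage used to skip empty regions.
 */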
void pmap_protect(
pmap_t pmap,
vm_offset_t sva,
vm_offset_t eva,
vm_prot_t prot)
{
spl_t spl;
register struct phys_entry *pp;
register struct mapping *mp, *mpv;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00008, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12)));
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_protect(pmap=%x, sva=%x, eva=%x, prot=%x)\n", pmap, sva, eva, prot);
assert(sva < eva);
#endif
if (pmap == PMAP_NULL) return;
debugLog2(48, sva, eva);
if (prot == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
debugLog2(49, prot, 0);
return;
}
sva = trunc_page(sva);
while(sva < eva) {
if(!(pmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) {
sva = (sva + pmapUsageSize) &(-pmapUsageSize);
if(!sva) break;
continue;
}
#if 1
if((0x00008000 >> (sva >> 28)) & pmap->vflags)
panic("pmap_protect: attempt to protect nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva);
#endif
mapping_protect(pmap, sva, prot);
sva += PAGE_SIZE;
}
debugLog2(49, prot, 1);
return;
}
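/*
 * pmap_enter(pmap, va, pa, prot, flags, wired)
 *
 * Creates a mapping from virtual address "va" to physical page "pa" with
 * the given protection.  Any existing mapping at "va" is removed first.
 * When VM_WIMG_USE_DEFAULT is passed, the cache attributes are taken from
 * the physical entry, or default to uncached/guarded for unmanaged pages.
 */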
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
unsigned int flags, boolean_t wired)
{
spl_t spl;
struct mapping *mp;
struct phys_entry *pp;
int memattr;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00009, (unsigned int)pmap, (unsigned int)va);
dbgTrace(0xF1D04009, (unsigned int)pa, (unsigned int)prot);
#endif
if (pmap == PMAP_NULL) return;
debugLog2(50, va, pa);
pp = pmap_find_physentry(pa);
if((0x00008000 >> (va >> 28)) & pmap->vflags)
panic("pmap_enter: attempt to map into nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, va);
spl=splhigh();
mapping_remove(pmap, va);
if(flags & VM_WIMG_USE_DEFAULT) {
if(pp) {
memattr = ((pp->pte1&0x00000078) >> 3);
} else {
memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
}
} else {
memattr = flags & VM_WIMG_MASK;
}
mp=mapping_make(pmap, pp, va, pa, prot, memattr, 0);
splx(spl);
debugLog2(51, prot, 0);
#if DEBUG
if (pmdebug & (PDB_USER|PDB_ENTER))
kprintf("leaving pmap_enter\n");
#endif
}
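/*
 * pmap_extract(pmap, va)
 *
 * Returns the physical address backing virtual address "va", or 0 if there
 * is no mapping.  Nested (shared) segments are redirected to the pmap that
 * actually owns them, and block mappings are resolved via hw_cvp_blk.
 */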
vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va) {
spl_t spl;
register struct mapping *mp, *mpv;
register vm_offset_t pa;
unsigned int seg;
pmap_t actpmap;
#if PMAP_LOWTRACE
dbgTrace(0xF1D0000B, (unsigned int)pmap, (unsigned int)va);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_extract(pmap=%x, va=%x)\n", pmap, va);
#endif
seg = va >> 28;
if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];
else actpmap = pmap;
pa = (vm_offset_t) 0;
debugLog2(52, actpmap->space, va);
spl = splhigh();
if(mp=hw_lock_phys_vir(actpmap->space, va)) {
if((unsigned int)mp&1) {
panic("pmap_extract: timeout obtaining lock on physical entry\n");
splx(spl);
return 0;
}
mpv = hw_cpv(mp);
pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1)));
if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);
splx(spl);
debugLog2(53, pa, 0);
return pa;
}
pa = hw_cvp_blk(pmap, va);
splx(spl);
debugLog2(53, pa, 0);
return pa;
}
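/*
 * pmap_attribute_cache_sync(address, size, attribute, value)
 *
 * Performs the requested cache maintenance (sync, flush, or invalidate) on
 * each page of the given range.
 */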
kern_return_t
pmap_attribute_cache_sync(address, size, attribute, value)
vm_offset_t address;
vm_size_t size;
vm_machine_attribute_t attribute;
vm_machine_attribute_val_t* value;
{
while(size) {
switch (*value) {
case MATTR_VAL_CACHE_SYNC:
sync_cache(address, PAGE_SIZE);
break;
case MATTR_VAL_CACHE_FLUSH:
flush_dcache(address, PAGE_SIZE, TRUE);
invalidate_icache(address,
PAGE_SIZE, TRUE);
break;
case MATTR_VAL_DCACHE_FLUSH:
flush_dcache(address, PAGE_SIZE, TRUE);
break;
case MATTR_VAL_ICACHE_FLUSH:
invalidate_icache(address,
PAGE_SIZE, TRUE);
break;
}
address += PAGE_SIZE;	/* Advance to the next page in the range */
size -= PAGE_SIZE;
}
return KERN_SUCCESS;
}
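/*
 * pmap_attribute(pmap, address, size, attribute, value)
 *
 * Gets or sets machine attributes for a virtual range.  Only MATTR_CACHE is
 * recognized: the cache sync/flush values walk the range a page at a time,
 * and MATTR_VAL_GET_INFO returns the physical address and mapping count for
 * a single page, or a residency total for larger ranges.
 */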
kern_return_t
pmap_attribute(pmap, address, size, attribute, value)
pmap_t pmap;
vm_offset_t address;
vm_size_t size;
vm_machine_attribute_t attribute;
vm_machine_attribute_val_t* value;
{
spl_t s;
vm_offset_t sva, eva;
vm_offset_t pa;
kern_return_t ret;
register struct mapping *mp, *mpv;
register struct phys_entry *pp;
int total, seg;
pmap_t actpmap;
if (attribute != MATTR_CACHE)
return KERN_INVALID_ARGUMENT;
if ((*value == MATTR_VAL_GET) &&
(trunc_page(address) != trunc_page(address+size-1)))
return KERN_INVALID_ARGUMENT;
if (pmap == PMAP_NULL)
return KERN_SUCCESS;
sva = trunc_page(address);
eva = round_page(address + size);
ret = KERN_SUCCESS;
debugLog2(54, address, attribute);
switch (*value) {
case MATTR_VAL_CACHE_SYNC:
case MATTR_VAL_CACHE_FLUSH:
case MATTR_VAL_DCACHE_FLUSH:
case MATTR_VAL_ICACHE_FLUSH:
sva = trunc_page(sva);
s = splhigh();
while (sva < eva) {
seg = sva >> 28;
if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];
else actpmap = pmap;
if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) {
sva = (sva + pmapUsageSize) & (-pmapUsageSize);
if(!sva) break;
continue;
}
if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) {
sva += PAGE_SIZE;
continue;
}
if((unsigned int)mp&1) {
panic("pmap_attribute: timeout obtaining lock on physical entry\n");
continue;
}
mpv = hw_cpv(mp);
if((unsigned int)mpv->physent) {
pa = (vm_offset_t)mpv->physent->pte1 & -PAGE_SIZE;
}
else {
pa = (vm_offset_t)(mpv->PTEr & -PAGE_SIZE);	/* No physical entry, take the page address from the mapping itself */
}
switch (*value) {
case MATTR_VAL_CACHE_SYNC:
sync_cache(pa, PAGE_SIZE);
break;
case MATTR_VAL_CACHE_FLUSH:
flush_dcache(pa, PAGE_SIZE, TRUE);
invalidate_icache(pa, PAGE_SIZE, TRUE);
break;
case MATTR_VAL_DCACHE_FLUSH:
flush_dcache(pa, PAGE_SIZE, TRUE);
break;
case MATTR_VAL_ICACHE_FLUSH:
invalidate_icache(pa, PAGE_SIZE, TRUE);
break;
}
if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);
sva += PAGE_SIZE;
}
splx(s);
break;
case MATTR_VAL_GET_INFO:
total = 0;
s = splhigh();
if (size <= PAGE_SIZE) {
seg = sva >> 28;
if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];
else actpmap = pmap;
if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) {
*value = 0;
}
else {
if((unsigned int)mp&1) {
panic("pmap_attribute: timeout obtaining lock on physical entry\n");
}
mpv = hw_cpv(mp);
if(pp = mpv->physent) {
total = 0;
for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)); mpv != NULL; mpv = hw_cpv(mpv->next)) total++;	/* Count the mappings on this physical page */
*value = (vm_machine_attribute_val_t) ((pp->pte1 & -PAGE_SIZE) | total);
hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);
}
else {
*value = (vm_machine_attribute_val_t) ((mpv->PTEr & -PAGE_SIZE) | 1);
}
}
}
else {
total = 0;
while (sva < eva) {
seg = sva >> 28;
if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg];
else actpmap = pmap;
if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) {
sva = (sva + pmapUsageSize) & (-pmapUsageSize);
if(!sva) break;
continue;
}
if(mp = hw_lock_phys_vir(actpmap->space, sva)) {
if((unsigned int)mp&1) {
panic("pmap_attribute: timeout obtaining lock on physical entry\n");
continue;
}
mpv = hw_cpv(mp);
total += 65536 + (mpv->physent && ((mapping *)((unsigned int)mpv->physent->phys_link & -32))->next);
hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK);
}
sva += PAGE_SIZE;
}
*value = total;
}
splx(s);
break;
case MATTR_VAL_GET:
case MATTR_VAL_OFF:
case MATTR_VAL_ON:
default:
ret = KERN_INVALID_ARGUMENT;
break;
}
debugLog2(55, 0, 0);
return ret;
}
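/*
 * pmap_sync_caches_phys(pa)
 *
 * Pushes any dirty data cache contents for the physical page and
 * invalidates the corresponding instruction cache.
 */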
void pmap_sync_caches_phys(vm_offset_t pa) {
spl_t s;
s = splhigh();
sync_cache(trunc_page(pa), PAGE_SIZE);
splx(s);
return;
}
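/*
 * pmap_collect, pmap_activate, and pmap_deactivate are no-ops on this port:
 * there is nothing to garbage collect, and address space switching is
 * handled elsewhere (see pmap_switch).
 */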
void
pmap_collect(pmap_t pmap)
{
return;
}
void
pmap_activate(
pmap_t pmap,
thread_t th,
int which_cpu)
{
return;
}
void
pmap_deactivate(
pmap_t pmap,
thread_t th,
int which_cpu)
{
return;
}
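/*
 * Debug-only wrappers for the assembler page primitives: pmap_zero_page
 * verifies that the physical address is managed before zeroing, and
 * pmap_copy_page traces the copy.  Non-debug builds presumably use the
 * assembler routines directly.
 */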
#if DEBUG
extern void pmap_zero_page_assembler(vm_offset_t p);
extern void pmap_copy_page_assembler(vm_offset_t src, vm_offset_t dst);
void
pmap_zero_page(
vm_offset_t p)
{
register struct mapping *mp;
register struct phys_entry *pp;
if (pmdebug & (PDB_USER|PDB_ZERO))
kprintf("pmap_zero_page(pa=%x)\n", p);
if (pmap_find_physentry(p) == PHYS_NULL)
panic("zero_page: physaddr out of range");
pmap_zero_page_assembler(p);
}
void
pmap_copy_page(
vm_offset_t src,
vm_offset_t dst)
{
register struct phys_entry *pp;
if (pmdebug & (PDB_USER|PDB_COPY))
kprintf("pmap_copy_page(spa=%x, dpa=%x)\n", src, dst);
if (pmdebug & PDB_COPY)
kprintf("pmap_copy_page: phys_copy(%x, %x, %x)\n",
src, dst, PAGE_SIZE);
pmap_copy_page_assembler(src, dst);
}
#endif
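/*
 * pmap_pageable and pmap_change_wiring are advisory on this port and do
 * nothing.
 */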
void
pmap_pageable(
pmap_t pmap,
vm_offset_t start,
vm_offset_t end,
boolean_t pageable)
{
return;
}
void
pmap_change_wiring(
register pmap_t pmap,
vm_offset_t va,
boolean_t wired)
{
return;
}
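/*
 * pmap_modify_pages(pmap, sva, eva)
 *
 * Sets the modified (changed) bit on every managed page mapped in the
 * virtual range; pages with no physical entry (I/O space) are skipped.
 */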
void
pmap_modify_pages(
pmap_t pmap,
vm_offset_t sva,
vm_offset_t eva)
{
spl_t spl;
mapping *mp;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00010, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12)));
#endif
#if DEBUG
if (pmdebug & PDB_USER) kprintf("pmap_modify_pages(pmap=%x, sva=%x, eva=%x)\n", pmap, sva, eva);
#endif
if (pmap == PMAP_NULL) return;
debugLog2(56, sva, eva);
spl=splhigh();
for ( ; sva < eva; sva += PAGE_SIZE) {
mp = hw_lock_phys_vir(pmap->space, sva);
if(mp) {
if((unsigned int)mp&1) {
panic("pmap_modify_pages: timeout obtaining lock on physical entry\n");
continue;
}
mp = hw_cpv(mp);
if(!mp->physent) continue;
mapping_set_mod(mp->physent);
hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK);
}
}
splx(spl);
debugLog2(57, 0, 0);
return;
}
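/*
 * pmap_clear_modify(pa)
 *
 * Clears the hardware modified (changed) bit for the physical page.
 */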
void
pmap_clear_modify(vm_offset_t pa)
{
register struct phys_entry *pp;
spl_t spl;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00011, (unsigned int)pa, 0);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_clear_modify(pa=%x)\n", pa);
#endif
pp = pmap_find_physentry(pa);
if (pp == PHYS_NULL) return;
debugLog2(58, pa, 0);
spl=splhigh();
if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {
panic("pmap_clear_modify: Timeout getting lock on physent at %08X\n", pp);
splx(spl);
return;
}
mapping_clr_mod(pp);
hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);
splx(spl);
debugLog2(59, 0, 0);
}
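/*
 * pmap_is_modified(pa)
 *
 * Returns TRUE if the physical page has been modified since the bit was
 * last cleared.
 */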
boolean_t
pmap_is_modified(register vm_offset_t pa)
{
register struct phys_entry *pp;
spl_t spl;
boolean_t ret;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00012, (unsigned int)pa, 0);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_is_modified(pa=%x)\n", pa);
#endif
pp = pmap_find_physentry(pa);
if (pp == PHYS_NULL) return(FALSE);
debugLog2(60, pa, 0);
spl=splhigh();
if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {
panic("pmap_is_modified: Timeout getting lock on physent at %08X\n", pp);
splx(spl);
return 0;
}
ret = mapping_tst_mod(pp);
hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);
splx(spl);
debugLog2(61, ret, 0);
return ret;
}
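/*
 * pmap_clear_reference(pa)
 *
 * Clears the hardware referenced bit for the physical page.  Note that,
 * unlike the other bit operations here, the physical entry is not
 * explicitly locked first.
 */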
void
pmap_clear_reference(vm_offset_t pa)
{
register struct phys_entry *pp;
spl_t spl;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00013, (unsigned int)pa, 0);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_clear_reference(pa=%x)\n", pa);
#endif
pp = pmap_find_physentry(pa);
if (pp == PHYS_NULL) return;
debugLog2(62, pa, 0);
spl=splhigh();
mapping_clr_ref(pp);
splx(spl);
debugLog2(63, 0, 0);
}
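/*
 * pmap_is_referenced(pa)
 *
 * Returns TRUE if the physical page has been referenced since the bit was
 * last cleared.
 */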
boolean_t
pmap_is_referenced(vm_offset_t pa)
{
register struct phys_entry *pp;
spl_t spl;
boolean_t ret;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00014, (unsigned int)pa, 0);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_is_referenced(pa=%x)\n", pa);
#endif
pp = pmap_find_physentry(pa);
if (pp == PHYS_NULL) return(FALSE);
debugLog2(64, pa, 0);
spl=splhigh();
if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) {
panic("pmap_is_referenced: Timeout getting lock on physent at %08X\n", pp);
splx(spl);
return 0;
}
ret = mapping_tst_ref(pp);
hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK);
splx(spl);
debugLog2(65, ret, 0);
return ret;
}
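/*
 * pmap_list_resident_pages is a debugging stub on this port; it always
 * reports zero resident pages.
 */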
#if MACH_VM_DEBUG
int
pmap_list_resident_pages(
register pmap_t pmap,
register vm_offset_t *listp,
register int space)
{
return 0;
}
#endif
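/*
 * pmap_copy_part_page(src, src_offset, dst, dst_offset, len)
 *
 * Copies "len" bytes between parts of two physical pages; the copy must not
 * cross a page boundary on either side.
 */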
void
pmap_copy_part_page(
vm_offset_t src,
vm_offset_t src_offset,
vm_offset_t dst,
vm_offset_t dst_offset,
vm_size_t len)
{
register struct phys_entry *pp_src, *pp_dst;
spl_t s;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00019, (unsigned int)src+src_offset, (unsigned int)dst+dst_offset);
dbgTrace(0xF1D04019, (unsigned int)len, 0);
#endif
s = splhigh();
assert(((dst & PAGE_MASK)+dst_offset+len) <= PAGE_SIZE);
assert(((src & PAGE_MASK)+src_offset+len) <= PAGE_SIZE);
phys_copy((vm_offset_t) src+src_offset,
(vm_offset_t) dst+dst_offset, len);
splx(s);
}
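/*
 * pmap_zero_part_page is not implemented and panics if called.
 */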
void
pmap_zero_part_page(
vm_offset_t p,
vm_offset_t offset,
vm_size_t len)
{
panic("pmap_zero_part_page");
}
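/*
 * pmap_verify_free(pa)
 *
 * Returns TRUE if the physical page has no mappings (or if the pmap system
 * is not yet initialized); FALSE if the page is unmanaged or still mapped.
 */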
boolean_t pmap_verify_free(vm_offset_t pa) {
struct phys_entry *pp;
#if PMAP_LOWTRACE
dbgTrace(0xF1D00007, (unsigned int)pa, 0);
#endif
#if DEBUG
if (pmdebug & PDB_USER)
kprintf("pmap_verify_free(pa=%x)\n", pa);
#endif
if (!pmap_initialized) return(TRUE);
pp = pmap_find_physentry(pa);
if (pp == PHYS_NULL) return FALSE;
return ((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS) == MAPPING_NULL);
}
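/*
 * pmap_switch(map)
 *
 * Makes the given address space current on this processor.  The kernel
 * pmap is always mapped, so switching to it is a no-op.
 */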
void pmap_switch(pmap_t map)
{
unsigned int i;
#if DEBUG
if (watchacts & WA_PCB) {
kprintf("Switching to map at 0x%08x, space=%d\n",
map,map->space);
}
#endif
if (map->space == PPC_SID_KERNEL) {
return;
}
hw_set_user_space(map);
return;
}
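/*
 * pmap_nest(grand, subord, vaddr, size)
 *
 * Inserts pmap "subord" into "grand" at "vaddr"; used to implement shared
 * segments.  Only segment-aligned, segment-sized (256MB) ranges are
 * supported.  Cached segment registers are invalidated on every processor
 * by clearing its Lastpmap hint.
 */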
kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size) {
unsigned int oflags, seg, grandr;
int i;
if(size != 0x10000000) return KERN_INVALID_VALUE;
if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE;
while(1) {
oflags = subord->vflags & ~pmapAltSeg;
if(subord->vflags & pmapAltSeg) {
panic("pmap_nest: Attempt to nest an already nested pmap\n");
}
if(hw_compare_and_store(oflags, oflags | pmapSubord, &subord->vflags)) break;
}
simple_lock(&grand->lock);
if(grand->vflags & pmapSubord) {
simple_unlock(&grand->lock);
panic("pmap_nest: Attempt to nest into subordinate pmap\n");
return KERN_FAILURE;
}
seg = vaddr >> 28;
if((0x00008000 >> seg) & grand->vflags) {
simple_unlock(&grand->lock);
panic("pmap_nest: Attempt to nest into already nested segment\n");
return KERN_FAILURE;
}
grand->pmapPmaps[seg] = subord;
grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | subord->space;
grand->vflags |= (0x00008000 >> seg);
grandr = (unsigned int)grand ^ grand->pmapvr;
simple_unlock(&grand->lock);
for(i=0; i < real_ncpus; i++) {
(void)hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap);
}
return KERN_SUCCESS;
}
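/*
 * pmap_unnest(grand, vaddr, size)
 *
 * Removes a nested pmap from "grand".  After the segment register value is
 * restored, every other processor that had this pmap cached is signaled and
 * waited on, so no stale segment translation can remain in use.
 */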
kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) {
unsigned int oflags, seg, grandr, tstamp;
int i, tcpu, mycpu;
if(size != 0x10000000) return KERN_INVALID_VALUE;
if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE;
simple_lock(&grand->lock);
disable_preemption();
seg = vaddr >> 28;
if(!((0x00008000 >> seg) & grand->vflags)) {
enable_preemption();
simple_unlock(&grand->lock);
panic("pmap_unnest: Attempt to unnest an unnested segment\n");
return KERN_FAILURE;
}
grand->pmapPmaps[seg] = (pmap_t)0;	/* Clear the pointer to the nested pmap */
grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | grand->space;	/* Restore the pmap's own segment register value */
grand->vflags &= ~(0x00008000 >> seg);
grandr = (unsigned int)grand ^ grand->pmapvr;
simple_unlock(&grand->lock);
mycpu = cpu_number();
for(i=0; i < real_ncpus; i++) {
if(hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap)) {
if(i == mycpu) continue;
tstamp = per_proc_info[i].ruptStamp[1];
if(cpu_signal(i, SIGPwake, 0, 0) != KERN_SUCCESS) {
continue;
}
if(!hw_cpu_wcng(&per_proc_info[i].ruptStamp[1], tstamp, LockTimeOut)) {
panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
}
}
}
enable_preemption();
return KERN_SUCCESS;
}
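/*
 * pmap_ver(pmap, sva, eva)
 *
 * Debugging aid: sanity-checks the pmapUsage counts against the resident
 * count and verifies that the range [sva, eva) contains no mappings.
 */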
void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) {
int cnt, i, j, k;
vm_offset_t xx;
if(!pmap) return;
sva = trunc_page(sva);
eva = trunc_page(eva);
for(i = 0; i < (pmapUsageMask + 1); i++) {
if((pmap->pmapUsage[i]) > 8192) {
panic("pmap_ver: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
i * pmapUsageSize, pmap->pmapUsage[i], pmap);
}
}
j = 0;
while(1) {
cnt = 0;
for(i = 0; i < (pmapUsageMask + 1); i++) {
cnt = cnt + pmap->pmapUsage[i];
}
if(cnt == pmap->stats.resident_count) break;
j++;
for(i = 0; i < 100000; i++) {
k = j + i;
}
if(j >= 10) {
panic("pmap_ver: pmapUsage total (%d) does not match resident count (%d) for pmap %08X\n",
cnt, pmap->stats.resident_count, pmap);
}
}
for(xx = sva; xx < eva; xx += PAGE_SIZE) {
if(pmap_extract(pmap, xx)) {
panic("pmap_ver: range (%08X to %08X) not empty at %08X for pmap %08X\n",
sva, eva, xx, pmap);
}
}
}
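/*
 * coredumpok(map, va)
 *
 * Always TRUE on this port: any address may be included in a core dump.
 */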
boolean_t
coredumpok(vm_map_t map, vm_offset_t va)
{
return TRUE;
}