Mark pages whose refcount was >1 and has dropped to 1, and which are
read/write, as writable in the pagetable right away instead of waiting for a pagefault. Minor optimization. Add sanity checks of SLAB-allocated pointers. VM gets its own _exit and __exit like PM, so the stock (library) panic works.
parent e0f3a5acf1
commit 2dd02cc560

10 changed files with 309 additions and 65 deletions
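Note on the main change: the new pb_unreferenced() added in region.c (the hunk at -321,6 +346,76 below) carries the optimization described above. The following is a minimal, self-contained sketch of that decision only, using simplified stand-in structs and a hypothetical map_writable() instead of VM's real phys_block/phys_region and map_ph_writept():

/* Sketch of the refcount-drop optimization: when a shared physical block
 * drops to a single writable reference, map it writable immediately
 * instead of waiting for a copy-on-write page fault.  All names here are
 * simplified stand-ins, not the actual VM interfaces.
 */
#include <assert.h>
#include <stdio.h>

struct phys_region;

struct phys_block {
	int refcount;			/* regions referencing this block */
	struct phys_region *firstregion;
};

struct phys_region {
	struct phys_block *ph;
	int writable;			/* owning region is writable */
};

static void map_writable(struct phys_region *pr)
{
	/* Stand-in for updating the page table entry to read/write. */
	printf("mapping block %p writable\n", (void *) pr->ph);
}

static void drop_reference(struct phys_region *pr)
{
	struct phys_block *pb = pr->ph;

	assert(pb->refcount > 0);
	pb->refcount--;

	if(pb->refcount == 1 && pb->firstregion->writable) {
		/* Last remaining user is writable: no copy-on-write sharing
		 * left, so make the mapping writable right away.
		 */
		map_writable(pb->firstregion);
	}
}

int main(void)
{
	struct phys_block pb = { 2, NULL };
	struct phys_region a = { &pb, 1 }, b = { &pb, 1 };

	pb.firstregion = &a;	/* a stays; b gives up its reference */
	drop_reference(&b);	/* refcount 2 -> 1: a is mapped writable */
	return 0;
}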
@@ -116,3 +116,13 @@ PUBLIC int do_willexit(message *msg)
 	return OK;
 }
 
+PUBLIC void _exit(int code)
+{
+	sys_exit(SELF);
+}
+
+PUBLIC void __exit(int code)
+{
+	sys_exit(SELF);
+}
+
@@ -15,6 +15,7 @@
 EXTERN struct vmproc vmproc[_NR_PROCS+1];
 
 #if SANITYCHECKS
+EXTERN int nocheck;
 u32_t data1[200];
 #define CHECKADDR 0
 EXTERN long vm_sanitychecklevel;
@@ -99,7 +99,7 @@ PUBLIC vir_bytes arch_map2vir(struct vmproc *vmp, vir_bytes addr)
 {
 	vir_bytes bottom = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);
 
-	vm_assert(bottom <= addr);
+	/* vm_assert(bottom <= addr); */
 
 	return addr - bottom;
 }
@@ -74,6 +74,7 @@ PUBLIC int main(void)
 	int result, who_e;
 
 #if SANITYCHECKS
+	nocheck = 0;
 	memcpy(data1, CHECKADDR, sizeof(data1));
 #endif
 	SANITYCHECK(SCL_TOP);
@@ -111,7 +112,7 @@ PUBLIC int main(void)
 				/* Kernel wants to have memory ranges
 				 * verified.
 				 */
-				handle_memory();
+				do_memory();
 				break;
 			case PM_PROC_NR:
 				/* PM sends a notify() on shutdown, which
@@ -122,7 +123,7 @@ PUBLIC int main(void)
 				/* This indicates a page fault has happened,
 				 * which we have to handle.
 				 */
-				handle_pagefaults();
+				do_pagefaults();
 				break;
 			default:
 				/* No-one else should send us notifies. */
@@ -47,9 +47,9 @@ char *pf_errstr(u32_t err)
 }
 
 /*===========================================================================*
- *				handle_pagefaults			     *
+ *				do_pagefaults				     *
 *===========================================================================*/
-PUBLIC void handle_pagefaults(void)
+PUBLIC void do_pagefaults(void)
 {
 	endpoint_t ep;
 	u32_t addr, err;
@@ -62,7 +62,7 @@ PUBLIC void handle_pagefaults(void)
 		int p, wr = PFERR_WRITE(err);
 
 		if(vm_isokendpt(ep, &p) != OK)
-			vm_panic("handle_pagefaults: endpoint wrong", ep);
+			vm_panic("do_pagefaults: endpoint wrong", ep);
 
 		vmp = &vmproc[p];
 		vm_assert(vmp->vm_flags & VMF_INUSE);
@@ -97,7 +97,7 @@ PUBLIC void handle_pagefaults(void)
 		offset = addr - region->vaddr;
 
 		/* Access is allowed; handle it. */
-		if((r=map_pagefault(vmp, region, offset, wr)) != OK) {
+		if((r=map_pf(vmp, region, offset, wr)) != OK) {
 			printf("VM: pagefault: SIGSEGV %d pagefault not handled\n", ep);
 			sys_sysctl_stacktrace(vmp->vm_endpoint);
 			if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
@@ -108,16 +108,16 @@ PUBLIC void handle_pagefaults(void)
 
 		/* Pagefault is handled, so now reactivate the process. */
 		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, r)) != OK)
-			vm_panic("handle_pagefaults: sys_vmctl failed", ep);
+			vm_panic("do_pagefaults: sys_vmctl failed", ep);
 	}
 
 	return;
 }
 
 /*===========================================================================*
- *				handle_memory				     *
+ *				do_memory				     *
 *===========================================================================*/
-PUBLIC void handle_memory(void)
+PUBLIC void do_memory(void)
 {
 	int r, s;
 	endpoint_t who;
@@ -132,7 +132,7 @@ PUBLIC void handle_memory(void)
 		vir_bytes o;
 
 		if(vm_isokendpt(who, &p) != OK)
-			vm_panic("handle_memory: endpoint wrong", who);
+			vm_panic("do_memory: endpoint wrong", who);
 		vmp = &vmproc[p];
 
 		/* Page-align memory and length. */
@@ -143,13 +143,13 @@ PUBLIC void handle_memory(void)
 		if(o > 0) len += VM_PAGE_SIZE - o;
 
 		if(!(region = map_lookup(vmp, mem))) {
-			printf("VM: handle_memory: memory doesn't exist\n");
+			printf("VM: do_memory: memory doesn't exist\n");
 			r = EFAULT;
 		} else if(mem + len > region->vaddr + region->length) {
 			vm_assert(region->vaddr <= mem);
-			vm_panic("handle_memory: not contained", NO_NUM);
+			vm_panic("do_memory: not contained", NO_NUM);
 		} else if(!(region->flags & VR_WRITABLE) && wrflag) {
-			printf("VM: handle_memory: write to unwritable map\n");
+			printf("VM: do_memory: write to unwritable map\n");
 			r = EFAULT;
 		} else {
 			vir_bytes offset;
@@ -168,7 +168,7 @@ PUBLIC void handle_memory(void)
 		}
 
 		if(sys_vmctl(who, VMCTL_MEMREQ_REPLY, r) != OK)
-			vm_panic("handle_memory: sys_vmctl failed", r);
+			vm_panic("do_memory: sys_vmctl failed", r);
 	}
 }
@@ -78,8 +78,8 @@ _PROTOTYPE(int do_map_phys, (message *msg) );
 _PROTOTYPE(int do_unmap_phys, (message *msg) );
 
 /* pagefaults.c */
-_PROTOTYPE( void handle_pagefaults, (void) );
-_PROTOTYPE( void handle_memory, (void) );
+_PROTOTYPE( void do_pagefaults, (void) );
+_PROTOTYPE( void do_memory, (void) );
 _PROTOTYPE( char *pf_errstr, (u32_t err));
 
 /* $(ARCH)/pagetable.c */
@@ -111,6 +111,17 @@ _PROTOTYPE(void slabfree,(void *mem, int bytes));
 _PROTOTYPE(void slabstats,(void));
 #define SLABALLOC(var) (var = slaballoc(sizeof(*var)))
 #define SLABFREE(ptr) slabfree(ptr, sizeof(*(ptr)))
+#if SANITYCHECKS
+_PROTOTYPE(int slabsane,(void *mem, int bytes));
+#define SLABSANE(ptr) { \
+	if(!slabsane(ptr, sizeof(*(ptr)))) { \
+		printf("VM:%s:%d: SLABSANE(%s)\n", __FILE__, __LINE__, #ptr); \
+		vm_panic("SLABSANE failed", NO_NUM); \
+	} \
+}
+#else
+#define SLABSANE(ptr)
+#endif
 
 /* region.c */
 _PROTOTYPE(struct vir_region * map_page_region,(struct vmproc *vmp, \
@@ -123,7 +134,7 @@ _PROTOTYPE(int map_unmap_region,(struct vmproc *vmp, struct vir_region *vr));
 _PROTOTYPE(int map_free_proc,(struct vmproc *vmp));
 _PROTOTYPE(int map_proc_copy,(struct vmproc *dst, struct vmproc *src));
 _PROTOTYPE(struct vir_region *map_lookup,(struct vmproc *vmp, vir_bytes addr));
-_PROTOTYPE(int map_pagefault,(struct vmproc *vmp,
+_PROTOTYPE(int map_pf,(struct vmproc *vmp,
 	struct vir_region *region, vir_bytes offset, int write));
 _PROTOTYPE(int map_handle_memory,(struct vmproc *vmp,
 	struct vir_region *region, vir_bytes offset, vir_bytes len, int write));
@@ -105,6 +105,11 @@ PUBLIC void map_sanitycheck(char *file, int line)
 	} \
 }
 
+#define MYSLABSANE(s) MYASSERT(slabsane(s, sizeof(*(s))))
+	/* Basic pointers check. */
+	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
+	ALLREGIONS(MYASSERT(vr->parent == vmp),MYASSERT(pr->parent == vr););
+
 	/* Do counting for consistency check. */
 	ALLREGIONS(;,pr->ph->seencount = 0;);
 	ALLREGIONS(;,pr->ph->seencount++;);
@@ -123,6 +128,25 @@ PUBLIC void map_sanitycheck(char *file, int line)
 			pr->ph->offset + pr->ph->length,
 			pr->ph->refcount, pr->ph->seencount);
 		}
+		{
+			int n_others = 0;
+			struct phys_region *others;
+			if(pr->ph->refcount > 0) {
+				MYASSERT(pr->ph->firstregion);
+				if(pr->ph->refcount == 1) {
+					MYASSERT(pr->ph->firstregion == pr);
+				}
+			} else {
+				MYASSERT(!pr->ph->firstregion);
+			}
+			for(others = pr->ph->firstregion; others;
+				others = others->next_ph_list) {
+				MYSLABSANE(others);
+				MYASSERT(others->ph == pr->ph);
+				n_others++;
+			}
+			MYASSERT(pr->ph->refcount == n_others);
+		}
 		MYASSERT(pr->ph->refcount == pr->ph->seencount);
 		MYASSERT(!(pr->ph->offset % VM_PAGE_SIZE));
 		MYASSERT(!(pr->ph->length % VM_PAGE_SIZE)););
@@ -171,7 +195,7 @@ PUBLIC int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
 }
 
 /*===========================================================================*
- *				map_region				     *
+ *				map_page_region				     *
 *===========================================================================*/
 PUBLIC struct vir_region *map_page_region(vmp, minv, maxv, length,
 	what, flags, mapflags)
@@ -269,6 +293,7 @@ int mapflags;
 	newregion->first = NULL;
 	newregion->flags = flags;
 	newregion->tag = VRT_NONE;
+	newregion->parent = vmp;
 
 	/* If we know what we're going to map to, map it right away. */
 	if(what != MAP_NONE) {
@@ -321,6 +346,76 @@ int mapflags;
 	return newregion;
 }
 
+/*===========================================================================*
+ *				pb_unreferenced				     *
+ *===========================================================================*/
+void pb_unreferenced(struct vir_region *region, struct phys_region *pr)
+{
+	struct phys_block *pb;
+	int remap = 0;
+
+	SLABSANE(pr);
+	pb = pr->ph;
+	SLABSANE(pb);
+	vm_assert(pb->refcount > 0);
+	pb->refcount--;
+	vm_assert(pb->refcount >= 0);
+
+	SLABSANE(pb->firstregion);
+	if(pb->firstregion == pr) {
+		pb->firstregion = pr->next_ph_list;
+		if(pb->firstregion) {
+			SLABSANE(pb->firstregion);
+		}
+	} else {
+		struct phys_region *others;
+
+		for(others = pb->firstregion; others;
+			others = others->next_ph_list) {
+			SLABSANE(others);
+			vm_assert(others->ph == pb);
+			if(others->next_ph_list == pr) {
+				others->next_ph_list = pr->next_ph_list;
+				break;
+			}
+		}
+
+		vm_assert(others); /* Otherwise, wasn't on the list. */
+	}
+
+	if(pb->refcount == 0) {
+		vm_assert(!pb->firstregion);
+		if(region->flags & VR_ANON) {
+			FREE_MEM(ABS2CLICK(pb->phys),
+				ABS2CLICK(pb->length));
+		} else if(region->flags & VR_DIRECT) {
+			; /* No action required. */
+		} else {
+			vm_panic("strange phys flags", NO_NUM);
+		}
+		SLABFREE(pb);
+	} else {
+		SLABSANE(pb->firstregion);
+		/* If a writable piece of physical memory is now only
+		 * referenced once, map it writable right away instead of
+		 * waiting for a page fault.
+		 */
+		if(pb->refcount == 1 && (region->flags & VR_WRITABLE)) {
+			vm_assert(pb);
+			vm_assert(pb->firstregion);
+			vm_assert(!pb->firstregion->next_ph_list);
+			vm_assert(pb->firstregion->ph == pb);
+			vm_assert(pb->firstregion->ph == pb);
+			SLABSANE(pb);
+			SLABSANE(pb->firstregion);
+			SLABSANE(pb->firstregion->parent);
+			if(map_ph_writept(pb->firstregion->parent->parent,
+				pb->firstregion->parent, pb, NULL, NULL) != OK) {
+				vm_panic("pb_unreferenced: writept", NO_NUM);
+			}
+		}
+	}
+}
+
 /*===========================================================================*
 *				map_free				     *
@@ -329,22 +424,29 @@ PRIVATE int map_free(struct vir_region *region)
 {
 	struct phys_region *pr, *nextpr;
 
+#if SANITYCHECKS
+	for(pr = region->first; pr; pr = pr->next) {
+		struct phys_region *others;
+		struct phys_block *pb;
+
+		SLABSANE(pr);
+		pb = pr->ph;
+		SLABSANE(pb);
+		SLABSANE(pb->firstregion);
+
+		for(others = pb->firstregion; others;
+			others = others->next_ph_list) {
+			SLABSANE(others);
+			vm_assert(others->ph == pb);
+		}
+	}
+#endif
+
 	for(pr = region->first; pr; pr = nextpr) {
-		vm_assert(pr->ph->refcount > 0);
-		pr->ph->refcount--;
 		SANITYCHECK(SCL_DETAIL);
+		pb_unreferenced(region, pr);
 		nextpr = pr->next;
+		region->first = nextpr;	/* For sanity checks. */
-		if(pr->ph->refcount == 0) {
-			if(region->flags & VR_ANON) {
-				FREE_MEM(ABS2CLICK(pr->ph->phys),
-					ABS2CLICK(pr->ph->length));
-			} else if(region->flags & VR_DIRECT) {
-				; /* No action required. */
-			} else {
-				vm_panic("strange phys flags", NO_NUM);
-			}
-			SLABFREE(pr->ph);
-		}
 		SLABFREE(pr);
 	}
 
@@ -365,8 +467,16 @@ struct vmproc *vmp;
 
 	for(r = vmp->vm_regions; r; r = nextr) {
 		nextr = r->next;
 		SANITYCHECK(SCL_DETAIL);
+#if SANITYCHECKS
+		nocheck++;
+#endif
 		map_free(r);
+		vmp->vm_regions = nextr;	/* For sanity checks. */
+#if SANITYCHECKS
+		nocheck--;
+#endif
+		SANITYCHECK(SCL_DETAIL);
 	}
 
 	vmp->vm_regions = NULL;
@@ -412,7 +522,7 @@ vir_bytes length;
 phys_bytes what_mem;
 struct phys_region *physhint;
 {
-	struct phys_region *physr, *newphysr;
+	struct phys_region *newphysr;
 	struct phys_block *newpb;
 	phys_bytes mem_clicks, clicks;
 	vir_bytes mem;
@@ -445,19 +555,25 @@ struct phys_region *physhint;
 	} else {
 		mem = what_mem;
 	}
+	SANITYCHECK(SCL_DETAIL);
 
 	/* New physical block. */
 	newpb->phys = mem;
 	newpb->refcount = 1;
 	newpb->offset = offset;
 	newpb->length = length;
+	newpb->firstregion = newphysr;
+	SLABSANE(newpb->firstregion);
 
 	/* New physical region. */
 	newphysr->ph = newpb;
+	newphysr->parent = region;
+	newphysr->next_ph_list = NULL;	/* No other references to this block. */
 
 	/* Update pagetable. */
 	vm_assert(!(length % VM_PAGE_SIZE));
 	vm_assert(!(newpb->length % VM_PAGE_SIZE));
+	SANITYCHECK(SCL_DETAIL);
 	if(map_ph_writept(vmp, region, newpb, NULL, NULL) != OK) {
 		if(what_mem == MAP_NONE)
 			FREE_MEM(mem_clicks, clicks);
@@ -474,6 +590,7 @@ struct phys_region *physhint;
 		newphysr->next = region->first;
 		region->first = newphysr;
 	} else {
+		struct phys_region *physr;
 		for(physr = physhint; physr; physr = physr->next) {
 			if(!physr->next || physr->next->ph->offset > offset) {
 				newphysr->next = physr->next;
@@ -510,7 +627,7 @@ struct phys_region *ph;
 	/* This is only to be done if there is more than one copy. */
 	vm_assert(ph->ph->refcount > 1);
 
-	/* Do actal copy on write; allocate new physblock. */
+	/* Do actual copy on write; allocate new physblock. */
 	if(!SLABALLOC(newpb)) {
 		printf("VM: map_copy_ph_block: couldn't allocate newpb\n");
 		SANITYCHECK(SCL_FUNCTIONS);
@@ -526,13 +643,18 @@ struct phys_region *ph;
 		return ENOMEM;
 	}
 	newmem = CLICK2ABS(newmem_cl);
 	vm_assert(ABS2CLICK(newmem) == newmem_cl);
 
-	ph->ph->refcount--;
+	pb_unreferenced(region, ph);
+	SLABSANE(ph);
+	SLABSANE(ph->ph);
+	vm_assert(ph->ph->refcount > 0);
 	newpb->length = ph->ph->length;
 	newpb->offset = ph->ph->offset;
 	newpb->refcount = 1;
 	newpb->phys = newmem;
+	newpb->firstregion = ph;
+	ph->next_ph_list = NULL;
 
 	/* Copy old memory to new memory. */
 	if((r=sys_abscopy(ph->ph->phys, newpb->phys, newpb->length)) != OK) {
@@ -564,9 +686,9 @@ struct phys_region *ph;
 }
 
 /*===========================================================================*
- *				map_pagefault				     *
+ *				map_pf					     *
 *===========================================================================*/
-PUBLIC int map_pagefault(vmp, region, offset, write)
+PUBLIC int map_pf(vmp, region, offset, write)
 struct vmproc *vmp;
 struct vir_region *region;
 vir_bytes offset;
@@ -613,7 +735,7 @@ int write;
 	}
 
 	if(r != OK)
-		printf("VM: map_pagefault: failed (%d)\n", r);
+		printf("VM: map_pf: failed (%d)\n", r);
 
 	SANITYCHECK(SCL_FUNCTIONS);
 
@@ -717,6 +839,14 @@ static int countregions(struct vir_region *vr)
 *===========================================================================*/
 PRIVATE struct vir_region *map_copy_region(struct vir_region *vr)
 {
+	/* map_copy_region creates a complete copy of the vir_region
+	 * data structure, linking in the same phys_blocks directly,
+	 * but all in limbo, i.e., the caller has to link the vir_region
+	 * to a process. Therefore it doesn't increase the refcount in
+	 * the phys_block; the caller has to do this once it's linked.
+	 * The reason for this is to keep the sanity checks working
+	 * within this function.
+	 */
 	struct vir_region *newvr;
 	struct phys_region *ph, *prevph = NULL;
 #if SANITYCHECKS
@@ -739,6 +869,8 @@ PRIVATE struct vir_region *map_copy_region(struct vir_region *vr)
 		}
 		newph->next = NULL;
 		newph->ph = ph->ph;
+		newph->next_ph_list = NULL;
+		newph->parent = newvr;
 		if(prevph) prevph->next = newph;
 		else newvr->first = newph;
 		prevph = newph;
@@ -783,7 +915,7 @@ struct vmproc *src;
 	SANITYCHECK(SCL_FUNCTIONS);
 	for(vr = src->vm_regions; vr; vr = vr->next) {
 		struct vir_region *newvr;
-		struct phys_region *ph;
+		struct phys_region *orig_ph, *new_ph;
 		SANITYCHECK(SCL_DETAIL);
 		if(!(newvr = map_copy_region(vr))) {
 			map_free_proc(dst);
@@ -791,13 +923,36 @@ struct vmproc *src;
 			return ENOMEM;
 		}
 		SANITYCHECK(SCL_DETAIL);
+		newvr->parent = dst;
 		if(prevvr) { prevvr->next = newvr; }
 		else { dst->vm_regions = newvr; }
-		for(ph = vr->first; ph; ph = ph->next) {
-			vm_assert(ph->ph->refcount > 0);
-			ph->ph->refcount++;
-			vm_assert(ph->ph->refcount > 1);
+		new_ph = newvr->first;
+		for(orig_ph = vr->first; orig_ph; orig_ph = orig_ph->next) {
+			struct phys_block *pb;
+			/* Check two physregions both are nonnull,
+			 * are different, and match physblocks.
+			 */
+			vm_assert(orig_ph && new_ph);
+			vm_assert(orig_ph != new_ph);
+			pb = orig_ph->ph;
+			vm_assert(pb == new_ph->ph);
+
+			/* Link in new physregion. */
+			vm_assert(!new_ph->next_ph_list);
+			new_ph->next_ph_list = pb->firstregion;
+			pb->firstregion = new_ph;
+			SLABSANE(new_ph);
+			SLABSANE(new_ph->next_ph_list);
+
+			/* Increase phys block refcount */
+			vm_assert(pb->refcount > 0);
+			pb->refcount++;
+			vm_assert(pb->refcount > 1);
+
+			/* Get next new physregion */
+			new_ph = new_ph->next;
 		}
+		vm_assert(!new_ph);
 		SANITYCHECK(SCL_DETAIL);
 		prevvr = newvr;
 		SANITYCHECK(SCL_DETAIL);
@@ -10,11 +10,18 @@ struct phys_block {
 	vir_bytes	length;		/* no. of contiguous bytes */
 	phys_bytes	phys;		/* physical memory */
 	u8_t		refcount;	/* Refcount of these pages */
+
+	/* first in list of phys_regions that reference this block */
+	struct phys_region *firstregion;
 };
 
 struct phys_region {
 	struct phys_region	*next;	/* next contiguous block */
 	struct phys_block	*ph;
+	struct vir_region	*parent; /* Region that owns this phys_region. */
+
+	/* list of phys_regions that reference the same phys_block */
+	struct phys_region	*next_ph_list;
 };
 
 struct vir_region {
@@ -24,6 +31,7 @@ struct vir_region {
 	struct phys_region	*first;	/* phys regions in vir region */
 	u16_t			flags;
 	u32_t			tag;	/* Opaque to mapping code. */
+	struct vmproc		*parent; /* Process that owns this vir_region. */
 };
 
 /* Mapping flags: */
@@ -38,6 +46,7 @@ struct vir_region {
 /* Tag values: */
 #define VRT_NONE	0xBEEF0000
 #define VRT_HEAP	0xBEEF0001
+#define VRT_CODE	0xBEEF0002
 
 /* map_page_region flags */
 #define MF_PREALLOC	0x01
@@ -13,7 +13,7 @@
 	printf("VM:%s:%d: %s failed\n", file, line, #c); \
 	vm_panic("sanity check failed", NO_NUM); } } while(0)
 
-#define SANITYCHECK(l) if((l) <= vm_sanitychecklevel) { \
+#define SANITYCHECK(l) if(!nocheck && ((l) <= vm_sanitychecklevel)) { \
 	int failflag = 0; \
 	u32_t *origptr = CHECKADDR;\
 	int _sanep; \
@@ -94,6 +94,8 @@ PRIVATE struct slabheader {
 	} *list_head[LIST_NUMBER];
 } slabs[SLABSIZES];
 
+FORWARD _PROTOTYPE( int objstats, (void *, int, struct slabheader **, struct slabdata **, int *));
+
 #define GETSLAB(b, s) { \
 	int i; \
 	vm_assert((b) >= MINSIZE); \
@@ -206,6 +208,16 @@ PUBLIC void slab_sanitycheck(char *file, int line)
 	}
 }
 
+/*===========================================================================*
+ *				int slabsane				     *
+ *===========================================================================*/
+PUBLIC int slabsane(void *mem, int bytes)
+{
+	struct slabheader *s;
+	struct slabdata *f;
+	int i;
+	return (objstats(mem, bytes, &s, &f, &i) == OK);
+}
 #endif
 
 /*===========================================================================*
@@ -279,6 +291,12 @@ PUBLIC void *slaballoc(int bytes)
 #endif
 			SLABSANITYCHECK(SCL_FUNCTIONS);
 			firstused->sdh.freeguess = i+1;
+
+#if SANITYCHECKS
+			if(!slabsane(ret, bytes))
+				vm_panic("slaballoc: slabsane failed", NO_NUM);
+#endif
+
 			return ret;
 		}
 
@@ -293,6 +311,63 @@ PUBLIC void *slaballoc(int bytes)
 	return NULL;
 }
 
+/*===========================================================================*
+ *				int objstats				     *
+ *===========================================================================*/
+PRIVATE int objstats(void *mem, int bytes,
+	struct slabheader **sp, struct slabdata **fp, int *ip)
+{
+#define OBJSTATSCHECK(cond) \
+	if(!(cond)) { \
+		printf("VM:objstats: %s failed for ptr 0x%p, %d bytes\n", \
+			#cond, mem, bytes); \
+		return EINVAL; \
+	}
+
+	struct slabheader *s;
+	struct slabdata *f;
+	int i;
+
+	OBJSTATSCHECK((char *) mem >= (char *) VM_PAGE_SIZE);
+
+#if SANITYCHECKS
+	if(*(u32_t *) mem == JUNK) {
+		util_stacktrace();
+		printf("VM: WARNING: JUNK seen in slab object\n");
+	}
+#endif
+
+	/* Retrieve entry in slabs[]. */
+	GETSLAB(bytes, s);
+
+	/* Round address down to VM_PAGE_SIZE boundary to get header. */
+	f = (struct slabdata *) ((char *) mem - (vir_bytes) mem % VM_PAGE_SIZE);
+
+#if SANITYCHECKS
+	OBJSTATSCHECK(f->sdh.magic == MAGIC);
+#endif
+	OBJSTATSCHECK(f->sdh.list == LIST_USED || f->sdh.list == LIST_FULL);
+
+	/* Make sure it's in range. */
+	OBJSTATSCHECK((char *) mem >= (char *) f->data);
+	OBJSTATSCHECK((char *) mem < (char *) f->data + sizeof(f->data));
+
+	/* Get position. */
+	i = (char *) mem - (char *) f->data;
+	OBJSTATSCHECK(!(i % bytes));
+	i = i / bytes;
+
+	/* Make sure it is marked as allocated. */
+	OBJSTATSCHECK(GETBIT(f, i));
+
+	/* return values */
+	*ip = i;
+	*fp = f;
+	*sp = s;
+
+	return OK;
+}
+
 /*===========================================================================*
 *				void *slabfree				     *
 *===========================================================================*/
@@ -309,27 +384,9 @@ PUBLIC void slabfree(void *mem, int bytes)
 		printf("VM: WARNING: likely double free, JUNK seen\n");
 	}
 #endif
-
-	/* Retrieve entry in slabs[]. */
-	GETSLAB(bytes, s);
-
-	/* Round address down to VM_PAGE_SIZE boundary to get header. */
-	f = (struct slabdata *) ((char *) mem - (vir_bytes) mem % VM_PAGE_SIZE);
-
-	vm_assert(f->sdh.magic == MAGIC);
-	vm_assert(f->sdh.list == LIST_USED || f->sdh.list == LIST_FULL);
-
-	/* Make sure it's in range. */
-	vm_assert((char *) mem >= (char *) f->data);
-	vm_assert((char *) mem < (char *) f->data + sizeof(f->data));
-
-	/* Get position. */
-	i = (char *) mem - (char *) f->data;
-	vm_assert(!(i % bytes));
-	i = i / bytes;
-
-	/* Make sure it _was_ allocated. */
-	vm_assert(GETBIT(f, i));
+	if(objstats(mem, bytes, &s, &f, &i) != OK) {
+		vm_panic("slabfree objstats failed", NO_NUM);
+	}
 
 	/* Free this data. */
 	CLEARBIT(f, i);