VM: restore >4k secondary cache functionality

. by storing length in the yielded blocks node again
Ben Gras 2012-09-18 13:17:52 +02:00
parent ed1af3c86c
commit ddf1981004
6 changed files with 120 additions and 80 deletions
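In short: the VM secondary cache again supports blocks larger than one page, by recording on each yielded-block node both the physical start address and the length in pages. A condensed before/after sketch of that node, based on the yielded.h hunk at the end of this diff (other members omitted):

	/* Before: a yielded block was implicitly one page. */
	typedef struct yielded {
		block_id_t id;
		phys_bytes addr;
		struct yielded *younger, *older;	/* LRU */
	} yielded_t;

	/* After: the node carries its own length, so >4k blocks work again. */
	typedef struct yielded {
		block_id_t id;
		phys_bytes physaddr;	/* physical start of the block */
		int pages;		/* length in VM_PAGE_SIZE units */
		struct yielded *younger, *older;	/* LRU */
	} yielded_t;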

View file

@@ -227,7 +227,7 @@ int do_unmap_phys(message *m)
vmp = &vmproc[n];
if(!(region = map_lookup(vmp, (vir_bytes) m->VMUM_ADDR))) {
if(!(region = map_lookup(vmp, (vir_bytes) m->VMUM_ADDR, NULL))) {
return EINVAL;
}
@@ -281,7 +281,7 @@ int do_remap(message *m)
* about whether the user needs to bind to
* THAT address or be chosen by the system.
*/
if (!(region = map_lookup(svmp, sa)))
if (!(region = map_lookup(svmp, sa, NULL)))
return EINVAL;
if(region->vaddr != sa) {
@@ -331,7 +331,7 @@ int do_shared_unmap(message *m)
addr = m->VMUN_ADDR;
if(!(vr = map_lookup(vmp, addr))) {
if(!(vr = map_lookup(vmp, addr, NULL))) {
printf("VM: addr 0x%lx not found.\n", m->VMUN_ADDR);
return EFAULT;
}
@@ -421,7 +421,7 @@ int do_munmap(message *m)
assert(m->m_type == VM_MUNMAP);
addr = (vir_bytes) (vir_bytes) m->VMUM_ADDR;
if(!(vr = map_lookup(vmp, addr))) {
if(!(vr = map_lookup(vmp, addr, NULL))) {
printf("VM: unmap: virtual address %p not found in %d\n",
m->VMUM_ADDR, vmp->vm_endpoint);
return EFAULT;

View file

@@ -70,7 +70,7 @@ void do_pagefaults(message *m)
assert(vmp->vm_flags & VMF_INUSE);
/* See if address is valid at all. */
if(!(region = map_lookup(vmp, addr))) {
if(!(region = map_lookup(vmp, addr, NULL))) {
if(PFERR_PROT(err)) {
printf("VM: pagefault: SIGSEGV %d protected addr 0x%x; %s\n",
ep, addr, pf_errstr(err));
@@ -177,7 +177,7 @@ int handle_memory(struct vmproc *vmp, vir_bytes mem, vir_bytes len, int wrflag)
while(len > 0) {
int r;
if(!(region = map_lookup(vmp, mem))) {
if(!(region = map_lookup(vmp, mem, NULL))) {
#if VERBOSE
map_printmap(vmp);
printf("VM: do_memory: memory doesn't exist\n");

View file

@@ -137,7 +137,8 @@ int map_free_proc(struct vmproc *vmp);
int map_proc_copy(struct vmproc *dst, struct vmproc *src);
int map_proc_copy_from(struct vmproc *dst, struct vmproc *src, struct
vir_region *start_src_vr);
struct vir_region *map_lookup(struct vmproc *vmp, vir_bytes addr);
struct vir_region *map_lookup(struct vmproc *vmp, vir_bytes addr,
struct phys_region **pr);
int map_pf(struct vmproc *vmp, struct vir_region *region, vir_bytes
offset, int write);
int map_pin_memory(struct vmproc *vmp);
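map_lookup() gains an optional out-parameter for the backing phys_region, so callers that need both the region and the mapped page can get them in one lookup. A sketch of the two calling styles used throughout this commit, assuming vmp and addr are in scope and with error handling abbreviated:

	struct vir_region *vr;
	struct phys_region *ph;

	/* Callers that only need the region keep passing NULL. */
	if(!(vr = map_lookup(vmp, addr, NULL)))
		return EINVAL;

	/* Callers that also want the mapped page ask for it in the same call;
	 * the lookup fills *ph with the phys_region at that exact offset,
	 * and callers still check it for NULL.
	 */
	if(!(vr = map_lookup(vmp, addr, &ph)) || !ph)
		return EINVAL;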

View file

@@ -813,7 +813,7 @@ static phys_bytes freeyieldednode(yielded_t *node, int freemem)
/* Free associated memory if requested. */
if(freemem) {
free_mem(ABS2CLICK(node->addr), 1);
free_mem(ABS2CLICK(node->physaddr), node->pages);
}
/* Free node. */
@@ -875,9 +875,10 @@ struct vmproc *vmp;
/*===========================================================================*
* map_lookup *
*===========================================================================*/
struct vir_region *map_lookup(vmp, offset)
struct vir_region *map_lookup(vmp, offset, physr)
struct vmproc *vmp;
vir_bytes offset;
struct phys_region **physr;
{
struct vir_region *r;
@@ -889,8 +890,15 @@ vir_bytes offset;
#endif
if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
if(offset >= r->vaddr && offset < r->vaddr + r->length)
vir_bytes ph;
if(offset >= r->vaddr && offset < r->vaddr + r->length) {
ph = offset - r->vaddr;
if(physr) {
*physr = physr_search(r->phys, ph, AVL_EQUAL);
assert((*physr)->offset == ph);
}
return r;
}
}
SANITYCHECK(SCL_FUNCTIONS);
@@ -1811,7 +1819,7 @@ int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
struct phys_region *ph;
physr_iter iter;
if (!(vr = map_lookup(vmp, addr)) ||
if (!(vr = map_lookup(vmp, addr, NULL)) ||
(vr->vaddr != addr))
return EINVAL;
@@ -1838,7 +1846,7 @@ int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
struct phys_region *ph;
physr_iter iter;
if (!(vr = map_lookup(vmp, addr)) ||
if (!(vr = map_lookup(vmp, addr, NULL)) ||
(vr->vaddr != addr))
return EINVAL;
@@ -2082,7 +2090,7 @@ int unmap_memory(endpoint_t sour, endpoint_t dest,
panic("unmap_memory: bad endpoint: %d", dest);
vmd = &vmproc[p];
vrd = map_lookup(vmd, virt_d);
vrd = map_lookup(vmd, virt_d, NULL);
assert(vrd);
/* Search for the first phys region in the destination process. */
@@ -2155,9 +2163,9 @@ int map_memory(endpoint_t sour, endpoint_t dest,
panic("map_memory: bad endpoint: %d", dest);
vmd = &vmproc[p];
vrs = map_lookup(vms, virt_s);
vrs = map_lookup(vms, virt_s, NULL);
assert(vrs);
vrd = map_lookup(vmd, virt_d);
vrd = map_lookup(vmd, virt_d, NULL);
assert(vrd);
/* Linear address -> offset from start of vir region. */
@@ -2185,39 +2193,19 @@ static struct phys_region *
get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **ret_region)
{
struct vir_region *region;
vir_bytes regionoffset, mapaddr;
vir_bytes mapaddr;
struct phys_region *ph;
mapaddr = vaddr;
if(!(region = map_lookup(vmp, mapaddr))) {
if(!(region = map_lookup(vmp, mapaddr, &ph)) || !ph) {
printf("VM: get_clean_phys_region: 0x%lx not found\n", vaddr);
return NULL;
}
if(!(region->flags & VR_ANON)) {
printf("VM: get_clean_phys_region: non-anon 0x%lx\n", vaddr);
return NULL;
}
assert(mapaddr >= region->vaddr);
assert(mapaddr < region->vaddr + region->length);
regionoffset = mapaddr-region->vaddr;
/* For now, only support the yielding of blocks that are
* exactly a mapped phys_region. Go get that phys_region.
* (This can be improved without changing the interface.)
*/
if(!(ph = physr_search(region->phys, regionoffset,
AVL_EQUAL))) {
printf("VM: get_clean_phys_region: exact block not found\n");
return NULL;
}
/* Make sure this is what we asked for. */
assert(ph->offset == regionoffset);
/* If it's mapped more than once, make a copy. */
assert(ph->ph->refcount > 0);
if(ph->ph->refcount > 1) {
@@ -2235,13 +2223,15 @@ get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **r
return ph;
}
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr)
static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr, int pages)
{
yielded_t *yb;
struct phys_region *ph;
struct vir_region *region;
yielded_avl *avl;
block_id_t blockid;
phys_bytes phaddr;
int p;
/* Try to get the yielded block */
blockid.owner = vmp->vm_endpoint;
@@ -2251,26 +2241,39 @@ static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr)
return ESRCH;
}
/* Get the intended phys region, make sure refcount is 1. */
if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
return EINVAL;
if(yb->pages != pages) {
printf("VM: getblock: length mismatch (%d != %d)\n",
pages, yb->pages);
return EFAULT;
}
assert(ph->ph->refcount == 1);
phaddr = yb->physaddr;
/* Free the block that is currently there. */
free_mem(ABS2CLICK(ph->ph->phys), 1);
for(p = 0; p < pages; p++) {
/* Get the intended phys region, make sure refcount is 1. */
if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
printf("VM: getblock: not found for %d\n", vmp->vm_endpoint);
return EINVAL;
}
/* Set the phys block to new addr and update pagetable. */
USE(ph->ph, ph->ph->phys = yb->addr;);
if(map_ph_writept(vmp, region, ph) != OK) {
/* Presumably it was mapped, so there is no reason
* updating should fail.
*/
panic("do_get_block: couldn't write pt");
assert(ph->ph->refcount == 1);
/* Free the block that is currently there. */
free_mem(ABS2CLICK(ph->ph->phys), 1);
/* Set the phys block to new addr and update pagetable. */
USE(ph->ph, ph->ph->phys = phaddr;);
if(map_ph_writept(vmp, region, ph) != OK) {
/* Presumably it was mapped, so there is no reason
* updating should fail.
*/
panic("do_get_block: couldn't write pt");
}
vaddr += VM_PAGE_SIZE;
phaddr += VM_PAGE_SIZE;
}
/* Forget about the yielded block and free the struct. */
freeyieldednode(yb, 0);
@@ -2278,12 +2281,12 @@ static int getblock(struct vmproc *vmp, u64_t id, vir_bytes vaddr)
}
static int yieldblock(struct vmproc *vmp, u64_t id,
vir_bytes vaddr, yielded_t **retyb)
vir_bytes vaddr, yielded_t **retyb, int pages)
{
yielded_t *newyb;
vir_bytes mem_clicks, clicks;
vir_bytes mem_clicks, v, p, new_phaddr;
struct vir_region *region;
struct phys_region *ph;
struct phys_region *ph = NULL, *prev_ph = NULL, *first_ph = NULL;
yielded_avl *avl;
block_id_t blockid;
@@ -2298,12 +2301,33 @@ static int yieldblock(struct vmproc *vmp, u64_t id,
return EINVAL;
}
if(vaddr % VM_PAGE_SIZE) return EFAULT;
if((vaddr % VM_PAGE_SIZE) || pages < 1) return EFAULT;
if(!(ph = get_clean_phys_region(vmp, vaddr, &region))) {
printf("VM: do_yield_block: not found for %d\n",
vmp->vm_endpoint);
return EINVAL;
v = vaddr;
for(p = 0; p < pages; p++) {
if(!(region = map_lookup(vmp, v, &ph)) || !ph) {
printf("VM: do_yield_block: not found for %d\n",
vmp->vm_endpoint);
return EINVAL;
}
if(!(region->flags & VR_ANON)) {
printf("VM: yieldblock: non-anon 0x%lx\n", v);
return EFAULT;
}
if(ph->ph->refcount != 1) {
printf("VM: do_yield_block: mapped not once for %d\n",
vmp->vm_endpoint);
return EFAULT;
}
if(prev_ph) {
if(ph->ph->phys != prev_ph->ph->phys + VM_PAGE_SIZE) {
printf("VM: physically discontiguous yield\n");
return EINVAL;
}
}
prev_ph = ph;
if(!first_ph) first_ph = ph;
v += VM_PAGE_SIZE;
}
/* Make a new block to record the yielding in. */
@@ -2313,8 +2337,7 @@ static int yieldblock(struct vmproc *vmp, u64_t id,
assert(!(ph->ph->phys % VM_PAGE_SIZE));
clicks = 1;
if((mem_clicks = alloc_mem(clicks, PAF_CLEAR)) == NO_MEM) {
if((mem_clicks = alloc_mem(pages, PAF_CLEAR)) == NO_MEM) {
SLABFREE(newyb);
return ENOMEM;
}
@@ -2322,17 +2345,28 @@ static int yieldblock(struct vmproc *vmp, u64_t id,
/* Update yielded block info. */
USE(newyb,
newyb->id = blockid;
newyb->addr = ph->ph->phys;
newyb->physaddr = first_ph->ph->phys;
newyb->pages = pages;
newyb->younger = NULL;);
new_phaddr = CLICK2ABS(mem_clicks);
/* Set new phys block to new addr and update pagetable. */
USE(ph->ph,
ph->ph->phys = CLICK2ABS(mem_clicks););
if(map_ph_writept(vmp, region, ph) != OK) {
/* Presumably it was mapped, so there is no reason
* updating should fail.
*/
panic("yield_block: couldn't write pt");
v = vaddr;
for(p = 0; p < pages; p++) {
region = map_lookup(vmp, v, &ph);
assert(region && ph);
assert(ph->ph->refcount == 1);
USE(ph->ph,
ph->ph->phys = new_phaddr;);
if(map_ph_writept(vmp, region, ph) != OK) {
/* Presumably it was mapped, so there is no reason
* updating should fail.
*/
panic("yield_block: couldn't write pt");
}
v += VM_PAGE_SIZE;
new_phaddr += VM_PAGE_SIZE;
}
/* Remember yielded block. */
@@ -2425,6 +2459,7 @@ int do_yieldblockgetblock(message *m)
struct vmproc *vmp;
yielded_t *yb = NULL;
int r = ESRCH;
int pages;
if(vm_isokendpt(caller, &n) != OK)
panic("do_yieldblockgetblock: message from strange source: %d",
@@ -2432,13 +2467,15 @@ int do_yieldblockgetblock(message *m)
vmp = &vmproc[n];
if(m->VMYBGB_LEN != VM_PAGE_SIZE) {
static int printed = 0;
pages = m->VMYBGB_LEN / VM_PAGE_SIZE;
if((m->VMYBGB_LEN % VM_PAGE_SIZE) || pages < 1) {
static int printed;
if(!printed) {
printed = 1;
printf("vm: secondary cache for non-page-sized blocks temporarily disabled\n");
printf("vm: non-page-aligned or short block length\n");
}
return ENOSYS;
return EFAULT;
}
yieldid = make64(m->VMYBGB_YIELDIDLO, m->VMYBGB_YIELDIDHI);
@@ -2446,12 +2483,13 @@ int do_yieldblockgetblock(message *m)
if(cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
/* A block was given to yield. */
yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb);
yieldblock(vmp, yieldid, (vir_bytes) m->VMYBGB_VADDR, &yb,
pages);
}
if(cmp64(getid, VM_BLOCKID_NONE) != 0) {
/* A block was given to get. */
r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR);
r = getblock(vmp, getid, (vir_bytes) m->VMYBGB_VADDR, pages);
}
return r;
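Both getblock() and yieldblock() above now walk a block one page at a time, advancing the virtual and the physical cursor by VM_PAGE_SIZE on each step, with the page count derived from VMYBGB_LEN. A minimal standalone sketch of that arithmetic (the 4096-byte page size and the example addresses are assumptions for illustration, not values taken from this commit):

	#include <assert.h>
	#include <stdio.h>

	#define VM_PAGE_SIZE 4096	/* assumed page size for the sketch */

	/* Walk a multi-page block the way getblock()/yieldblock() now do:
	 * one page per iteration, both cursors advanced in lockstep.
	 */
	static void walk_block(unsigned long vaddr, unsigned long phaddr,
		unsigned long len)
	{
		int pages, p;

		assert(len > 0 && !(len % VM_PAGE_SIZE));
		pages = len / VM_PAGE_SIZE;

		for(p = 0; p < pages; p++) {
			printf("page %d: vaddr 0x%lx -> phys 0x%lx\n",
				p, vaddr, phaddr);
			vaddr += VM_PAGE_SIZE;
			phaddr += VM_PAGE_SIZE;
		}
	}

	int main(void)
	{
		/* e.g. an 8k (two-page) block */
		walk_block(0x80000000UL, 0x100000UL, 2 * VM_PAGE_SIZE);
		return 0;
	}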

View file

@@ -297,8 +297,8 @@ int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
*/
if(!is_vm) {
struct vir_region *vr;
vr = map_lookup(dst_vmp, VM_STACKTOP);
if(vr && !map_lookup(src_vmp, VM_STACKTOP)) {
vr = map_lookup(dst_vmp, VM_STACKTOP, NULL);
if(vr && !map_lookup(src_vmp, VM_STACKTOP, NULL)) {
#if LU_DEBUG
printf("VM: swap_proc_dyn_data: tranferring regions above the stack from %d to %d\n",
src_vmp->vm_endpoint, dst_vmp->vm_endpoint);

View file

@@ -14,7 +14,8 @@ typedef struct yielded {
* uniquely identify a yielded block.
*/
block_id_t id;
phys_bytes addr;
phys_bytes physaddr;
int pages;
/* LRU fields */
struct yielded *younger, *older;
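Note that freeyieldednode() in the region.c hunk above now releases node->pages clicks starting at ABS2CLICK(node->physaddr). Passing a page count where free_mem() expects a click count is only equivalent when a click is one page; that holds for the usual i386 MINIX definitions sketched below, which are an assumption here rather than part of this diff:

	/* Assumed i386 definitions: one click == one 4 KiB page. */
	#define CLICK_SHIFT	12
	#define CLICK_SIZE	(1 << CLICK_SHIFT)
	#define ABS2CLICK(a)	((a) >> CLICK_SHIFT)
	#define CLICK2ABS(c)	((c) << CLICK_SHIFT)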