VM: pagequeue data structure

. a data structure that automatically keeps a set of pages
	  in reserve, replacing the sparepages array; it may be
	  re-used in the future for similar situations, e.g. if
	  in-filesystem-cache block eviction is implemented and
	  FS asks for a new block

Change-Id: I149d46c14b9c8e75df16cb94e08907f008c339a6
Ben Gras 2012-12-29 00:16:33 +00:00
parent a212fd1ba5
commit 90d777f053
4 changed files with 235 additions and 97 deletions
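
A usage sketch of the new interface (reservedqueue_new, reservedqueue_alloc
and alloc_cycle, as declared in the proto.h hunk below): a hypothetical
future consumer such as an FS block cache might keep mapped pages in
reserve like this. The blockcache_* names are illustrative only, not part
of the commit.

	/* Hypothetical consumer of the reserved-page queue API. */
	static void *blockcache_pagequeue;

	void blockcache_init(void)
	{
		/* Keep up to 10 single, mapped-in pages in reserve,
		 * with default allocation flags.
		 */
		if(!(blockcache_pagequeue = reservedqueue_new(10, 1, 1, 0)))
			panic("blockcache: reservedqueue_new failed");
	}

	int blockcache_newblock(phys_bytes *ph, void **vir)
	{
		/* Pop a page that was set aside earlier; this never
		 * allocates. The main loop tops the queue back up
		 * later by calling alloc_cycle().
		 */
		return reservedqueue_alloc(blockcache_pagequeue, ph, vir);
	}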

servers/vm/alloc.c

@@ -53,6 +53,189 @@ struct {
 #define page_isfree(i) GET_BIT(free_pages_bitmap, i)
 
+#define RESERVEDMAGIC 0x6e4c74d5
+#define MAXRESERVEDPAGES 100
+#define MAXRESERVEDQUEUES 15
+
+static struct reserved_pages {
+	struct reserved_pages *next;	/* next in use */
+	int max_available;	/* queue depth use, 0 if not in use at all */
+	int npages;		/* number of consecutive pages */
+	int mappedin;		/* must reserved pages also be mapped? */
+	int n_available;	/* number of queue entries */
+	int allocflags;		/* allocflags for alloc_mem */
+	struct reserved_pageslot {
+		phys_bytes	phys;
+		void		*vir;
+	} slots[MAXRESERVEDPAGES];
+	u32_t magic;
+} reservedqueues[MAXRESERVEDQUEUES], *first_reserved_inuse = NULL;
+
+int missing_spares = 0;
+
+static void sanitycheck_queues(void)
+{
+	struct reserved_pages *mrq;
+	int m = 0;
+
+	for(mrq = first_reserved_inuse; mrq; mrq = mrq->next) {
+		assert(mrq->max_available > 0);
+		assert(mrq->max_available >= mrq->n_available);
+		m += mrq->max_available - mrq->n_available;
+	}
+
+	assert(m == missing_spares);
+}
+
+static void sanitycheck_rq(struct reserved_pages *rq)
+{
+	assert(rq->magic == RESERVEDMAGIC);
+	assert(rq->n_available >= 0);
+	assert(rq->n_available <= MAXRESERVEDPAGES);
+	assert(rq->n_available <= rq->max_available);
+
+	sanitycheck_queues();
+}
+
+void *reservedqueue_new(int max_available, int npages, int mapped, int allocflags)
+{
+	int r;
+	struct reserved_pages *rq;
+
+	assert(max_available > 0);
+	assert(max_available < MAXRESERVEDPAGES);
+	assert(npages > 0);
+	assert(npages < 10);
+
+	for(r = 0; r < MAXRESERVEDQUEUES; r++)
+		if(!reservedqueues[r].max_available)
+			break;
+
+	if(r >= MAXRESERVEDQUEUES) {
+		printf("VM: %d reserved queues in use\n", MAXRESERVEDQUEUES);
+		return NULL;
+	}
+
+	rq = &reservedqueues[r];
+	memset(rq, 0, sizeof(*rq));
+
+	rq->next = first_reserved_inuse;
+	first_reserved_inuse = rq;
+
+	rq->max_available = max_available;
+	rq->npages = npages;
+	rq->mappedin = mapped;
+	rq->allocflags = allocflags;
+	rq->magic = RESERVEDMAGIC;
+
+	missing_spares += max_available;
+
+	return rq;
+}
+
+static void
+reservedqueue_fillslot(struct reserved_pages *rq,
+	struct reserved_pageslot *rps, phys_bytes ph, void *vir)
+{
+	rps->phys = ph;
+	rps->vir = vir;
+	assert(missing_spares > 0);
+	if(rq->mappedin) assert(vir);
+	missing_spares--;
+	rq->n_available++;
+}
+
+static int
+reservedqueue_addslot(struct reserved_pages *rq)
+{
+	phys_bytes cl, cl_addr;
+	void *vir;
+	struct reserved_pageslot *rps;
+
+	sanitycheck_rq(rq);
+
+	if((cl = alloc_mem(rq->npages, rq->allocflags)) == NO_MEM)
+		return ENOMEM;
+
+	cl_addr = CLICK2ABS(cl);
+	vir = NULL;
+
+	if(rq->mappedin) {
+		if(!(vir = vm_mappages(cl_addr, rq->npages))) {
+			free_mem(cl, rq->npages);
+			printf("reservedqueue_addslot: vm_mappages failed\n");
+			return ENOMEM;
+		}
+	}
+
+	rps = &rq->slots[rq->n_available];
+
+	reservedqueue_fillslot(rq, rps, cl_addr, vir);
+
+	return OK;
+}
+
+void reservedqueue_add(void *rq_v, void *vir, phys_bytes ph)
+{
+	struct reserved_pages *rq = rq_v;
+	struct reserved_pageslot *rps;
+
+	sanitycheck_rq(rq);
+
+	rps = &rq->slots[rq->n_available];
+
+	reservedqueue_fillslot(rq, rps, ph, vir);
+}
+
+int reservedqueue_fill(void *rq_v)
+{
+	struct reserved_pages *rq = rq_v;
+	int r;
+
+	sanitycheck_rq(rq);
+
+	while(rq->n_available < rq->max_available)
+		if((r=reservedqueue_addslot(rq)) != OK)
+			return r;
+
+	return OK;
+}
+
+int
+reservedqueue_alloc(void *rq_v, phys_bytes *ph, void **vir)
+{
+	struct reserved_pages *rq = rq_v;
+	struct reserved_pageslot *rps;
+
+	sanitycheck_rq(rq);
+
+	if(rq->n_available < 1) return ENOMEM;
+
+	rq->n_available--;
+	missing_spares++;
+	rps = &rq->slots[rq->n_available];
+
+	*ph = rps->phys;
+	*vir = rps->vir;
+
+	sanitycheck_rq(rq);
+
+	return OK;
+}
+
+void alloc_cycle(void)
+{
+	struct reserved_pages *rq;
+
+	sanitycheck_queues();
+	for(rq = first_reserved_inuse; rq && missing_spares > 0; rq = rq->next) {
+		sanitycheck_rq(rq);
+		reservedqueue_fill(rq);
+		sanitycheck_rq(rq);
+	}
+	sanitycheck_queues();
+}
+
 /*===========================================================================*
  *				alloc_mem				     *
  *===========================================================================*/
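
The accounting above maintains the invariant that sanitycheck_queues()
checks: missing_spares always equals the sum over all live queues of
max_available - n_available. A standalone model of that bookkeeping
(plain C, independent of the VM sources; all names here are illustrative):

	#include <assert.h>

	/* Miniature model: creating a queue "empty" adds its full depth
	 * to missing_spares; filling a slot pays one back; popping a
	 * slot makes one missing again.
	 */
	static int missing_spares;

	struct q { int max_available, n_available; };

	static void q_new(struct q *q, int depth)
	{
		q->max_available = depth;
		q->n_available = 0;
		missing_spares += depth;
	}

	static void q_fillslot(struct q *q)
	{
		assert(missing_spares > 0);
		missing_spares--;
		q->n_available++;
	}

	static void q_alloc(struct q *q)
	{
		assert(q->n_available > 0);
		q->n_available--;
		missing_spares++;
	}

	int main(void)
	{
		struct q q1;
		q_new(&q1, 3);		/* missing_spares == 3 */
		q_fillslot(&q1);	/* missing_spares == 2 */
		q_fillslot(&q1);	/* missing_spares == 1 */
		q_alloc(&q1);		/* missing_spares == 2 */
		assert(missing_spares == q1.max_available - q1.n_available);
		return 0;
	}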

servers/vm/pagetable.c

@@ -82,12 +82,6 @@ static struct {
 	phys_bytes	phys;
 } sparepagedirs[SPAREPAGEDIRS];
 
-int missing_spares = SPAREPAGES;
-static struct {
-	void	*page;
-	phys_bytes	phys;
-} sparepages[SPAREPAGES];
-
 extern char _end;
 #define is_staticaddr(v) ((vir_bytes) (v) < (vir_bytes) &_end)
@@ -111,6 +105,7 @@ int kernmappings = 0;
 #error CLICK_SIZE must be page size.
 #endif
 
+static void *spare_pagequeue;
 static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES]
 	__aligned(VM_PAGE_SIZE);
@@ -282,21 +277,13 @@ void vm_freepages(vir_bytes vir, int pages)
  *===========================================================================*/
 static void *vm_getsparepage(phys_bytes *phys)
 {
-	int s;
-	assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
-	for(s = 0; s < SPAREPAGES; s++) {
-		if(sparepages[s].page) {
-			void *sp;
-			sp = sparepages[s].page;
-			*phys = sparepages[s].phys;
-			sparepages[s].page = NULL;
-			missing_spares++;
-			assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
-			return sp;
-		}
+	void *ptr;
+
+	if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) {
+		printf("vm_getsparepage: no spare found\n");
+		return NULL;
 	}
-	printf("no spare found, %d missing\n", missing_spares);
-	return NULL;
+
+	assert(ptr);
+	return ptr;
 }
 
 /*===========================================================================*
/*===========================================================================*
@@ -320,31 +307,37 @@ static void *vm_getsparepagedir(phys_bytes *phys)
 	return NULL;
 }
 
 /*===========================================================================*
  *				vm_checkspares				     *
  *===========================================================================*/
-static void *vm_checkspares(void)
+void *vm_mappages(phys_bytes p, int pages)
 {
-	int s, n = 0;
-	static int total = 0, worst = 0;
-	assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
-	for(s = 0; s < SPAREPAGES && missing_spares > 0; s++) {
-		if(!sparepages[s].page) {
-			n++;
-			if((sparepages[s].page = vm_allocpage(&sparepages[s].phys,
-				VMP_SPARE))) {
-				missing_spares--;
-				assert(missing_spares >= 0);
-				assert(missing_spares <= SPAREPAGES);
-			} else {
-				printf("VM: warning: couldn't get new spare page\n");
-			}
-		}
-	}
-	if(worst < n) worst = n;
-	total += n;
+	vir_bytes loc;
+	int r;
+	pt_t *pt = &vmprocess->vm_pt;
 
-	return NULL;
+	/* Where in our virtual address space can we put it? */
+	loc = findhole(pages);
+	if(loc == NO_MEM) {
+		printf("vm_mappages: findhole failed\n");
+		return NULL;
+	}
+
+	/* Map this page into our address space. */
+	if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages,
+		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
+#if defined(__arm__)
+		| ARM_VM_PTE_WB
+#endif
+		, 0)) != OK) {
+		printf("vm_mappages writemap failed\n");
+		return NULL;
+	}
+
+	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+		panic("VMCTL_FLUSHTLB failed: %d", r);
+	}
+
+	assert(loc);
+
+	return (void *) loc;
 }
 
 static int pt_init_done;
@@ -356,14 +349,10 @@ void *vm_allocpages(phys_bytes *phys, int reason, int pages)
 {
 	/* Allocate a page for use by VM itself. */
 	phys_bytes newpage;
-	vir_bytes loc;
-	pt_t *pt;
 	int r;
 	static int level = 0;
 	void *ret;
 	u32_t mem_flags = 0;
 
-	pt = &vmprocess->vm_pt;
 	assert(reason >= 0 && reason < VMP_CATEGORIES);
 	assert(pages > 0);
@@ -395,16 +384,6 @@ void *vm_allocpages(phys_bytes *phys, int reason, int pages)
 	}
 #endif
 
-	/* VM does have a pagetable, so get a page and map it in there.
-	 * Where in our virtual address space can we put it?
-	 */
-	loc = findhole(pages);
-	if(loc == NO_MEM) {
-		level--;
-		printf("VM: vm_allocpage: findhole failed\n");
-		return NULL;
-	}
-
 	/* Allocate page of memory for use by VM. As VM
 	 * is trusted, we don't have to pre-clear it.
 	 */
@@ -416,29 +395,15 @@ void *vm_allocpages(phys_bytes *phys, int reason, int pages)
 	*phys = CLICK2ABS(newpage);
 
-	/* Map this page into our address space. */
-	if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
-		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
-#if defined(__arm__)
-		| ARM_VM_PTE_WT
-#endif
-		, 0)) != OK) {
-		free_mem(newpage, pages);
-		printf("vm_allocpage writemap failed\n");
+	if(!(ret = vm_mappages(*phys, pages))) {
 		level--;
+		printf("VM: vm_allocpage: vm_mappages failed\n");
 		return NULL;
 	}
 
-	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
-		panic("VMCTL_FLUSHTLB failed: %d", r);
-	}
-
 	level--;
 
-	/* Return user-space-ready pointer to it. */
-	ret = (void *) loc;
 	vm_self_pages++;
 	return ret;
 }
@@ -1153,21 +1118,17 @@ void pt_init(void)
 	}
 #endif
 
+	missing_spares = 0;
+	if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
+		panic("reservedqueue_new for single pages failed");
+
+	assert(STATIC_SPAREPAGES < SPAREPAGES);
+
-	for(s = 0; s < SPAREPAGES; s++) {
-		vir_bytes v = (sparepages_mem + s*VM_PAGE_SIZE);;
+	for(s = 0; s < STATIC_SPAREPAGES; s++) {
+		void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
 		phys_bytes ph;
 		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
 			VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
 				panic("pt_init: sys_umap failed: %d", r);
-		if(s >= STATIC_SPAREPAGES) {
-			sparepages[s].page = NULL;
-			missing_spares++;
-			continue;
-		}
-		sparepages[s].page = (void *) v;
-		sparepages[s].phys = ph;
+		reservedqueue_add(spare_pagequeue, v, ph);
 	}
 
 #if defined(__i386__)
@@ -1343,8 +1304,6 @@ void pt_init(void)
 	pt_init_done = 1;
 
-	vm_checkspares();
-
 	/* All OK. */
 	return;
 }
@@ -1508,12 +1467,4 @@ int pt_mapkernel(pt_t *pt)
 	return OK;
 }
 
-/*===========================================================================*
- *				pt_cycle				     *
- *===========================================================================*/
-void pt_cycle(void)
-{
-	vm_checkspares();
-}
-
 int get_vm_self_pages(void) { return vm_self_pages; }

servers/vm/main.c

@@ -93,7 +93,7 @@ int main(void)
 		SANITYCHECK(SCL_TOP);
 
 		if(missing_spares > 0) {
-			pt_cycle();	/* pagetable code wants to be called */
+			alloc_cycle();	/* mem alloc code wants to be called */
 		}
 
 		if ((r=sef_receive_status(ANY, &msg, &rcv_sts)) != OK
@@ -461,7 +461,7 @@ static void sef_cb_signal_handler(int signo)
 	 * though.
 	 */
 	if(missing_spares > 0) {
-		pt_cycle();	/* pagetable code wants to be called */
+		alloc_cycle();	/* mem alloc code wants to be called */
 	}
 	pt_clearmapcache();
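
The effect of this change to the main loop: request handlers only ever
pop pre-reserved (and, for spare_pagequeue, pre-mapped) pages, and the
queues are topped up outside the request path. A condensed sketch of
that serve-then-replenish pattern (handle_request() is a hypothetical
stand-in for the real message dispatch, not VM code):

	extern int missing_spares;	/* maintained in alloc.c */
	void alloc_cycle(void);		/* refills queues to max_available */
	int handle_request(void);	/* hypothetical; may pop spare pages */

	void serve_loop(void)
	{
		for (;;) {
			/* Replenish before blocking for the next message,
			 * so handlers always find spare pages ready.
			 */
			if (missing_spares > 0)
				alloc_cycle();

			if (handle_request() < 0)
				break;
		}
	}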

servers/vm/proto.h

@@ -18,6 +18,10 @@ struct phys_region;
 #include "yielded.h"
 
 /* alloc.c */
+void *reservedqueue_new(int, int, int, int);
+int reservedqueue_alloc(void *, phys_bytes *, void **);
+void reservedqueue_add(void *, void *, phys_bytes);
+void alloc_cycle(void);
 void mem_sanitycheck(char *file, int line);
 phys_clicks alloc_mem(phys_clicks clicks, u32_t flags);
 void memstats(int *nodes, int *pages, int *largest);
@@ -92,10 +96,10 @@ int pt_writemap(struct vmproc * vmp, pt_t *pt, vir_bytes v, phys_bytes
 	physaddr, size_t bytes, u32_t flags, u32_t writemapflags);
 int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes, int write);
 int pt_bind(pt_t *pt, struct vmproc *who);
+void *vm_mappages(phys_bytes p, int pages);
 void *vm_allocpage(phys_bytes *p, int cat);
 void *vm_allocpages(phys_bytes *p, int cat, int pages);
 void *vm_allocpagedir(phys_bytes *p);
-void pt_cycle(void);
 int pt_mapkernel(pt_t *pt);
 void vm_pagelock(void *vir, int lockflag);
 int vm_addrok(void *vir, int write);