let VM use physically fragmented memory for allocations.

map_copy_ph_block is replaced by map_clone_ph_block, which can
replace a single physical block with multiple physical blocks.

also:
 . merge map_mem.c with region.c, as they manipulate the same
   data structures
 . remove NOTRUNNABLE as a sanity check
 . use direct functions for ALLOC_MEM and FREE_MEM again
 . add some checks to the shared memory mapping code
 . fix data structure integrity when using shared memory
 . fix sanity checks
Ben Gras 2010-04-12 11:25:24 +00:00
parent 76fbf21026
commit c78250332d
22 changed files with 987 additions and 629 deletions
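
Editor's note (not part of the commit): the core of this change is that an
allocation request may now be satisfied by several physically discontiguous
blocks, returned as a linked list of struct memlist nodes (see memlist.h and
alloc_mem_in_list() in alloc.c below), unless PAF_CONTIG forces one block.
The following stand-alone toy model sketches that contract; the fixed
"largest free block" size and the fake physical addresses are illustrative
assumptions, not VM's real allocator:

	#include <stdio.h>
	#include <stdlib.h>

	struct memlist {		/* mirrors servers/vm/memlist.h */
		struct memlist *next;
		unsigned long phys;	/* physical address in bytes */
		unsigned long length;	/* length in bytes */
	};

	#define PAGE_SIZE 4096UL

	/* Toy allocator: pretend the largest free block is 4 pages, so a
	 * big request comes back as several discontiguous blocks, the way
	 * alloc_pages() with PAF_FIRSTBLOCK feeds alloc_mem_in_list(). */
	static struct memlist *toy_alloc_mem_in_list(unsigned long bytes)
	{
		struct memlist *head = NULL;
		unsigned long fake_phys = 0x100000UL;

		while(bytes > 0) {
			unsigned long chunk =
				bytes < 4*PAGE_SIZE ? bytes : 4*PAGE_SIZE;
			struct memlist *ml = malloc(sizeof(*ml));
			if(!ml) return NULL; /* real code frees the partial list */
			ml->phys = fake_phys;
			ml->length = chunk;
			ml->next = head;	/* prepended, as in the VM code */
			head = ml;
			fake_phys += chunk + PAGE_SIZE;	/* leave a gap: fragmented */
			bytes -= chunk;
		}
		return head;
	}

	int main(void)
	{
		struct memlist *ml, *head = toy_alloc_mem_in_list(10*PAGE_SIZE);

		/* A consumer walks the list and maps each block in turn. */
		for(ml = head; ml; ml = ml->next)
			printf("0x%lx-0x%lx\n", ml->phys,
				ml->phys + ml->length - 1);
		while(head) {		/* free_mem_list() analogue */
			struct memlist *next = head->next;
			free(head);
			head = next;
		}
		return 0;
	}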


@@ -4,7 +4,7 @@
 PROG=	vm
 SRCS=	main.c alloc.c utility.c exec.c exit.c fork.c break.c \
 	signal.c mmap.c slaballoc.c region.c pagefaults.c addravl.c \
-	physravl.c rs.c queryexit.c map_mem.c
+	physravl.c rs.c queryexit.c
 DPADD+=	${LIBSYS}
 LDADD+=	-lsys


@@ -41,13 +41,14 @@
 #include "pagerange.h"
 #include "addravl.h"
 #include "sanitycheck.h"
+#include "memlist.h"
 
 /* AVL tree of free pages. */
 addr_avl addravl;
 
 /* Used for sanity check. */
 PRIVATE phys_bytes mem_low, mem_high;
-#define vm_assert_range(addr, len)  \
+#define assert_range(addr, len)  \
 	vm_assert((addr) >= mem_low); \
 	vm_assert((addr) + (len) - 1 <= mem_high);
@@ -73,13 +74,15 @@ PRIVATE struct hole *free_slots;/* ptr to list of unused table slots */
 FORWARD _PROTOTYPE( void del_slot, (struct hole *prev_ptr, struct hole *hp) );
 FORWARD _PROTOTYPE( void merge, (struct hole *hp) );
 FORWARD _PROTOTYPE( void free_pages, (phys_bytes addr, int pages) );
-FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags) );
+FORWARD _PROTOTYPE( phys_bytes alloc_pages, (int pages, int flags,
+	phys_bytes *ret));
 
 #if SANITYCHECKS
 FORWARD _PROTOTYPE( void holes_sanity_f, (char *fn, int line) );
 #define CHECKHOLES holes_sanity_f(__FILE__, __LINE__)
-#define MAXPAGES (1024*1024*1024/VM_PAGE_SIZE) /* 1GB of memory */
+#define PAGESPERGB (1024*1024*1024/VM_PAGE_SIZE) /* pages per GB of memory */
+#define MAXPAGES (2*PAGESPERGB)
 #define CHUNKS BITMAP_CHUNKS(MAXPAGES)
 PRIVATE bitchunk_t pagemap[CHUNKS];
@@ -87,32 +90,6 @@ PRIVATE bitchunk_t pagemap[CHUNKS];
 #define CHECKHOLES
 #endif
 
-/* Sanity check for parameters of node p. */
-#define vm_assert_params(p, bytes, next) { \
-	vm_assert((p) != NO_MEM); \
-	vm_assert(!((bytes) % VM_PAGE_SIZE)); \
-	vm_assert(!((next) % VM_PAGE_SIZE)); \
-	vm_assert((bytes) > 0); \
-	vm_assert((p) + (bytes) > (p)); \
-	vm_assert((next) == NO_MEM || ((p) + (bytes) <= (next))); \
-	vm_assert_range((p), (bytes)); \
-	vm_assert_range((next), 1); \
-}
-
-/* Retrieve size of free block and pointer to next block from physical
- * address (page) p.
- */
-#define GET_PARAMS(p, bytes, next) { \
-	phys_readaddr((p), &(bytes), &(next)); \
-	vm_assert_params((p), (bytes), (next)); \
-}
-
-/* Write parameters to physical page p. */
-#define SET_PARAMS(p, bytes, next) { \
-	vm_assert_params((p), (bytes), (next)); \
-	phys_writeaddr((p), (bytes), (next)); \
-}
-
 #if SANITYCHECKS
@@ -127,7 +104,7 @@ int line;
 	if(!(c)) { \
 		printf("holes_sanity_f:%s:%d: %s failed\n", file, line, #c); \
 		util_stacktrace(); \
-		panic("assert failed"); } \
+		panic("vm_assert failed"); } \
 	}
 
 	int h, c = 0, n = 0;
@@ -187,9 +164,9 @@ int line;
 #endif
 
 /*===========================================================================*
- *				alloc_mem_f				     *
+ *				alloc_mem				     *
  *===========================================================================*/
-PUBLIC phys_clicks alloc_mem_f(phys_clicks clicks, u32_t memflags)
+PUBLIC phys_clicks alloc_mem(phys_clicks clicks, u32_t memflags)
 {
 /* Allocate a block of memory from the free list using first fit. The block
  * consists of a sequence of contiguous bytes, whose length in clicks is
@@ -208,7 +185,7 @@ PUBLIC phys_clicks alloc_mem_f(phys_clicks clicks, u32_t memflags)
 	if(vm_paged) {
 		vm_assert(CLICK_SIZE == VM_PAGE_SIZE);
-		mem = alloc_pages(clicks, memflags);
+		mem = alloc_pages(clicks, memflags, NULL);
 	} else {
 CHECKHOLES;
 		prev_ptr = NIL_HOLE;
@@ -253,7 +230,7 @@ CHECKHOLES;
 		if(o > 0) {
 			phys_clicks e;
 			e = align_clicks - o;
-			FREE_MEM(mem, e);
+			free_mem(mem, e);
 			mem += e;
 		}
 	}
@@ -263,9 +240,9 @@ CHECKHOLES;
 }
 
 /*===========================================================================*
- *				free_mem_f				     *
+ *				free_mem				     *
  *===========================================================================*/
-PUBLIC void free_mem_f(phys_clicks base, phys_clicks clicks)
+PUBLIC void free_mem(phys_clicks base, phys_clicks clicks)
 {
 /* Return a block of free memory to the hole list. The parameters tell where
  * the block starts in physical memory and how big it is. The block is added
@@ -414,7 +391,7 @@ struct memory *chunks;	/* list of free memory chunks */
 		to = CLICK2ABS(chunks[i].base+chunks[i].size)-1;
 		if(first || from < mem_low) mem_low = from;
 		if(first || to > mem_high) mem_high = to;
-		FREE_MEM(chunks[i].base, chunks[i].size);
+		free_mem(chunks[i].base, chunks[i].size);
 		total_pages += chunks[i].size;
 		first = 0;
 	}
@@ -465,7 +442,7 @@ PUBLIC void memstats(int *nodes, int *pages, int *largest)
 /*===========================================================================*
  *				alloc_pages				     *
  *===========================================================================*/
-PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags)
+PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
 {
 	addr_iter iter;
 	pagerange_t *pr;
@@ -494,7 +471,8 @@ PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags)
 	while((pr = addr_get_iter(&iter))) {
 		SLABSANE(pr);
-		if(pr->size >= pages) {
+		vm_assert(pr->size > 0);
+		if(pr->size >= pages || (memflags & PAF_FIRSTBLOCK)) {
 			if(memflags & PAF_LOWER16MB) {
 				if(pr->addr + pages > boundary16)
 					return NO_MEM;
@@ -518,6 +496,8 @@ PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags)
 		printf("VM: alloc_pages: alloc failed of %d pages\n", pages);
 		util_stacktrace();
 		printmemstats();
+		if(len)
+			*len = 0;
 #if SANITYCHECKS
 		if(largest >= pages) {
 			panic("no memory but largest was enough");
@@ -528,6 +508,22 @@ PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags)
 
 	SLABSANE(pr);
 
+	if(memflags & PAF_FIRSTBLOCK) {
+		vm_assert(len);
+		/* The block doesn't have to be as big as requested;
+		 * return its size though.
+		 */
+		if(pr->size < pages) {
+			pages = pr->size;
+#if SANITYCHECKS
+			wantpages = firstpages - pages;
+#endif
+		}
+	}
+
+	if(len)
+		*len = pages;
+
 	/* Allocated chunk is off the end. */
 	mem = pr->addr + pr->size - pages;
 
@@ -556,6 +552,10 @@ PRIVATE PUBLIC phys_bytes alloc_pages(int pages, int memflags)
 	memstats(&finalnodes, &finalpages, &largest);
 	sanitycheck();
 
+	if(finalpages != wantpages) {
+		printf("pages start: %d req: %d final: %d\n",
+			firstpages, pages, finalpages);
+	}
 	vm_assert(finalnodes == wantnodes);
 	vm_assert(finalpages == wantpages);
 #endif
@@ -754,7 +754,7 @@ PUBLIC int do_deldma(message *msg)
 			if (j >= NR_DMA)
 			{
 				/* Last segment */
-				FREE_MEM(dmatab[i].dt_seg_base,
+				free_mem(dmatab[i].dt_seg_base,
 					dmatab[i].dt_seg_size);
 			}
 		}
@@ -822,7 +822,7 @@ PUBLIC void release_dma(struct vmproc *vmp)
 	}
 
 	if (!found_one)
-		FREE_MEM(base, size);
+		free_mem(base, size);
 
 	msg->VMRD_FOUND = found_one;
 #endif
@@ -867,7 +867,7 @@ int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
 	vm_assert(!(addr % VM_PAGE_SIZE));
 	vm_assert(!(len % VM_PAGE_SIZE));
 	vm_assert(len > 0);
-	vm_assert_range(addr, len);
+	assert_range(addr, len);
 
 	pagestart = addr / VM_PAGE_SIZE;
 	pages = len / VM_PAGE_SIZE;
@@ -892,3 +892,91 @@ int usedpages_add_f(phys_bytes addr, phys_bytes len, char *file, int line)
 }
 
 #endif
+
+/*===========================================================================*
+ *				alloc_mem_in_list			     *
+ *===========================================================================*/
+struct memlist *alloc_mem_in_list(phys_bytes bytes, u32_t flags)
+{
+	phys_bytes rempages;
+	struct memlist *head = NULL, *ml;
+
+	vm_assert(bytes > 0);
+	vm_assert(!(bytes % VM_PAGE_SIZE));
+
+	rempages = bytes / VM_PAGE_SIZE;
+
+	/* unless we are told to allocate all memory
+	 * contiguously, tell the alloc function to grab whatever
+	 * block it can find.
+	 */
+	if(!(flags & PAF_CONTIG))
+		flags |= PAF_FIRSTBLOCK;
+
+	do {
+		struct memlist *ml;
+		phys_bytes mem, gotpages;
+		mem = alloc_pages(rempages, flags, &gotpages);
+
+		if(mem == NO_MEM) {
+			free_mem_list(head, 1);
+			return NULL;
+		}
+
+		vm_assert(gotpages <= rempages);
+		vm_assert(gotpages > 0);
+
+		if(!(SLABALLOC(ml))) {
+			free_mem_list(head, 1);
+			free_pages(mem, gotpages);
+			return NULL;
+		}
+
+		USE(ml,
+			ml->phys = CLICK2ABS(mem);
+			ml->length = CLICK2ABS(gotpages);
+			ml->next = head;);
+		head = ml;
+		rempages -= gotpages;
+	} while(rempages > 0);
+
+	for(ml = head; ml; ml = ml->next) {
+		vm_assert(ml->phys);
+		vm_assert(ml->length);
+	}
+
+	return head;
+}
+
+/*===========================================================================*
+ *				free_mem_list				     *
+ *===========================================================================*/
+void free_mem_list(struct memlist *list, int all)
+{
+	while(list) {
+		struct memlist *next;
+		next = list->next;
+		vm_assert(!(list->phys % VM_PAGE_SIZE));
+		vm_assert(!(list->length % VM_PAGE_SIZE));
+		if(all)
+			free_pages(list->phys / VM_PAGE_SIZE,
+				list->length / VM_PAGE_SIZE);
+		SLABFREE(list);
+		list = next;
+	}
+}
+
+/*===========================================================================*
+ *				print_mem_list				     *
+ *===========================================================================*/
+void print_mem_list(struct memlist *list)
+{
+	while(list) {
+		vm_assert(list->length > 0);
+		printf("0x%lx-0x%lx", list->phys, list->phys+list->length-1);
+		printf(" ");
+		list = list->next;
+	}
+	printf("\n");
+}


@@ -17,6 +17,7 @@
 #include <minix/safecopies.h>
 #include <minix/cpufeature.h>
 #include <minix/bitmap.h>
+#include <minix/debug.h>
 
 #include <errno.h>
 #include <stdlib.h>
@@ -45,7 +46,7 @@ PRIVATE int proc_pde = 0;
 PRIVATE int bigpage_ok = 0;
 
 /* Our process table entry. */
-struct vmproc *vmp = &vmproc[VM_PROC_NR];
+struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
 
 /* Spare memory, ready to go after initialization, to avoid a
  * circular dependency on allocating memory and writing it into VM's
@@ -114,6 +115,8 @@ PUBLIC void pt_sanitycheck(pt_t *pt, char *file, int line)
 	for(i = proc_pde; i < I386_VM_DIR_ENTRIES; i++) {
 		if(pt->pt_pt[i]) {
+			int pte;
+			MYASSERT(vm_addrok(pt->pt_pt[i], 1));
 			if(!(pt->pt_dir[i] & I386_VM_PRESENT)) {
 				printf("slot %d: pt->pt_pt[%d] = 0x%lx, but pt_dir entry 0x%lx\n",
 					slot, i, pt->pt_pt[i], pt->pt_dir[i]);
@@ -208,17 +211,26 @@ PRIVATE u32_t findhole(pt_t *pt, u32_t vmin, u32_t vmax)
 PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
 {
 	vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
-	if(vir >= vmp->vm_stacktop) {
+	if(vir >= vmprocess->vm_stacktop) {
 		vm_assert(!(vir % I386_PAGE_SIZE));
 		vm_assert(!(phys % I386_PAGE_SIZE));
-		FREE_MEM(ABS2CLICK(phys), pages);
-		if(pt_writemap(&vmp->vm_pt, arch_vir2map(vmp, vir),
+		free_mem(ABS2CLICK(phys), pages);
+		if(pt_writemap(&vmprocess->vm_pt, arch_vir2map(vmprocess, vir),
 			MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
 			panic("vm_freepages: pt_writemap failed");
 	} else {
 		printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
			pages);
 	}
+
+#if SANITYCHECKS
+	/* If SANITYCHECKS are on, flush the TLB so that accessing freed
+	 * pages always traps, even if a stale TLB entry still maps them.
+	 */
+	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+		panic("VMCTL_FLUSHTLB failed");
+	}
+#endif
 }
 
 /*===========================================================================*
@@ -281,7 +293,7 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
 	static int level = 0;
 	void *ret;
 
-	pt = &vmp->vm_pt;
+	pt = &vmprocess->vm_pt;
 	vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
 
 	level++;
@@ -289,7 +301,7 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
 	vm_assert(level >= 1);
 	vm_assert(level <= 2);
 
-	if(level > 1 || !(vmp->vm_flags & VMF_HASPT) || !meminit_done) {
+	if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
 		int r;
 		void *s;
 		s=vm_getsparepage(phys);
@@ -304,8 +316,8 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
 	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
-	loc = findhole(pt, arch_vir2map(vmp, vmp->vm_stacktop),
-		vmp->vm_arch.vm_data_top);
+	loc = findhole(pt, arch_vir2map(vmprocess, vmprocess->vm_stacktop),
+		vmprocess->vm_arch.vm_data_top);
 	if(loc == NO_MEM) {
 		level--;
 		printf("VM: vm_allocpage: findhole failed\n");
@@ -315,9 +327,9 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
 	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
-	if((newpage = ALLOC_MEM(CLICKSPERPAGE, 0)) == NO_MEM) {
+	if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
 		level--;
-		printf("VM: vm_allocpage: ALLOC_MEM failed\n");
+		printf("VM: vm_allocpage: alloc_mem failed\n");
 		return NULL;
 	}
@@ -326,7 +338,7 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
 	/* Map this page into our address space. */
 	if((r=pt_writemap(pt, loc, *phys, I386_PAGE_SIZE,
		I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
-		FREE_MEM(newpage, CLICKSPERPAGE);
+		free_mem(newpage, CLICKSPERPAGE);
 		printf("vm_allocpage writemap failed\n");
 		level--;
 		return NULL;
@@ -339,7 +351,7 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
 	level--;
 
 	/* Return user-space-ready pointer to it. */
-	ret = (void *) arch_map2vir(vmp, loc);
+	ret = (void *) arch_map2vir(vmprocess, loc);
 
 	return ret;
 }
@@ -355,8 +367,8 @@ PUBLIC void vm_pagelock(void *vir, int lockflag)
 	u32_t flags = I386_VM_PRESENT | I386_VM_USER;
 	pt_t *pt;
 
-	pt = &vmp->vm_pt;
-	m = arch_vir2map(vmp, (vir_bytes) vir);
+	pt = &vmprocess->vm_pt;
+	m = arch_vir2map(vmprocess, (vir_bytes) vir);
 
 	vm_assert(!(m % I386_PAGE_SIZE));
@@ -376,6 +388,51 @@ PUBLIC void vm_pagelock(void *vir, int lockflag)
 
 	return;
 }
+
+/*===========================================================================*
+ *				vm_addrok				     *
+ *===========================================================================*/
+PUBLIC int vm_addrok(void *vir, int writeflag)
+{
+	/* Check whether a page is present (and writable, if writeflag is
+	 * set) in VM's own page table.
+	 */
+	pt_t *pt = &vmprocess->vm_pt;
+	int pde, pte;
+	vir_bytes v = arch_vir2map(vmprocess, (vir_bytes) vir);
+
+	/* No PT yet? Don't bother looking. */
+	if(!(vmprocess->vm_flags & VMF_HASPT)) {
+		return 1;
+	}
+
+	pde = I386_VM_PDE(v);
+	pte = I386_VM_PTE(v);
+
+	if(!(pt->pt_dir[pde] & I386_VM_PRESENT)) {
+		printf("addr not ok: missing pde %d\n", pde);
+		return 0;
+	}
+
+	if(writeflag &&
+		!(pt->pt_dir[pde] & I386_VM_WRITE)) {
+		printf("addr not ok: pde %d present but pde unwritable\n", pde);
+		return 0;
+	}
+
+	if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
+		printf("addr not ok: missing pde %d / pte %d\n",
+			pde, pte);
+		return 0;
+	}
+
+	if(writeflag &&
+		!(pt->pt_pt[pde][pte] & I386_VM_WRITE)) {
+		printf("addr not ok: pde %d / pte %d present but unwritable\n",
+			pde, pte);
+		return 0;
+	}
+
+	return 1;
+}
 
 /*===========================================================================*
  *				pt_ptalloc				     *
 *===========================================================================*/
@@ -412,6 +469,32 @@ PRIVATE int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
 	return OK;
 }
 
+PRIVATE char *ptestr(u32_t pte)
+{
+#define FLAG(constant, name) { \
+	if(pte & (constant)) { strcat(str, name); strcat(str, " "); } \
+}
+
+	static char str[40];	/* large enough for all flag names */
+	if(!(pte & I386_VM_PRESENT)) {
+		return "not present";
+	}
+	str[0] = '\0';
+	FLAG(I386_VM_WRITE, "W");
+	FLAG(I386_VM_USER, "U");
+	FLAG(I386_VM_PWT, "PWT");
+	FLAG(I386_VM_PCD, "PCD");
+	FLAG(I386_VM_ACC, "ACC");
+	FLAG(I386_VM_DIRTY, "DIRTY");
+	FLAG(I386_VM_PS, "PS");
+	FLAG(I386_VM_GLOBAL, "G");
+	FLAG(I386_VM_PTAVAIL1, "AV1");
+	FLAG(I386_VM_PTAVAIL2, "AV2");
+	FLAG(I386_VM_PTAVAIL3, "AV3");
+
+	return str;
+}
+
 /*===========================================================================*
  *				pt_writemap				     *
 *===========================================================================*/
@@ -436,14 +519,8 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
	 * what's actually written into the PTE if I386_VM_PRESENT
	 * isn't on, so we can just write MAP_NONE into it.
	 */
-#if SANITYCHECKS
-	if(physaddr != MAP_NONE && !(flags & I386_VM_PRESENT)) {
-		panic("pt_writemap: writing dir with !P");
-	}
-	if(physaddr == MAP_NONE && flags) {
-		panic("pt_writemap: writing 0 with flags");
-	}
-#endif
+	vm_assert(physaddr == MAP_NONE || (flags & I386_VM_PRESENT));
+	vm_assert(physaddr != MAP_NONE || !flags);
 
 	finalpde = I386_VM_PDE(v + I386_PAGE_SIZE * pages);
 
@@ -454,11 +531,7 @@
	 */
 	for(pdecheck = I386_VM_PDE(v); pdecheck <= finalpde; pdecheck++) {
 		vm_assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
-		if(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE) {
-			printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
-				physaddr, v);
-			panic("pt_writemap: BIGPAGE found");
-		}
+		vm_assert(!(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE));
 		if(!(pt->pt_dir[pdecheck] & I386_VM_PRESENT)) {
 			int r;
 			if(verify) {
@@ -497,7 +570,8 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
 		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
-		vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);
+		vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT));
+		vm_assert(pt->pt_pt[pde]);
 
 #if SANITYCHECKS
 		/* We don't expect to overwrite a page. */
@@ -509,7 +583,7 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
 		}
 
 		if(writemapflags & WMF_FREE) {
-			FREE_MEM(ABS2CLICK(physaddr), 1);
+			free_mem(ABS2CLICK(physaddr), 1);
 		}
 
 		/* Entry we will write. */
@@ -521,12 +595,23 @@ PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
			maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
			/* Verify pagetable entry. */
			if(maskedentry != entry) {
-				printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
-					pt->pt_pt[pde][pte], maskedentry, entry);
+				printf("pt_writemap: mismatch: ");
+				if((entry & I386_VM_ADDR_MASK) !=
+					(maskedentry & I386_VM_ADDR_MASK)) {
+					printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
+						entry, maskedentry);
+				} else printf("phys ok; ");
+				printf(" flags: found %s; ",
+					ptestr(pt->pt_pt[pde][pte]));
+				printf(" masked %s; ",
+					ptestr(maskedentry));
+				printf(" expected %s\n", ptestr(entry));
				return EFAULT;
			}
		} else {
			/* Write pagetable entry. */
+#if SANITYCHECKS
+			vm_assert(vm_addrok(pt->pt_pt[pde], 1));
+#endif
			pt->pt_pt[pde][pte] = entry;
		}
@@ -644,8 +729,7 @@ PUBLIC void pt_init(phys_bytes usedlimit)
 	phys_bytes sparepages_ph;
 
 	/* Shorthand. */
-	newpt = &vmp->vm_pt;
+	newpt = &vmprocess->vm_pt;
 
 	/* Get ourselves spare pages. */
 	if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
@@ -679,9 +763,9 @@
 	free_pde = id_map_high_pde+1;
 
 	/* Initial (current) range of our virtual address space. */
-	lo = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
-	hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
-		vmp->vm_arch.vm_seg[S].mem_len);
+	lo = CLICK2ABS(vmprocess->vm_arch.vm_seg[T].mem_phys);
+	hi = CLICK2ABS(vmprocess->vm_arch.vm_seg[S].mem_phys +
+		vmprocess->vm_arch.vm_seg[S].mem_len);
 
 	vm_assert(!(lo % I386_PAGE_SIZE));
 	vm_assert(!(hi % I386_PAGE_SIZE));
@@ -713,9 +797,9 @@
 	}
 
 	/* Move segments up too. */
-	vmp->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
-	vmp->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
-	vmp->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
+	vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
+	vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
+	vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
 
 	/* Allocate us a page table in which to remember page directory
	 * pointers.
@@ -731,14 +815,14 @@
	 * like regular processes have.
	 */
 	extra_clicks = ABS2CLICK(VM_DATATOP - hi);
-	vmp->vm_arch.vm_seg[S].mem_len += extra_clicks;
+	vmprocess->vm_arch.vm_seg[S].mem_len += extra_clicks;
 
 	/* We pretend to the kernel we have a huge stack segment to
	 * increase our data segment.
	 */
-	vmp->vm_arch.vm_data_top =
-		(vmp->vm_arch.vm_seg[S].mem_vir +
-		vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
+	vmprocess->vm_arch.vm_data_top =
+		(vmprocess->vm_arch.vm_seg[S].mem_vir +
+		vmprocess->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
 
 	/* Where our free virtual address space starts.
	 * This is only a hint to the VM system.
@@ -746,7 +830,7 @@
 	newpt->pt_virtop = 0;
 
 	/* Let other functions know VM now has a private page table. */
-	vmp->vm_flags |= VMF_HASPT;
+	vmprocess->vm_flags |= VMF_HASPT;
 
 	/* Now reserve another pde for kernel's own mappings. */
 	{
@@ -813,19 +897,19 @@
 
 	/* Give our process the new, copied, private page table. */
 	pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
-	pt_bind(newpt, vmp);
+	pt_bind(newpt, vmprocess);
 
 	/* new segment limit for the kernel after paging is enabled */
 	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
 	/* the memory map which must be installed after paging is enabled */
-	ep_data.mem_map = vmp->vm_arch.vm_seg;
+	ep_data.mem_map = vmprocess->vm_arch.vm_seg;
 
 	/* Now actually enable paging. */
 	if(sys_vmctl_enable_paging(&ep_data) != OK)
 		panic("pt_init: enable paging failed");
 
 	/* Back to reality - this is where the stack actually is. */
-	vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;
+	vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;
 
 	/* All OK. */
 	return;
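
Editor's note (not part of the commit): the new vm_addrok() above walks the
two-level i386 structure, so a virtual address splits into a page directory
index, a page table index, and a page offset. A stand-alone sketch of that
split, assuming the standard i386 layout (4 KiB pages, 1024 entries per
level) that the real I386_VM_PDE/I386_VM_PTE macros encode:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-ins for I386_VM_PDE()/I386_VM_PTE(): top 10 bits index the
	 * page directory, the next 10 the page table, the low 12 the page. */
	#define PDE(v)	(((uint32_t)(v)) >> 22)
	#define PTE(v)	((((uint32_t)(v)) >> 12) & 0x3ff)

	int main(void)
	{
		uint32_t v = 0x08048123;	/* arbitrary example address */
		printf("vaddr 0x%08x -> pde %u, pte %u, offset 0x%03x\n",
			(unsigned)v, (unsigned)PDE(v), (unsigned)PTE(v),
			(unsigned)(v & 0xfff));
		return 0;
	}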


@@ -18,6 +18,7 @@
 #include <sys/mman.h>
 
 #include <errno.h>
+#include <assert.h>
 #include <env.h>
 
 #include "proto.h"


@@ -88,8 +88,6 @@ PUBLIC int do_exec_newmem(message *msg)
 	vmp= &vmproc[proc_n];
 	ptr= msg->VMEN_ARGSPTR;
 
-	NOTRUNNABLE(vmp->vm_endpoint);
-
 	if(msg->VMEN_ARGSSIZE != sizeof(args)) {
 		printf("VM: exec_newmem: args size %d != %ld\n",
			msg->VMEN_ARGSSIZE, sizeof(args));
@@ -160,8 +158,6 @@ SANITYCHECK(SCL_DETAIL);
 	if (!sh_mp)			/* Load text if sh_mp = NULL */
 		msg->VMEN_FLAGS |= EXC_NM_RF_LOAD_TEXT;
 
-	NOTRUNNABLE(vmp->vm_endpoint);
-
 	return OK;
 }
@@ -225,8 +221,8 @@ vir_bytes *stack_top;		/* top of process stack */
  */
 SANITYCHECK(SCL_DETAIL);
 	if(hadpt) {
-		pt_free(&rmp->vm_pt);
 		rmp->vm_flags &= ~VMF_HASPT;
+		pt_free(&rmp->vm_pt);
 	}
 	vm_assert(!(vmpold->vm_flags & VMF_INUSE));
 	*vmpold = *rmp;	/* copy current state. */
@@ -236,11 +232,11 @@ SANITYCHECK(SCL_DETAIL);
 	if(!hadpt) {
 		if (find_share(rmp, rmp->vm_ino, rmp->vm_dev, rmp->vm_ctime) == NULL) {
 			/* No other process shares the text segment, so free it. */
-			FREE_MEM(rmp->vm_arch.vm_seg[T].mem_phys, rmp->vm_arch.vm_seg[T].mem_len);
+			free_mem(rmp->vm_arch.vm_seg[T].mem_phys, rmp->vm_arch.vm_seg[T].mem_len);
 		}
 
 		/* Free the data and stack segments. */
-		FREE_MEM(rmp->vm_arch.vm_seg[D].mem_phys,
+		free_mem(rmp->vm_arch.vm_seg[D].mem_phys,
			rmp->vm_arch.vm_seg[S].mem_vir
			+ rmp->vm_arch.vm_seg[S].mem_len
			- rmp->vm_arch.vm_seg[D].mem_vir);
@@ -271,6 +267,7 @@ SANITYCHECK(SCL_DETAIL);
 SANITYCHECK(SCL_DETAIL);
		printf("VM: new_mem: failed\n");
		if(ptok) {
+			rmp->vm_flags &= ~VMF_HASPT;
			pt_free(&rmp->vm_pt);
		}
		*rmp = *vmpold;	/* undo. */
@@ -304,9 +301,9 @@ SANITYCHECK(SCL_DETAIL);
	} else {
		phys_clicks new_base;
 
-		new_base = ALLOC_MEM(text_clicks + tot_clicks, 0);
+		new_base = alloc_mem(text_clicks + tot_clicks, 0);
		if (new_base == NO_MEM) {
-			printf("VM: new_mem: ALLOC_MEM failed\n");
+			printf("VM: new_mem: alloc_mem failed\n");
			return(ENOMEM);
		}


@@ -16,6 +16,7 @@
 #include <minix/bitmap.h>
 
 #include <errno.h>
+#include <assert.h>
 #include <env.h>
 
 #include "glo.h"
@@ -25,11 +26,11 @@
 PUBLIC void free_proc(struct vmproc *vmp)
 {
-	map_free_proc(vmp);
 	if(vmp->vm_flags & VMF_HASPT) {
		vmp->vm_flags &= ~VMF_HASPT;
		pt_free(&vmp->vm_pt);
	}
+	map_free_proc(vmp);
	vmp->vm_regions = NULL;
 #if VMSTATS
	vmp->vm_bytecopies = 0;
@@ -77,7 +78,7 @@ SANITYCHECK(SCL_DETAIL);
 SANITYCHECK(SCL_DETAIL);
	} else {
		/* Free the data and stack segments. */
-		FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys,
+		free_mem(vmp->vm_arch.vm_seg[D].mem_phys,
			vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len -
			vmp->vm_arch.vm_seg[D].mem_vir);
@@ -86,7 +87,7 @@ SANITYCHECK(SCL_DETAIL);
			/* No other process shares the text segment,
			 * so free it.
			 */
-			FREE_MEM(vmp->vm_arch.vm_seg[T].mem_phys,
+			free_mem(vmp->vm_arch.vm_seg[T].mem_phys,
				vmp->vm_arch.vm_seg[T].mem_len);
		}
	}


@@ -58,8 +58,6 @@ PUBLIC int do_fork(message *msg)
 	vmc = &vmproc[childproc];	/* child */
 	vm_assert(vmc->vm_slot == childproc);
 
-	NOTRUNNABLE(vmp->vm_endpoint);
-
 	if(vmp->vm_flags & VMF_HAS_DMA) {
 		printf("VM: %d has DMA memory and may not fork\n", msg->VMF_ENDPOINT);
 		return EINVAL;
@@ -74,7 +72,7 @@
 	vmc->vm_regions = NULL;
 	vmc->vm_endpoint = NONE;	/* In case someone tries to use it. */
 	vmc->vm_pt = origpt;
-	vmc->vm_flags |= VMF_HASPT;
+	vmc->vm_flags &= ~VMF_HASPT;
 
 #if VMSTATS
 	vmc->vm_bytecopies = 0;
@@ -85,6 +83,8 @@
 		return ENOMEM;
 	}
 
+	vmc->vm_flags |= VMF_HASPT;
+
 	if(fullvm) {
 		SANITYCHECK(SCL_DETAIL);
@@ -102,7 +102,7 @@
 		SANITYCHECK(SCL_DETAIL);
 	} else {
 		vir_bytes sp;
-		phys_bytes d_abs, s_abs;
+		struct vir_region *heap, *stack;
 		vir_bytes text_bytes, data_bytes, stack_bytes, parent_gap_bytes,
			child_gap_bytes;
@@ -147,30 +147,32 @@
 			return r;
 		}
 
-		if((d_abs = map_lookup_phys(vmc, VRT_HEAP)) == MAP_NONE)
-			panic("couldn't lookup data");
-		if((s_abs = map_lookup_phys(vmc, VRT_STACK)) == MAP_NONE)
-			panic("couldn't lookup stack");
+		if(!(heap = map_region_lookup_tag(vmc, VRT_HEAP)))
+			panic("couldn't lookup heap");
+		vm_assert(heap->phys);
+		if(!(stack = map_region_lookup_tag(vmc, VRT_STACK)))
+			panic("couldn't lookup stack");
+		vm_assert(stack->phys);
 
 		/* Now copy the memory regions. */
 
 		if(vmc->vm_arch.vm_seg[T].mem_len > 0) {
-			phys_bytes t_abs;
-			if((t_abs = map_lookup_phys(vmc, VRT_TEXT)) == MAP_NONE)
+			struct vir_region *text;
+			if(!(text = map_region_lookup_tag(vmc, VRT_TEXT)))
 				panic("couldn't lookup text");
-			if(sys_abscopy(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
-				t_abs, text_bytes) != OK)
+			vm_assert(text->phys);
+			if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys),
+				text, 0, text_bytes) != OK)
 				panic("couldn't copy text");
 		}
 
-		if(sys_abscopy(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
-			d_abs, data_bytes) != OK)
-			panic("couldn't copy data");
+		if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys),
+			heap, 0, data_bytes) != OK)
+			panic("couldn't copy heap");
 
-		if(sys_abscopy(
-			CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
-				vmc->vm_arch.vm_seg[D].mem_len) + parent_gap_bytes,
-			s_abs + child_gap_bytes, stack_bytes) != OK)
+		if(copy_abs2region(CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys +
+				vmc->vm_arch.vm_seg[D].mem_len) + parent_gap_bytes,
+			stack, child_gap_bytes, stack_bytes) != OK)
 			panic("couldn't copy stack");
 	}
@@ -187,9 +189,6 @@
 		panic("do_fork can't sys_fork: %d", r);
 	}
 
-	NOTRUNNABLE(vmp->vm_endpoint);
-	NOTRUNNABLE(vmc->vm_endpoint);
-
 	if(fullvm) {
 		vir_bytes vir;
 		/* making these messages writable is an optimisation


@@ -31,3 +31,4 @@ EXTERN int total_pages;
 EXTERN long vm_paged;
 
 EXTERN int meminit_done;


@@ -24,6 +24,7 @@
 #include <string.h>
 #include <env.h>
 #include <stdio.h>
+#include <assert.h>
 
 #include <memory.h>
@@ -79,6 +80,8 @@ PUBLIC int main(void)
	/* SEF local startup. */
	sef_local_startup();
 
+	SANITYCHECK(SCL_TOP);
+
	/* This is VM's main loop. */
	while (TRUE) {
		int r, c;
@@ -166,7 +169,6 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
 
 #if SANITYCHECKS
	incheck = nocheck = 0;
-	FIXME("VM SANITYCHECKS are on");
 #endif
 
	vm_paged = 1;
@@ -278,7 +280,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
		panic("VM: vmctl for new stack failed");
	}
 
-	FREE_MEM(vmp->vm_arch.vm_seg[D].mem_phys +
+	free_mem(vmp->vm_arch.vm_seg[D].mem_phys +
		vmp->vm_arch.vm_seg[D].mem_len,
		old_stack);


@@ -1,308 +0,0 @@
-#define _SYSTEM 1
-
-#include <minix/type.h>
-#include <minix/config.h>
-#include <minix/const.h>
-#include <minix/sysutil.h>
-#include <minix/syslib.h>
-
-#include <limits.h>
-#include <errno.h>
-#include <assert.h>
-#include <stdint.h>
-#include <memory.h>
-
-#include "vm.h"
-#include "proto.h"
-#include "util.h"
-#include "glo.h"
-#include "region.h"
-#include "sanitycheck.h"
-
-/*===========================================================================*
- *				split_phys				     *
- *===========================================================================*/
-PRIVATE int split_phys(struct phys_region *pr, vir_bytes point)
-{
-	struct phys_region *newpr, *q, *prev;
-	struct phys_block *newpb;
-	struct phys_block *pb = pr->ph;
-
-	/* Split the phys region into 2 parts by @point. */
-	if(pr->offset >= point || pr->offset + pb->length <= point)
-		return OK;
-	if(!SLABALLOC(newpb))
-		return ENOMEM;
-
-	/* Split phys block. */
-	*newpb = *pb;
-	pb->length = point - pr->offset;
-	newpb->length -= pb->length;
-	newpb->phys += pb->length;
-
-	/* Split phys regions in a list. */
-	for(q = pb->firstregion; q; q = q->next_ph_list) {
-		if(!SLABALLOC(newpr))
-			return ENOMEM;
-
-		*newpr = *q;
-		newpr->ph = newpb;
-		newpr->offset += pb->length;
-
-		/* Link to the vir region's phys region list. */
-		physr_insert(newpr->parent->phys, newpr);
-
-		/* Link to the next_ph_list. */
-		if(q == pb->firstregion) {
-			newpb->firstregion = newpr;
-			prev = newpr;
-		} else {
-			prev->next_ph_list = newpr;
-			prev = newpr;
-		}
-	}
-	prev->next_ph_list = NULL;
-
-	return OK;
-}
-
-/*===========================================================================*
- *				rm_phys_regions				     *
- *===========================================================================*/
-PRIVATE void rm_phys_regions(struct vir_region *region,
-	vir_bytes begin, vir_bytes length)
-{
-	/* Remove all phys regions between @begin and @begin+length.
-	 *
-	 * Don't update the page table, because we will update it at map_memory()
-	 * later.
-	 */
-	struct phys_region *pr;
-	physr_iter iter;
-
-	physr_start_iter(region->phys, &iter, begin, AVL_GREATER_EQUAL);
-	while((pr = physr_get_iter(&iter)) && pr->offset < begin + length) {
-		pb_unreferenced(region, pr);
-		physr_remove(region->phys, pr->offset);
-		physr_start_iter(region->phys, &iter, begin,
-			AVL_GREATER_EQUAL);
-		SLABFREE(pr);
-	}
-}
-
-/*===========================================================================*
- *				clean_phys_regions			     *
- *===========================================================================*/
-PRIVATE void clean_phys_regions(struct vir_region *region,
-	vir_bytes offset, vir_bytes length)
-{
-	/* Consider @offset as the start address and @offset+length as the end
-	 * address. If there are phys regions crossing the start address or the
-	 * end address, split them into 2 parts.
-	 *
-	 * We assume that the phys regions are listed in order and don't overlap.
-	 */
-	struct phys_region *pr;
-	physr_iter iter;
-
-	physr_start_iter_least(region->phys, &iter);
-	while((pr = physr_get_iter(&iter))) {
-		/* If this phys region crosses the start address, split it. */
-		if(pr->offset < offset
-			&& pr->offset + pr->ph->length > offset) {
-			split_phys(pr, offset);
-			physr_start_iter_least(region->phys, &iter);
-		}
-		/* If this phys region crosses the end address, split it. */
-		else if(pr->offset < offset + length
-			&& pr->offset + pr->ph->length > offset + length) {
-			split_phys(pr, offset + length);
-			physr_start_iter_least(region->phys, &iter);
-		}
-		else {
-			physr_incr_iter(&iter);
-		}
-	}
-}
-
-/*===========================================================================*
- *				do_map_memory				     *
- *===========================================================================*/
-PRIVATE int do_map_memory(struct vmproc *vms, struct vmproc *vmd,
-	struct vir_region *vrs, struct vir_region *vrd,
-	vir_bytes offset_s, vir_bytes offset_d,
-	vir_bytes length, int flag)
-{
-	struct phys_region *prs;
-	struct phys_region *newphysr;
-	struct phys_block *pb;
-	physr_iter iter;
-	u32_t pt_flag = PTF_PRESENT | PTF_USER;
-	vir_bytes end;
-
-	/* Search for the first phys region in the source process. */
-	physr_start_iter(vrs->phys, &iter, offset_s, AVL_EQUAL);
-	prs = physr_get_iter(&iter);
-	if(!prs)
-		panic("do_map_memory: no aligned phys region: %d", 0);
-
-	/* flag: 0 -> read-only
-	 *       1 -> writable
-	 *      -1 -> share as COW, so read-only
-	 */
-	if(flag > 0)
-		pt_flag |= PTF_WRITE;
-
-	/* Map phys blocks in the source process to the destination process. */
-	end = offset_d + length;
-	while((prs = physr_get_iter(&iter)) && offset_d < end) {
-		/* If a SMAP share was requested but the phys block has already
-		 * been shared as COW, copy the block for the source phys region
-		 * first.
-		 */
-		pb = prs->ph;
-		if(flag >= 0 && pb->refcount > 1
-			&& pb->share_flag == PBSH_COW) {
-			map_copy_ph_block(vms, vrs, prs);
-			pb = prs->ph;
-		}
-
-		/* Allocate a new phys region. */
-		if(!SLABALLOC(newphysr))
-			return ENOMEM;
-
-		/* Set and link the new phys region to the block. */
-		newphysr->ph = pb;
-		newphysr->offset = offset_d;
-		newphysr->parent = vrd;
-		newphysr->next_ph_list = pb->firstregion;
-		pb->firstregion = newphysr;
-		physr_insert(newphysr->parent->phys, newphysr);
-		pb->refcount++;
-
-		/* If a COW share was requested but the phys block has already
-		 * been shared as SMAP, give up on COW and copy the block for
-		 * the destination phys region now.
-		 */
-		if(flag < 0 && pb->refcount > 1
-			&& pb->share_flag == PBSH_SMAP) {
-			map_copy_ph_block(vmd, vrd, newphysr);
-		}
-		else {
-			/* See if this is a COW share or SMAP share. */
-			if(flag < 0) {			/* COW share */
-				pb->share_flag = PBSH_COW;
-				/* Update the page table for the src process. */
-				pt_writemap(&vms->vm_pt, offset_s + vrs->vaddr,
-					pb->phys, pb->length,
-					pt_flag, WMF_OVERWRITE);
-			}
-			else {				/* SMAP share */
-				pb->share_flag = PBSH_SMAP;
-			}
-			/* Update the page table for the destination process. */
-			pt_writemap(&vmd->vm_pt, offset_d + vrd->vaddr,
-				pb->phys, pb->length, pt_flag, WMF_OVERWRITE);
-		}
-
-		physr_incr_iter(&iter);
-		offset_d += pb->length;
-		offset_s += pb->length;
-	}
-	return OK;
-}
-
-/*===========================================================================*
- *				map_memory				     *
- *===========================================================================*/
-PUBLIC int map_memory(endpoint_t sour, endpoint_t dest,
-	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
-{
-	/* This is the entry point. This function will be called by
-	 * handle_memory() when VM receives a map-memory request.
-	 */
-	struct vmproc *vms, *vmd;
-	struct vir_region *vrs, *vrd;
-	physr_iter iterd;
-	vir_bytes offset_s, offset_d;
-	int p;
-	int r;
-
-	if(vm_isokendpt(sour, &p) != OK)
-		panic("map_memory: bad endpoint: %d", sour);
-	vms = &vmproc[p];
-	if(vm_isokendpt(dest, &p) != OK)
-		panic("map_memory: bad endpoint: %d", dest);
-	vmd = &vmproc[p];
-
-	vrs = map_lookup(vms, virt_s);
-	vm_assert(vrs);
-	vrd = map_lookup(vmd, virt_d);
-	vm_assert(vrd);
-
-	/* Linear address -> offset from start of vir region. */
-	offset_s = virt_s - vrs->vaddr;
-	offset_d = virt_d - vrd->vaddr;
-
-	/* Make sure that the range in the source process has been mapped
-	 * to physical memory.
-	 */
-	map_handle_memory(vms, vrs, offset_s, length, 0);
-
-	/* Prepare work. */
-	clean_phys_regions(vrs, offset_s, length);
-	clean_phys_regions(vrd, offset_d, length);
-	rm_phys_regions(vrd, offset_d, length);
-
-	/* Map memory. */
-	r = do_map_memory(vms, vmd, vrs, vrd, offset_s, offset_d, length, flag);
-
-	return r;
-}
-
-/*===========================================================================*
- *				unmap_memory				     *
- *===========================================================================*/
-PUBLIC int unmap_memory(endpoint_t sour, endpoint_t dest,
-	vir_bytes virt_s, vir_bytes virt_d, vir_bytes length, int flag)
-{
-	struct vmproc *vmd;
-	struct vir_region *vrd;
-	struct phys_region *pr;
-	struct phys_block *pb;
-	physr_iter iter;
-	vir_bytes off, end;
-	int p;
-
-	/* Use information on the destination process to unmap. */
-	if(vm_isokendpt(dest, &p) != OK)
-		panic("unmap_memory: bad endpoint: %d", dest);
-	vmd = &vmproc[p];
-
-	vrd = map_lookup(vmd, virt_d);
-	vm_assert(vrd);
-
-	/* Search for the first phys region in the destination process. */
-	off = virt_d - vrd->vaddr;
-	physr_start_iter(vrd->phys, &iter, off, AVL_EQUAL);
-	pr = physr_get_iter(&iter);
-	if(!pr)
-		panic("unmap_memory: no aligned phys region: %d", 0);
-
-	/* Copy the phys block now rather than doing COW. */
-	end = off + length;
-	while((pr = physr_get_iter(&iter)) && off < end) {
-		pb = pr->ph;
-		vm_assert(pb->refcount > 1);
-		vm_assert(pb->share_flag == PBSH_SMAP);
-
-		map_copy_ph_block(vmd, vrd, pr);
-
-		physr_incr_iter(&iter);
-		off += pb->length;
-	}
-
-	return OK;
-}

servers/vm/memlist.h (new file)

@@ -0,0 +1,11 @@
+#ifndef _MEMLIST_H
+#define _MEMLIST_H 1
+
+struct memlist {
+	struct memlist *next;
+	phys_bytes	phys;	/* physical address in bytes */
+	phys_bytes	length;	/* length in bytes */
+};
+
+#endif


@@ -15,10 +15,12 @@
 #include <minix/syslib.h>
 #include <minix/safecopies.h>
 #include <minix/bitmap.h>
+#include <minix/debug.h>
 
 #include <sys/mman.h>
 
 #include <errno.h>
+#include <assert.h>
 #include <string.h>
 #include <env.h>
 #include <stdio.h>
@@ -59,12 +61,12 @@ PUBLIC int do_mmap(message *m)
 			return EINVAL;
 		}
 
-		if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
 		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
 		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
 		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
 		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
 		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;
+		if(m->VMM_FLAGS & MAP_CONTIG) vrflags |= VR_CONTIG;
 
 		if(len % VM_PAGE_SIZE)
 			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);
@@ -238,6 +240,24 @@ PUBLIC int do_remap(message *m)
 	if (!(region = map_lookup(svmp, sa)))
 		return EINVAL;
 
+	if(region->vaddr != sa) {
+		printf("VM: do_remap: not start of region.\n");
+		return EFAULT;
+	}
+
+	if(!(region->flags & VR_SHARED)) {
+		printf("VM: do_remap: not shared.\n");
+		return EFAULT;
+	}
+
+	if (size % VM_PAGE_SIZE)
+		size += VM_PAGE_SIZE - size % VM_PAGE_SIZE;
+
+	if(size != region->length) {
+		printf("VM: do_remap: not size of region.\n");
+		return EFAULT;
+	}
+
 	if ((r = map_remap(dvmp, da, size, region, &startv)) != OK)
 		return r;


@@ -22,6 +22,7 @@
 #include <stdio.h>
 #include <fcntl.h>
 #include <signal.h>
+#include <assert.h>
 
 #include <pagefaults.h>


@@ -18,7 +18,8 @@ struct phys_region;
 #include "vm.h"
 
 /* alloc.c */
-_PROTOTYPE( phys_clicks alloc_mem_f, (phys_clicks clicks, u32_t flags)	);
+_PROTOTYPE( phys_clicks alloc_mem, (phys_clicks clicks, u32_t flags)	);
+_PROTOTYPE( struct memlist *alloc_mem_in_list, (phys_bytes bytes, u32_t flags));
 _PROTOTYPE( int do_adddma, (message *msg)				);
 _PROTOTYPE( int do_deldma, (message *msg)				);
 _PROTOTYPE( int do_getdma, (message *msg)				);
@@ -28,12 +29,11 @@ _PROTOTYPE( void printmemstats, (void)				);
 _PROTOTYPE( void usedpages_reset, (void)				);
 _PROTOTYPE( int usedpages_add_f, (phys_bytes phys, phys_bytes len,
	char *file, int line)						);
-_PROTOTYPE( void free_mem_f, (phys_clicks base, phys_clicks clicks)	);
+_PROTOTYPE( void free_mem, (phys_clicks base, phys_clicks clicks)	);
+_PROTOTYPE( void free_mem_list, (struct memlist *list, int all));
+_PROTOTYPE( void print_mem_list, (struct memlist *ml));
 #define usedpages_add(a, l) usedpages_add_f(a, l, __FILE__, __LINE__)
-#define ALLOC_MEM(clicks, flags) alloc_mem_f(clicks, flags)
-#define FREE_MEM(base, clicks) free_mem_f(base, clicks)
 
 _PROTOTYPE( void mem_init, (struct memory *chunks)			);
 
 /* utility.c */
@@ -109,6 +109,7 @@ _PROTOTYPE( void *vm_allocpage, (phys_bytes *p, int cat));
 _PROTOTYPE( void pt_cycle, (void));
 _PROTOTYPE( int pt_mapkernel, (pt_t *pt));
 _PROTOTYPE( void vm_pagelock, (void *vir, int lockflag)		);
+_PROTOTYPE( int vm_addrok, (void *vir, int write)			);
 
 #if SANITYCHECKS
 _PROTOTYPE( void pt_sanitycheck, (pt_t *pt, char *file, int line)	);
@@ -123,7 +124,7 @@ _PROTOTYPE(void slabfree,(void *mem, int bytes));
 _PROTOTYPE(void slabstats,(void));
 _PROTOTYPE(void slab_sanitycheck, (char *file, int line));
 #define SLABALLOC(var) (var = slaballoc(sizeof(*var)))
-#define SLABFREE(ptr) slabfree(ptr, sizeof(*(ptr)))
+#define SLABFREE(ptr) do { slabfree(ptr, sizeof(*(ptr))); (ptr) = NULL; } while(0)
 
 #if SANITYCHECKS
 _PROTOTYPE(void slabunlock,(void *mem, int bytes));
@@ -159,14 +160,14 @@ _PROTOTYPE(int map_remap, (struct vmproc *dvmp, vir_bytes da, size_t size,
 _PROTOTYPE(int map_get_phys, (struct vmproc *vmp, vir_bytes addr, phys_bytes *r));
 _PROTOTYPE(int map_get_ref, (struct vmproc *vmp, vir_bytes addr, u8_t *cnt));
 
-_PROTOTYPE(int map_copy_ph_block, (struct vmproc *vmp,
-	struct vir_region *region, struct phys_region *ph));
 _PROTOTYPE(void pb_unreferenced, (struct vir_region *region,
	struct phys_region *pr));
 _PROTOTYPE(void get_usage_info, (struct vmproc *vmp,
	struct vm_usage_info *vui));
 _PROTOTYPE(int get_region_info, (struct vmproc *vmp,
	struct vm_region_info *vri, int count, vir_bytes *nextp));
+_PROTOTYPE(int copy_abs2region, (phys_bytes abs,
+	struct vir_region *destregion, phys_bytes offset, phys_bytes len));
 
 #if SANITYCHECKS
 _PROTOTYPE(void map_sanitycheck,(char *file, int line));
 #endif
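
Editor's note (not part of the commit): the SLABFREE change above makes the
macro clear its argument after freeing, so a stale pointer fails fast instead
of silently pointing at recycled slab memory, and the do { } while(0) wrapper
keeps the two statements safe in unbraced if/else bodies. A stand-alone
sketch with plain malloc/free standing in for the slab calls:

	#include <stdio.h>
	#include <stdlib.h>

	/* Same shape as the new SLABFREE: free, then null the pointer. */
	#define SLABFREE(ptr) do { free(ptr); (ptr) = NULL; } while(0)

	int main(void)
	{
		int *p = malloc(sizeof(*p));
		if(!p) return 1;
		*p = 42;
		if(*p == 42)
			SLABFREE(p);	/* expands safely without braces */
		printf("p is now %p\n", (void *)p);	/* NULL, not dangling */
		return 0;
	}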

File diff suppressed because it is too large.


@@ -65,6 +65,7 @@ struct vir_region {
 #define VR_PHYS64K	0x004	/* Physical memory must be 64k aligned. */
 #define VR_LOWER16MB	0x008
 #define VR_LOWER1MB	0x010
+#define VR_CONTIG	0x020	/* Must be physically contiguous. */
 
 /* Mapping type: */
 #define VR_ANON		0x100	/* Memory to be cleared and allocated */
@@ -79,7 +80,6 @@ struct vir_region {
 
 /* map_page_region flags */
 #define MF_PREALLOC	0x01
-#define MF_CONTIG	0x02
 
 #endif


@@ -20,6 +20,7 @@
 #include <string.h>
 #include <env.h>
 #include <stdio.h>
+#include <assert.h>
 
 #include "glo.h"
 #include "proto.h"


@@ -1,6 +1,8 @@
 #ifndef _SANITYCHECK_H
 #define _SANITYCHECK_H 1
 
+#include <assert.h>
+
 #include "vm.h"
 #include "glo.h"
@@ -17,15 +19,15 @@
	slab_sanitycheck(__FILE__, __LINE__); }
 
 #define SANITYCHECK(l) if(!nocheck && ((l) <= vm_sanitychecklevel)) {  \
-		struct vmproc *vmp; \
+		struct vmproc *vmpr; \
		vm_assert(incheck == 0); \
		incheck = 1; \
		usedpages_reset(); \
		slab_sanitycheck(__FILE__, __LINE__); \
-		for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
-			if((vmp->vm_flags & (VMF_INUSE | VMF_HASPT)) == \
+		for(vmpr = vmproc; vmpr < &vmproc[VMP_NR]; vmpr++) { \
+			if((vmpr->vm_flags & (VMF_INUSE | VMF_HASPT)) == \
				(VMF_INUSE | VMF_HASPT)) { \
-				PT_SANE(&vmp->vm_pt); \
+				PT_SANE(&vmpr->vm_pt); \
			} \
		} \
		map_sanitycheck(__FILE__, __LINE__); \
@@ -50,22 +52,11 @@
	} \
 }
 
-#define NOTRUNNABLE(ep) { \
-	struct proc pr; \
-	if(sys_getproc(&pr, ep) != OK) { \
-		panic("VM: sys_getproc failed: %d", ep); \
-	} \
-	if(!pr.p_rts_flags) { \
-		panic("VM: runnable: %d", ep); \
-	} \
-}
-
 #else
 #define SANITYCHECK
 #define SLABSANITYCHECK(l)
 #define USE(obj, code) do { code } while(0)
 #define SLABSANE(ptr)
-#define NOTRUNNABLE(ep)
 #endif
 
 #endif


@@ -16,6 +16,7 @@
 #include <minix/bitmap.h>
 #include <minix/debug.h>
 
+#include <assert.h>
 #include <errno.h>
 #include <string.h>
 #include <env.h>
@@ -316,8 +317,10 @@ PUBLIC void *slaballoc(int bytes)
	vm_assert(s);
	firstused = LH(s, LIST_USED);
	vm_assert(firstused);
+#if SANITYCHECKS
	vm_assert(firstused->sdh.magic1 == MAGIC1);
	vm_assert(firstused->sdh.magic2 == MAGIC2);
+#endif
	vm_assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));
 
	for(i = firstused->sdh.freeguess;
@@ -502,8 +505,6 @@ PUBLIC void slablock(void *mem, int bytes)
 
	SLABDATAUNWRITABLE(f);
 
-	FIXME("verify new contents");
-
	return;
 }


@@ -10,9 +10,9 @@
 #if SANITYCHECKS
 #define vm_assert(cond) {	\
	if(vm_sanitychecklevel > 0 && !(cond)) {	\
-		printf("VM:%s:%d: assert failed: %s\n",	\
+		printf("VM:%s:%d: vm_assert failed: %s\n", \
			__FILE__, __LINE__, #cond);	\
-		panic("assert failed");	\
+		panic("vm_assert failed");	\
	}	\
 }
 #else


@@ -7,6 +7,9 @@
 #define PAF_ALIGN64K	0x04	/* Aligned to 64k boundary. */
 #define PAF_LOWER16MB	0x08
 #define PAF_LOWER1MB	0x10
+#define PAF_FIRSTBLOCK	0x20	/* alloc_mem: return first block */
+
+#define MARK do { if(mark) { printf("%d\n", __LINE__); } } while(0)
 
 /* special value for v in pt_allocmap */
 #define AM_AUTO ((u32_t) -1)


@@ -5,6 +5,7 @@
 #include <pagetable.h>
 #include <arch_vmproc.h>
 #include <minix/bitmap.h>
+#include <machine/archtypes.h>
 
 #include "vm.h"