vm: restartability improvements (#1)
Two bugs fixed wrt VM restartability:

. Make sure pagetable data is allocated from dynamic memory only,
  instead of from the static spare pages (bootstrap pages). Those are
  needed for bootstrap, but part of the initialization is now repeated
  afterwards so that only dynamic data remains. This solves the problem
  of physical addresses changing after an update (as static pages are
  re-allocated for the new instance).

. The range given to pt_ptalloc_in_range has to be specified in bytes
  instead of pde slot numbers. Leaving pt_pt NULL causes mapping
  transfers to fail, because NULL happens to be mapped in at that point
  and the updates then happen there.

. Added some sanity checks against the above happening.

The new state is that VM can update many times, but the system isn't
fully reliable afterwards yet.

Change-Id: I7313602c740cdae8590589132291116ed921aed7
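As a minimal sketch of the second fix (both call forms are taken from the
rs_memctl_make_vm_instance hunk below; flags and verify are set up as shown
there):

	/* Broken: pde slot numbers passed where pt_ptalloc_in_range
	 * expects byte addresses, so VM's own range keeps pt_pt NULL. */
	r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt,
		kernel_boot_info.freepde_start, ARCH_VM_DIR_ENTRIES, flags, verify);

	/* Fixed: the range is expressed in bytes, covering VM's own
	 * heap base up to the top of its data area. */
	r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt,
		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);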
Parent: 8bab0dfa2a
Commit: 10e6ba68d2
4 changed files with 83 additions and 33 deletions
@@ -113,6 +113,17 @@ static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES]
 static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE);
 #endif
 
+void pt_assert(pt_t *pt)
+{
+	char dir[4096];
+	pt_clearmapcache();
+	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+		panic("VMCTL_FLUSHTLB failed");
+	}
+	sys_physcopy(NONE, pt->pt_dir_phys, SELF, (vir_bytes) dir, sizeof(dir), 0);
+	assert(!memcmp(dir, pt->pt_dir, sizeof(dir)));
+}
+
 #if SANITYCHECKS
 /*===========================================================================*
  *				pt_sanitycheck		     		     *
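The intended use of the new pt_assert() (mirrored by the map_pin_memory and
rs_memctl_make_vm_instance hunks below) is to bracket page-table-modifying
operations, so any divergence between VM's cached page directory and the
physical copy the kernel actually walks is caught at the call site. A minimal
usage sketch:

	pt_assert(&vmp->vm_pt);		/* cached and physical directory agree */
	/* ... code that rewrites mappings, e.g. via pt_writemap() ... */
	pt_assert(&vmp->vm_pt);		/* still in sync afterwards */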
@@ -255,7 +266,6 @@ static void *vm_getsparepage(phys_bytes *phys)
 {
 	void *ptr;
 	if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) {
-		printf("vm_getsparepage: no spare found\n");
 		return NULL;
 	}
 	assert(ptr);
@@ -662,6 +672,7 @@ int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
 
 		/* Transfer the mapping. */
 		dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];
+		assert(dst_pt->pt_pt[pde]);
 
 		if(viraddr == VM_DATATOP) break;
 	}
@@ -709,11 +720,13 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 #endif
 
 	/* Scan all non-reserved page-directory entries. */
-	for(pde=0; pde < ARCH_VM_DIR_ENTRIES; pde++) {
+	for(pde=0; pde < kern_start_pde; pde++) {
 		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
 			continue;
 		}
 
+		if(!pt->pt_pt[pde]) { panic("pde %d empty\n", pde); }
+
 		/* Transfer mapping to the page table. */
 		viraddr = (vir_bytes) pt->pt_pt[pde];
 #if defined(__i386__)
@@ -721,6 +734,7 @@ int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 #elif defined(__arm__)
 		physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK;
 #endif
+		assert(viraddr);
 		if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
 			ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
 #ifdef __arm__
@@ -1024,6 +1038,40 @@ static int freepde(void)
 	return p;
 }
 
+void pt_allocate_kernel_mapped_pagetables(void)
+{
+	/* Reserve PDEs available for mapping in the page directories. */
+	int pd;
+	for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
+		struct pdm *pdm = &pagedir_mappings[pd];
+		if(!pdm->pdeno) {
+			pdm->pdeno = freepde();
+			assert(pdm->pdeno);
+		}
+		phys_bytes ph;
+
+		/* Allocate us a page table in which to
+		 * remember page directory pointers.
+		 */
+		if(!(pdm->page_directories =
+			vm_allocpage(&ph, VMP_PAGETABLE))) {
+			panic("no virt addr for vm mappings");
+		}
+		memset(pdm->page_directories, 0, VM_PAGE_SIZE);
+		pdm->phys = ph;
+
+#if defined(__i386__)
+		pdm->val = (ph & ARCH_VM_ADDR_MASK) |
+			ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
+#elif defined(__arm__)
+		pdm->val = (ph & ARCH_VM_PDE_MASK)
+			| ARCH_VM_PDE_PRESENT
+			| ARM_VM_PTE_CACHED
+			| ARM_VM_PDE_DOMAIN; //LSC FIXME
+#endif
+	}
+}
+
 /*===========================================================================*
  *				pt_init			     		     *
 *===========================================================================*/
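Note the if(!pdm->pdeno) guard compared to the old inline code: already
reserved PDE slots are kept on repeat calls, so the function can be invoked
again after the static spare pool is drained. A sketch of the assumed call
sequence:

	pt_allocate_kernel_mapped_pagetables();	/* boot: reserve PDEs, allocate pages */
	/* ... drain static spare pages, refill the reserve dynamically ... */
	pt_allocate_kernel_mapped_pagetables();	/* PDE slots kept; backing pages
						 * reallocated from dynamic memory */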
@@ -1031,6 +1079,7 @@ void pt_init(void)
 {
 	pt_t *newpt;
 	int s, r, p;
+	phys_bytes phys;
 	vir_bytes sparepages_mem;
 #if defined(__arm__)
 	vir_bytes sparepagedirs_mem;
@@ -1181,35 +1230,7 @@ void pt_init(void)
 		}
 	}
 
-	/* Reserve PDEs available for mapping in the page directories. */
-	{
-		int pd;
-		for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
-			struct pdm *pdm = &pagedir_mappings[pd];
-			pdm->pdeno = freepde();
-			phys_bytes ph;
-
-			/* Allocate us a page table in which to
-			 * remember page directory pointers.
-			 */
-			if(!(pdm->page_directories =
-				vm_allocpage(&ph, VMP_PAGETABLE))) {
-				panic("no virt addr for vm mappings");
-			}
-			memset(pdm->page_directories, 0, VM_PAGE_SIZE);
-			pdm->phys = ph;
-
-#if defined(__i386__)
-			pdm->val = (ph & ARCH_VM_ADDR_MASK) |
-				ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
-#elif defined(__arm__)
-			pdm->val = (ph & ARCH_VM_PDE_MASK)
-				| ARCH_VM_PDE_PRESENT
-				| ARM_VM_PTE_CACHED
-				| ARM_VM_PDE_DOMAIN; //LSC FIXME
-#endif
-		}
-	}
+	pt_allocate_kernel_mapped_pagetables();
 
 	/* Allright. Now. We have to make our own page directory and page tables,
 	 * that the kernel has already set up, accessible to us. It's easier to
@@ -1279,6 +1300,27 @@ void pt_init(void)
 
 	pt_init_done = 1;
 
+	/* VM is now fully functional in that it can dynamically allocate memory
+	 * for itself.
+	 *
+	 * We don't want to keep using the bootstrap statically allocated spare
+	 * pages though, as the physical addresses will change on liveupdate. So we
+	 * re-do part of the initialization now with purely dynamically allocated
+	 * memory. First throw out the static pool.
+	 */
+
+	alloc_cycle();			/* Make sure allocating works */
+	while(vm_getsparepage(&phys)) ;	/* Use up all static pages */
+	alloc_cycle();			/* Refill spares with dynamic */
+	pt_allocate_kernel_mapped_pagetables();	/* Reallocate in-kernel pages */
+	pt_bind(newpt, &vmproc[VM_PROC_NR]);	/* Recalculate */
+	pt_mapkernel(newpt);	/* Rewrite pagetable info */
+
+	/* Flush TLB just in case any of those mappings have been touched */
+	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+		panic("VMCTL_FLUSHTLB failed");
+	}
+
 	/* All OK. */
 	return;
 }
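The drain loop above terminates because vm_getsparepage() returns NULL once
reservedqueue_alloc() finds no spare page (see the vm_getsparepage hunk
earlier); pt_bind() and pt_mapkernel() must then be re-run, since
pt_allocate_kernel_mapped_pagetables() gives the pagedir-mapping pages new
physical addresses. A minimal sketch of the drain idea:

	phys_bytes phys;
	while(vm_getsparepage(&phys))
		;	/* each iteration consumes one remaining bootstrap page */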
@@ -118,6 +118,7 @@ void vm_pagelock(void *vir, int lockflag);
 int vm_addrok(void *vir, int write);
 int get_vm_self_pages(void);
 int pt_writable(struct vmproc *vmp, vir_bytes v);
+void pt_assert(pt_t *pt);
 
 #if SANITYCHECKS
 void pt_sanitycheck(pt_t *pt, const char *file, int line);
@@ -783,6 +783,7 @@ int map_pin_memory(struct vmproc *vmp)
 	region_iter iter;
 	region_start_iter_least(&vmp->vm_regions_avl, &iter);
 	/* Scan all memory regions. */
+	pt_assert(&vmp->vm_pt);
 	while((vr = region_get_iter(&iter))) {
 		/* Make sure region is mapped to physical memory and writable.*/
 		r = map_handle_memory(vmp, vr, 0, vr->length, 1, NULL, 0, 0);
@@ -791,6 +792,7 @@ int map_pin_memory(struct vmproc *vmp)
 		}
 		region_incr_iter(&iter);
 	}
+	pt_assert(&vmp->vm_pt);
 	return OK;
 }
 
@@ -146,6 +146,8 @@ static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
 
 	this_vm_vmp = &vmproc[VM_PROC_NR];
 
+	pt_assert(&this_vm_vmp->vm_pt);
+
 	/* Check if the operation is allowed. */
 	assert(num_vm_instances == 1 || num_vm_instances == 2);
 	if(num_vm_instances == 2) {
@@ -169,12 +171,12 @@ static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
 	flags = 0;
 	verify = FALSE;
 	r = pt_ptalloc_in_range(&this_vm_vmp->vm_pt,
-		kernel_boot_info.freepde_start, ARCH_VM_DIR_ENTRIES, flags, verify);
+		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
 	if(r != OK) {
 		return r;
 	}
 	r = pt_ptalloc_in_range(&new_vm_vmp->vm_pt,
-		kernel_boot_info.freepde_start, ARCH_VM_DIR_ENTRIES, flags, verify);
+		VM_OWN_HEAPBASE, VM_DATATOP, flags, verify);
 	if(r != OK) {
 		return r;
 	}
@@ -189,6 +191,9 @@ static int rs_memctl_make_vm_instance(struct vmproc *new_vm_vmp)
 		return r;
 	}
 
+	pt_assert(&this_vm_vmp->vm_pt);
+	pt_assert(&new_vm_vmp->vm_pt);
+
 	return OK;
 }
 