- map in as much memory as is necessary in 4MB chunks to
  let boot processes run with segments
- allow segment-only processes to fork() by copying them
  and giving them an identity page table
This commit is contained in:
Ben Gras 2009-12-07 12:10:44 +00:00
parent 51065a1b47
commit f0db9bb328
5 changed files with 80 additions and 79 deletions

View file

@@ -123,8 +123,7 @@ vir_bytes sp; /* new value of sp */
rmp->vm_arch.vm_seg[S].mem_vir) ? ENOMEM : OK;
if(r == OK && (rmp->vm_flags & VMF_HASPT) &&
rmp->vm_endpoint != VM_PROC_NR) {
vm_assert(rmp->vm_heap);
rmp->vm_endpoint != VM_PROC_NR && rmp->vm_heap) {
if(old_clicks < data_clicks) {
vir_bytes more;
more = (data_clicks - old_clicks) << CLICK_SHIFT;

View file

@@ -72,7 +72,7 @@ PUBLIC int do_fork(message *msg)
vmc->vm_regions = NULL;
vmc->vm_endpoint = NONE; /* In case someone tries to use it. */
vmc->vm_pt = origpt;
vmc->vm_flags &= ~VMF_HASPT;
vmc->vm_flags |= VMF_HASPT;
#if VMSTATS
vmc->vm_bytecopies = 0;
@@ -88,8 +88,6 @@ PUBLIC int do_fork(message *msg)
return ENOMEM;
}
vmc->vm_flags |= VMF_HASPT;
SANITYCHECK(SCL_DETAIL);
if(map_proc_copy(vmc, vmp) != OK) {
@@ -124,7 +122,6 @@ PUBLIC int do_fork(message *msg)
/* Create a copy of the parent's core image for the child. */
child_abs = (phys_bytes) child_base << CLICK_SHIFT;
parent_abs = (phys_bytes) vmp->vm_arch.vm_seg[D].mem_phys << CLICK_SHIFT;
FIXME("VM uses kernel for abscopy");
s = sys_abscopy(parent_abs, child_abs, prog_bytes);
if (s < 0) vm_panic("do_fork can't copy", s);
@@ -136,6 +133,11 @@ PUBLIC int do_fork(message *msg)
vmc->vm_arch.vm_seg[D].mem_phys = child_base;
vmc->vm_arch.vm_seg[S].mem_phys = vmc->vm_arch.vm_seg[D].mem_phys +
(vmp->vm_arch.vm_seg[S].mem_vir - vmp->vm_arch.vm_seg[D].mem_vir);
if(pt_identity(&vmc->vm_pt) != OK) {
printf("VM: fork: pt_identity failed\n");
return ENOMEM;
}
}
/* Only inherit these flags. */
@@ -148,7 +150,7 @@ PUBLIC int do_fork(message *msg)
/* Tell kernel about the (now successful) FORK. */
if((r=sys_fork(vmp->vm_endpoint, childproc,
&vmc->vm_endpoint, vmc->vm_arch.vm_seg,
fullvm ? PFF_VMINHIBIT : 0, &msgaddr)) != OK) {
PFF_VMINHIBIT, &msgaddr)) != OK) {
vm_panic("do_fork can't sys_fork", r);
}
@@ -164,10 +166,11 @@ PUBLIC int do_fork(message *msg)
handle_memory(vmc, vir, sizeof(message), 1);
vir = arch_vir2map(vmp, msgaddr);
handle_memory(vmp, vir, sizeof(message), 1);
if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
vm_panic("fork can't pt_bind", r);
}
if((r=pt_bind(&vmc->vm_pt, vmc)) != OK)
vm_panic("fork can't pt_bind", r);
/* Inform caller of new child endpoint. */
msg->VMF_CHILD_ENDPOINT = vmc->vm_endpoint;

View file

@@ -38,8 +38,8 @@
#include "memory.h"
/* PDE used to map in kernel, kernel physical address. */
PRIVATE int kernel_pde = -1, pagedir_pde = -1;
PRIVATE u32_t kern_pde_val = 0, global_bit = 0, pagedir_pde_val;
PRIVATE int id_map_high_pde = -1, pagedir_pde = -1;
PRIVATE u32_t global_bit = 0, pagedir_pde_val;
PRIVATE int proc_pde = 0;
@@ -622,10 +622,40 @@ PUBLIC int pt_new(pt_t *pt)
return OK;
}
/*===========================================================================*
* pt_identity *
*===========================================================================*/
PUBLIC int pt_identity(pt_t *pt)
{
/* Allocate a pagetable that does a 1:1 (identity) mapping of the whole
 * address space: every page directory slot is filled with a single 4MB
 * "bigpage" PDE mapping the matching physical range, with no second-level
 * page tables at all.  This lets segment-only (non-full-VM) processes run
 * with paging enabled.
 * Returns OK on success, ENOMEM if the page directory can't be allocated.
 * NOTE(review): a bigpage PDE requires CPU 4MB-page support (CR4.PSE) —
 * presumably guaranteed by the bigpage_ok check elsewhere in this file;
 * confirm before reuse.
 */
int i;
/* Allocate page directory (an already-present one is reused as-is). */
if(!pt->pt_dir &&
!(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
return ENOMEM;
}
/* Fill each slot with a present, writable, user-accessible 4MB mapping
 * of the physical range starting at i * 4MB.
 */
for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
phys_bytes addr;
addr = I386_BIG_PAGE_SIZE*i;
pt->pt_dir[i] = (addr & I386_VM_ADDR_MASK_4MB) |
I386_VM_BIGPAGE|
I386_VM_USER|
I386_VM_PRESENT|I386_VM_WRITE;
pt->pt_pt[i] = NULL; /* no 2nd-level table behind a bigpage PDE */
}
/* Where to start looking for free virtual address space? */
pt->pt_virtop = 0;
return OK;
}
/*===========================================================================*
* pt_init *
*===========================================================================*/
PUBLIC void pt_init(void)
PUBLIC void pt_init(phys_bytes usedlimit)
{
/* By default, the kernel gives us a data segment with pre-allocated
* memory that then can't grow. We want to be able to allocate memory
@@ -649,6 +679,7 @@ PUBLIC void pt_init(void)
/* Shorthand. */
newpt = &vmp->vm_pt;
/* Get ourselves spare pages. */
if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
vm_panic("pt_init: aalloc for spare failed", NO_NUM);
@@ -671,29 +702,18 @@ PUBLIC void pt_init(void)
if(global_bit_ok)
global_bit = I386_VM_GLOBAL;
/* Figure out kernel pde slot. */
{
int pde1, pde2;
pde1 = I386_VM_PDE(KERNEL_TEXT);
pde2 = I386_VM_PDE(KERNEL_DATA+KERNEL_DATA_LEN);
if(pde1 != pde2)
vm_panic("pt_init: kernel too big", NO_NUM);
/* The kernel and boot time processes need an identity mapping.
* We use full PDE's for this without separate page tables.
* Figure out which pde we can start using for other purposes.
*/
id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;
/* Map in kernel with this single pde value if 4MB pages
* supported.
*/
kern_pde_val = (KERNEL_TEXT & I386_VM_ADDR_MASK_4MB) |
I386_VM_BIGPAGE|
I386_VM_USER|
I386_VM_PRESENT|I386_VM_WRITE|global_bit;
kernel_pde = pde1;
vm_assert(kernel_pde >= 0);
free_pde = kernel_pde+1;
}
/* We have to make mappings up till here. */
free_pde = id_map_high_pde+1;
printf("map high pde: %d for limit: 0x%lx\n",
id_map_high_pde, usedlimit);
/* First unused pde. */
proc_pde = free_pde;
/* Initial (current) range of our virtual address space. */
lo = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
@@ -715,9 +735,6 @@ PUBLIC void pt_init(void)
if(pt_new(newpt) != OK)
vm_panic("pt_init: pt_new failed", NO_NUM);
/* Old position mapped in? */
pt_check(vmp);
/* Set up mappings for VM process. */
for(v = lo; v < hi; v += I386_PAGE_SIZE) {
phys_bytes addr;
@@ -787,7 +804,8 @@ PUBLIC void pt_init(void)
kern_mappings[index].flags = flags;
kern_mappings[index].lin_addr = offset;
kern_mappings[index].flags =
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
global_bit;
if(flags & VMMF_UNCACHED)
kern_mappings[index].flags |=
I386_VM_PWT | I386_VM_PCD;
@@ -906,28 +924,22 @@ PUBLIC void pt_free(pt_t *pt)
PUBLIC int pt_mapkernel(pt_t *pt)
{
int r, i;
static int printed = 0;
/* Any i386 page table needs to map in the kernel address space. */
vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);
if(bigpage_ok) {
if(kernel_pde >= 0) {
pt->pt_dir[kernel_pde] = kern_pde_val;
} else
vm_panic("VM: pt_mapkernel: no kernel pde", NO_NUM);
int pde;
for(pde = 0; pde <= id_map_high_pde; pde++) {
phys_bytes addr;
addr = pde * I386_BIG_PAGE_SIZE;
vm_assert((addr & I386_VM_ADDR_MASK) == addr);
pt->pt_dir[pde] = addr | I386_VM_PRESENT |
I386_VM_BIGPAGE | I386_VM_USER |
I386_VM_WRITE | global_bit;
}
} else {
vm_panic("VM: pt_mapkernel: no bigpage", NO_NUM);
/* Map in text. flags: don't write, supervisor only */
if((r=pt_writemap(pt, KERNEL_TEXT, KERNEL_TEXT, KERNEL_TEXT_LEN,
I386_VM_PRESENT|global_bit, 0)) != OK)
return r;
/* Map in data. flags: read-write, supervisor only */
if((r=pt_writemap(pt, KERNEL_DATA, KERNEL_DATA, KERNEL_DATA_LEN,
I386_VM_PRESENT|I386_VM_WRITE, 0)) != OK)
return r;
}
if(pagedir_pde >= 0) {
@@ -948,21 +960,6 @@ PUBLIC int pt_mapkernel(pt_t *pt)
return OK;
}
/*===========================================================================*
* pt_check *
*===========================================================================*/
PUBLIC void pt_check(struct vmproc *vmp)
{
/* Sanity check that a boot-time (segment-only) process fits entirely
 * inside the kernel's identity-mapped range, i.e. below the end of the
 * 4MB PDE that maps the kernel.  Panics (does not return) if it doesn't.
 * NOTE(review): this function is being removed by this commit, replaced
 * by the id_map_high_pde mechanism that identity-maps everything up to
 * the boot-time memory limit.
 */
phys_bytes hi;
/* Physical end of the process: top of its stack segment. */
hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
vmp->vm_arch.vm_seg[S].mem_len);
if(hi > (kernel_pde+1) * I386_BIG_PAGE_SIZE) {
printf("VM: %d doesn't fit in kernel range (0x%lx)\n",
vmp->vm_endpoint, hi);
vm_panic("boot time processes too big", NO_NUM);
}
}
/*===========================================================================*
* pt_cycle *
*===========================================================================*/

View file

@@ -173,6 +173,7 @@ PRIVATE void vm_init(void)
struct memory mem_chunks[NR_MEMS];
struct boot_image image[NR_BOOT_PROCS];
struct boot_image *ip;
phys_bytes limit = 0;
/* Get chunks of available memory. */
get_mem_chunks(mem_chunks);
@@ -195,6 +196,7 @@ PRIVATE void vm_init(void)
* now and make valid slot entries for them.
*/
for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
phys_bytes proclimit;
struct vmproc *vmp;
if(ip->proc_nr >= _NR_PROCS) { vm_panic("proc", ip->proc_nr); }
@@ -224,6 +226,13 @@ PRIVATE void vm_init(void)
/* Remove this memory from the free list. */
reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);
/* Set memory limit. */
proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
vmp->vm_arch.vm_seg[S].mem_len) - 1;
if(proclimit > limit)
limit = proclimit;
vmp->vm_flags = VMF_INUSE;
vmp->vm_endpoint = ip->endpoint;
vmp->vm_stacktop =
@@ -235,7 +244,7 @@ PRIVATE void vm_init(void)
}
/* Architecture-dependent initialization. */
pt_init();
pt_init(limit);
/* Initialize tables to all physical memory. */
mem_init(mem_chunks);
@@ -251,16 +260,8 @@ PRIVATE void vm_init(void)
GETVMP(vmp, ip->proc_nr);
if(!(ip->flags & PROC_FULLVM)) {
/* See if this process fits in kernel
* mapping. VM has its own pagetable,
* don't check it.
*/
if(!(vmp->vm_flags & VMF_HASPT)) {
pt_check(vmp);
}
continue;
}
if(!(ip->flags & PROC_FULLVM))
continue;
old_stack =
vmp->vm_arch.vm_seg[S].mem_vir +

View file

@@ -95,9 +95,10 @@ _PROTOTYPE( int handle_memory, (struct vmproc *vmp, vir_bytes mem,
vir_bytes len, int wrflag));
/* $(ARCH)/pagetable.c */
_PROTOTYPE( void pt_init, (void) );
_PROTOTYPE( void pt_init, (phys_bytes limit) );
_PROTOTYPE( void pt_check, (struct vmproc *vmp) );
_PROTOTYPE( int pt_new, (pt_t *pt) );
_PROTOTYPE( int pt_identity, (pt_t *pt) );
_PROTOTYPE( void pt_free, (pt_t *pt) );
_PROTOTYPE( int pt_writemap, (pt_t *pt, vir_bytes v, phys_bytes physaddr,
size_t bytes, u32_t flags, u32_t writemapflags));