further libexec generalization

	. new mode for sys_memset: include a process endpoint so the memset
	  can be done in physical or in a process's virtual address space.
	. add a mode to mmap() that lets a process allocate uninitialized
	  memory.
	. this allows an exec()er (RS, VFS, etc.) to request uninitialized
	  memory from VM and selectively clear the ranges that don't come
	  from a file, leaving no uninitialized memory for the process
	  to see.
	. use callbacks for clearing the process, clearing memory in the
	  process, and copying into the process, so that the libexec code
	  can be used from RS, VFS, and in the future from the kernel (to
	  load VM) and from VM (to load boot-time processes).
Ben Gras 2012-06-06 19:05:28 +02:00
parent 040362e379
commit 769af57274
21 changed files with 207 additions and 64 deletions
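
A rough sketch of how an exec()er is expected to wire up the generalized interface after this change (callback and helper names as introduced in the diff below; the surrounding RS/VFS exec state, read_seg and error paths are elided and only indicative):

	struct exec_info execi;

	memset(&execi, 0, sizeof(execi));
	execi.proc_e = proc_e;			/* endpoint being exec()ed */

	/* callback functions and data */
	execi.copymem = read_seg;			/* copy file data into the process */
	execi.clearproc = libexec_clearproc_vm_procctl;	/* wipe all old memory */
	execi.clearmem = libexec_clear_sys_memset;	/* zero a range in the process */
	execi.allocmem_prealloc = libexec_alloc_mmap_prealloc;
	execi.allocmem_ondemand = libexec_alloc_mmap_ondemand;

	if((r = libexec_load_elf(&execi)) != OK)
		return r;

	/* libexec no longer informs PM itself; the exec()er does. */
	if((r = libexec_pm_newexec(execi.proc_e, &execi)) != OK)
		return r;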

View file

@@ -819,6 +819,7 @@ struct
 	{ "READBIOS",	SYS_READBIOS },
 	{ "STIME",	SYS_STIME },
 	{ "VMCTL",	SYS_VMCTL },
+	{ "MEMSET",	SYS_MEMSET },
 	{ NULL,		0 }
 };

View file

@@ -93,6 +93,7 @@ service vfs
 		KILL		# 06
 		UMAP		# 14
 		VIRCOPY		# 15
+		MEMSET
 		;
 	vm	PROCCTL;
 	io	NONE;		# No I/O range allowed

View file

@@ -371,6 +371,7 @@
 #define MEM_PTR		m2_p1	/* base */
 #define MEM_COUNT	m2_l1	/* count */
 #define MEM_PATTERN	m2_l2	/* pattern to write */
+#define MEM_PROCESS	m2_i1	/* NONE (phys) or process id (vir) */

 /* Field names for SYS_DEVIO, SYS_VDEVIO, SYS_SDEVIO. */
 #define DIO_REQUEST	m2_i3	/* device in or output */

View file

@@ -153,8 +153,8 @@ int sys_safecopyto(endpoint_t dest, cp_grant_id_t grant, vir_bytes
 	grant_offset, vir_bytes my_address, size_t bytes, int my_seg);
 int sys_vsafecopy(struct vscp_vec *copyvec, int elements);

-int sys_memset(unsigned long pattern, phys_bytes base, phys_bytes
-	bytes);
+int sys_memset(endpoint_t who, unsigned long pattern,
+	phys_bytes base, phys_bytes bytes);

 /* Grant-based map functions. */
 int sys_safemap(endpoint_t grantor, cp_grant_id_t grant, vir_bytes
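
The new leading endpoint_t parameter selects the address space: NONE keeps the old physical-memory behaviour, any other endpoint makes the kernel memset a range in that process's virtual address space. Two usage sketches, both mirroring callers changed later in this commit (the variable names are placeholders taken from those callers):

	/* Physical: clear freshly allocated page frames, as VM's alloc_pages() does. */
	if((s = sys_memset(NONE, 0, CLICK_SIZE*mem, VM_PAGE_SIZE*pages)) != OK)
		panic("alloc_mem: sys_memset failed: %d", s);

	/* Virtual: zero a range inside another process, as the exec()ers now do
	 * for the parts of a segment that are not backed by the executable file.
	 */
	if((s = sys_memset(execi->proc_e, 0, vaddr, len)) != OK)
		return s;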

View file

@@ -59,6 +59,7 @@ typedef __off_t off_t; /* file offset */
 #define MAP_FIXED	0x0200	/* require mapping to happen at hint */
 #define MAP_THIRDPARTY	0x0400	/* perform on behalf of any process */
+#define MAP_UNINITIALIZED 0x0800	/* do not clear memory */

 /*
  * Error indicator returned by mmap(2)
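
MAP_UNINITIALIZED is what lets an exec()er take over responsibility for clearing: VM hands back memory without zeroing it, and the caller then clears only the non-file-backed ranges itself. A sketch of the intended combination, matching the libexec_alloc_mmap_prealloc helper added further down (only RS and VFS may pass the flag, as enforced in VM's do_mmap() change):

	if(minix_mmap_for(execi->proc_e, (void *) vaddr, len,
		PROT_READ|PROT_WRITE|PROT_EXEC,
		MAP_ANON|MAP_PREALLOC|MAP_UNINITIALIZED|MAP_FIXED, -1, 0) == MAP_FAILED)
		return ENOMEM;

	/* ...followed by sys_memset(proc_e, 0, start, length) on the ranges
	 * that do not come from the executable file.
	 */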

View file

@@ -52,7 +52,7 @@ static void pagefault( struct proc *pr,
 			struct exception_frame * frame,
 			int is_nested)
 {
-	int in_physcopy = 0;
+	int in_physcopy = 0, in_memset = 0;

 	reg_t pagefaultcr2;
 	message m_pagefault;
@@ -68,13 +68,21 @@ static void pagefault( struct proc *pr,
 	in_physcopy = (frame->eip > (vir_bytes) phys_copy) &&
 	   (frame->eip < (vir_bytes) phys_copy_fault);

+	in_memset = (frame->eip > (vir_bytes) phys_memset) &&
+	   (frame->eip < (vir_bytes) memset_fault);
+
 	if((is_nested || iskernelp(pr)) &&
-		catch_pagefaults && in_physcopy) {
+		catch_pagefaults && (in_physcopy || in_memset)) {
 #if 0
 		printf("pf caught! addr 0x%lx\n", pagefaultcr2);
 #endif
 		if (is_nested) {
+			if(in_physcopy) {
+				assert(!in_memset);
 				frame->eip = (reg_t) phys_copy_fault_in_kernel;
+			} else {
+				frame->eip = (reg_t) memset_fault_in_kernel;
+			}
 		}
 		else {
 			pr->p_reg.pc = (reg_t) phys_copy_fault;

View file

@@ -90,7 +90,7 @@ void phys_outsw(u16_t port, phys_bytes buf, size_t count);
 u32_t read_cr3(void);
 void reload_cr3(void);
 void i386_invlpg(phys_bytes linaddr);
-void phys_memset(phys_bytes ph, u32_t c, phys_bytes bytes);
+vir_bytes phys_memset(phys_bytes ph, u32_t c, phys_bytes bytes);
 void reload_ds(void);
 void ia32_msr_read(u32_t reg, u32_t * hi, u32_t * lo);
 void ia32_msr_write(u32_t reg, u32_t hi, u32_t lo);
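
The return-type change encodes a new contract: phys_memset() now returns 0 when the fill completed, or the faulting linear address (loaded from %cr2 by memset_fault_in_kernel below) when a page fault was caught while catch_pagefaults was set. A condensed sketch of the caller-side pattern, anticipating the vm_memset() change in the following hunks (ptr, pattern, chunk and r are placeholders):

	phys_bytes pfa;

	assert(!catch_pagefaults);
	catch_pagefaults = 1;

	if((pfa = phys_memset(ptr, pattern, chunk)) != 0) {
		/* pfa holds the address that faulted; fail gracefully
		 * with EFAULT instead of panicking the kernel.
		 */
		r = EFAULT;
	}

	catch_pagefaults = 0;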

View file

@@ -391,13 +391,24 @@ remain_fill:
 	inc	%ebp
 	dec	%eax
 	jmp	remain_fill

 fill_done:
+LABEL(memset_fault)		/* kernel can send us here */
+	mov	$0, %eax	/* 0 means: no fault */
 	pop	%ds
 	pop	%ebx
 	pop	%esi
 	pop	%ebp
 	ret

+LABEL(memset_fault_in_kernel)	/* kernel can send us here */
+	pop	%ds
+	pop	%ebx
+	pop	%esi
+	pop	%ebp
+	mov	%cr2, %eax
+	ret
+
 /*===========================================================================*/
 /*				mem_rdw					     */

View file

@@ -662,16 +662,27 @@ static void vm_print(u32_t *root)
 }
 #endif

-/*===========================================================================*
- *				lin_memset				     *
- *===========================================================================*/
-int vm_phys_memset(phys_bytes ph, const u8_t c, phys_bytes bytes)
+int vm_memset(endpoint_t who, phys_bytes ph, const u8_t c, phys_bytes bytes)
 {
 	u32_t p;
+	int r = OK;
+	struct proc *whoptr = NULL;
+
+	/* NONE for physical, otherwise virtual */
+	if(who != NONE) {
+		int n;
+		vir_bytes lin;
+		assert(vm_running);
+		if(!isokendpt(who, &n)) return ESRCH;
+		whoptr = proc_addr(n);
+		if(!(lin = umap_local(whoptr, D, ph, bytes))) return EFAULT;
+		ph = lin;
+	}

 	p = c | (c << 8) | (c << 16) | (c << 24);

 	if(!vm_running) {
+		if(who != NONE) panic("can't vm_memset without vm running");
 		phys_memset(ph, p, bytes);
 		return OK;
 	}
@@ -680,24 +691,34 @@ int vm_phys_memset(phys_bytes ph, const u8_t c, phys_bytes bytes)

 	assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);

-	/* With VM, we have to map in the physical memory.
+	assert(!catch_pagefaults);
+	catch_pagefaults=1;
+
+	/* With VM, we have to map in the memory (virtual or physical).
 	 * We can do this 4MB at a time.
 	 */
 	while(bytes > 0) {
 		int changed = 0;
-		phys_bytes chunk = bytes, ptr;
-		ptr = createpde(NULL, ph, &chunk, 0, &changed);
+		phys_bytes chunk = bytes, ptr, pfa;
+		ptr = createpde(whoptr, ph, &chunk, 0, &changed);
 		if(changed)
 			reload_cr3();

 		/* We can memset as many bytes as we have remaining,
 		 * or as many as remain in the 4MB chunk we mapped in.
 		 */
-		phys_memset(ptr, p, chunk);
+		if((pfa=phys_memset(ptr, p, chunk))) {
+			printf("kernel memset pagefault\n");
+			r = EFAULT;
+			break;
+		}
 		bytes -= chunk;
 		ph += chunk;
 	}

+	assert(catch_pagefaults);
+	catch_pagefaults=0;
+
 	assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);

 	return OK;

View file

@@ -157,6 +157,8 @@ phys_bytes phys_copy(phys_bytes source, phys_bytes dest, phys_bytes
 	count);
 void phys_copy_fault(void);
 void phys_copy_fault_in_kernel(void);
+void memset_fault(void);
+void memset_fault_in_kernel(void);

 #define virtual_copy(src, dst, bytes) \
 	virtual_copy_f(NULL, src, dst, bytes, 0)
 #define virtual_copy_vmcheck(caller, src, dst, bytes) \
@@ -174,7 +176,8 @@ phys_bytes umap_local(register struct proc *rp, int seg, vir_bytes
 phys_bytes umap_virtual(struct proc* rp, int seg, vir_bytes vir_addr,
 	vir_bytes bytes);
 phys_bytes seg2phys(u16_t);
-int vm_phys_memset(phys_bytes source, u8_t pattern, phys_bytes count);
+int vm_memset(endpoint_t who,
+	phys_bytes source, u8_t pattern, phys_bytes count);
 int intr_init(int, int);
 void halt_cpu(void);
 void arch_init(void);

View file

@@ -18,7 +18,8 @@ int do_memset(struct proc * caller, message * m_ptr)
 {
 /* Handle sys_memset(). This writes a pattern into the specified memory. */
   unsigned char c = m_ptr->MEM_PATTERN;
-  vm_phys_memset((phys_bytes) m_ptr->MEM_PTR, c, (phys_bytes) m_ptr->MEM_COUNT);
+  vm_memset(m_ptr->MEM_PROCESS, (phys_bytes) m_ptr->MEM_PTR,
+	c, (phys_bytes) m_ptr->MEM_COUNT);
   return(OK);
 }

View file

@@ -220,7 +220,6 @@ int elf_has_interpreter(char *exec_hdr, /* executable header */

 int libexec_load_elf(struct exec_info *execi)
 {
-	int r;
 	Elf_Ehdr *hdr = NULL;
 	Elf_Phdr *phdr = NULL;
 	int e, i = 0;
@@ -240,25 +239,26 @@ int libexec_load_elf(struct exec_info *execi)
 	 */
 	i = elf_has_interpreter(execi->hdr, execi->hdr_len, NULL, 0);
 	if(i > 0) {
-		printf("libexec: cannot load dynamically linked executable\n");
 		return ENOEXEC;
 	}

-	/* Make VM forget about all existing memory in process. */
-	vm_procctl(execi->proc_e, VMPPARAM_CLEAR);
-
 	execi->stack_size = roundup(execi->stack_size, PAGE_SIZE);
 	execi->stack_high = VM_STACKTOP;
 	assert(!(VM_STACKTOP % PAGE_SIZE));
 	stacklow = execi->stack_high - execi->stack_size;

+	assert(execi->copymem);
+	assert(execi->clearmem);
+	assert(execi->allocmem_prealloc);
+	assert(execi->allocmem_ondemand);
+
+	if(execi->clearproc) execi->clearproc(execi);
+
 	for (i = 0; i < hdr->e_phnum; i++) {
 		vir_bytes seg_membytes, page_offset, vaddr;
+		vir_bytes chunk, vfileend, vmemend;
 		Elf_Phdr *ph = &phdr[i];
 		if (ph->p_type != PT_LOAD || ph->p_memsz == 0) continue;
-#if 0
-		printf("index %d memsz 0x%lx vaddr 0x%lx\n", i, ph->p_memsz, ph->p_vaddr);
-#endif
 		vaddr = ph->p_vaddr;
 		seg_membytes = ph->p_memsz;
 		page_offset = vaddr % PAGE_SIZE;
@@ -268,34 +268,46 @@ int libexec_load_elf(struct exec_info *execi)
 		if(first || startv > vaddr) startv = vaddr;
 		first = 0;

-#if 0
-		printf("libexec_load_elf: mmap 0x%lx bytes at 0x%lx\n",
-			seg_membytes, vaddr);
-#endif
-
-		/* Tell VM to make us some memory */
-		if(minix_mmap_for(execi->proc_e, (void *) vaddr, seg_membytes,
-			PROT_READ|PROT_WRITE|PROT_EXEC,
-			MAP_ANON|MAP_PREALLOC|MAP_FIXED, -1, 0) == MAP_FAILED) {
-			printf("libexec_load_elf: mmap of 0x%lx bytes failed\n", seg_membytes);
-			vm_procctl(execi->proc_e, VMPPARAM_CLEAR);
+		/* make us some memory */
+		if(execi->allocmem_prealloc(execi, vaddr, seg_membytes) != OK) {
+			if(execi->clearproc) execi->clearproc(execi);
 			return ENOMEM;
 		}

+#if ELF_DEBUG
+		printf("mmapped 0x%lx-0x%lx\n", vaddr, vaddr+seg_membytes);
+#endif
+
 		/* Copy executable section into it */
-		if(execi->load(execi, ph->p_offset, ph->p_vaddr, ph->p_filesz) != OK) {
-			printf("libexec_load_elf: load callback failed\n");
-			vm_procctl(execi->proc_e, VMPPARAM_CLEAR);
+		if(execi->copymem(execi, ph->p_offset, ph->p_vaddr, ph->p_filesz) != OK) {
+			if(execi->clearproc) execi->clearproc(execi);
 			return ENOMEM;
 		}
+
+#if ELF_DEBUG
+		printf("copied 0x%lx-0x%lx\n", ph->p_vaddr, ph->p_vaddr+ph->p_filesz);
+#endif
+
+		/* Clear remaining bits */
+		vfileend = ph->p_vaddr + ph->p_filesz;
+		vmemend = vaddr + seg_membytes;
+		if((chunk = ph->p_vaddr - vaddr) > 0) {
+#if ELF_DEBUG
+			printf("start clearing 0x%lx-0x%lx\n", vaddr, vaddr+chunk);
+#endif
+			execi->clearmem(execi, vaddr, chunk);
+		}
+		if((chunk = vmemend - vfileend) > 0) {
+#if ELF_DEBUG
+			printf("end clearing 0x%lx-0x%lx\n", vfileend, vaddr+chunk);
+#endif
+			execi->clearmem(execi, vfileend, chunk);
+		}
 	}

 	/* Make it a stack */
-	if(minix_mmap_for(execi->proc_e, (void *) stacklow, execi->stack_size,
-		PROT_READ|PROT_WRITE|PROT_EXEC,
-		MAP_ANON|MAP_FIXED, -1, 0) == MAP_FAILED) {
-		printf("libexec_load_elf: mmap for stack failed\n");
-		vm_procctl(execi->proc_e, VMPPARAM_CLEAR);
+	if(execi->allocmem_ondemand(execi, stacklow, execi->stack_size) != OK) {
+		if(execi->clearproc) execi->clearproc(execi);
 		return ENOMEM;
 	}
@@ -303,10 +315,6 @@ int libexec_load_elf(struct exec_info *execi)
 	execi->pc = hdr->e_entry;
 	execi->load_base = startv;

-	if((r = libexec_pm_newexec(execi->proc_e, execi)) != OK) {
-		printf("libexec_load_elf: pm_newexec failed: %d\n", r);
-	}
-
-	return(r);
+	return OK;
 }
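
To make the new clearing logic concrete: a PT_LOAD segment supplies p_filesz file-backed bytes at p_vaddr, while the loader preallocates seg_membytes starting at the page-aligned vaddr; everything outside [p_vaddr, p_vaddr+p_filesz) must be zeroed explicitly via clearmem, since the mapping was requested uninitialized. A small, self-contained illustration with made-up numbers (not part of the commit):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical, already page-rounded segment. */
		unsigned long vaddr = 0x8048000, seg_membytes = 0x3000; /* mapped */
		unsigned long p_vaddr = 0x8048010, p_filesz = 0x1230;   /* file-backed */

		unsigned long vfileend = p_vaddr + p_filesz;
		unsigned long vmemend = vaddr + seg_membytes;

		/* The two clearmem() ranges from the loop above: the slack before
		 * the file data, and the bss-style tail after it.
		 */
		printf("head clear: 0x%lx-0x%lx (%lu bytes)\n",
			vaddr, p_vaddr, p_vaddr - vaddr);
		printf("tail clear: 0x%lx-0x%lx (%lu bytes)\n",
			vfileend, vmemend, vmemend - vfileend);
		return 0;
	}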

View file

@@ -12,8 +12,43 @@
 #include <minix/ipc.h>
 #include <minix/com.h>
 #include <minix/callnr.h>
+#include <minix/vm.h>
+#include <minix/syslib.h>
+#include <sys/mman.h>
 #include <machine/elf.h>

+int libexec_alloc_mmap_prealloc(struct exec_info *execi, off_t vaddr, size_t len)
+{
+	if(minix_mmap_for(execi->proc_e, (void *) vaddr, len,
+		PROT_READ|PROT_WRITE|PROT_EXEC,
+		MAP_ANON|MAP_PREALLOC|MAP_UNINITIALIZED|MAP_FIXED, -1, 0) == MAP_FAILED) {
+		return ENOMEM;
+	}
+
+	return OK;
+}
+
+int libexec_alloc_mmap_ondemand(struct exec_info *execi, off_t vaddr, size_t len)
+{
+	if(minix_mmap_for(execi->proc_e, (void *) vaddr, len,
+		PROT_READ|PROT_WRITE|PROT_EXEC,
+		MAP_ANON|MAP_FIXED, -1, 0) == MAP_FAILED) {
+		return ENOMEM;
+	}
+
+	return OK;
+}
+
+int libexec_clearproc_vm_procctl(struct exec_info *execi)
+{
+	return vm_procctl(execi->proc_e, VMPPARAM_CLEAR);
+}
+
+int libexec_clear_sys_memset(struct exec_info *execi, off_t vaddr, size_t len)
+{
+	return sys_memset(execi->proc_e, 0, vaddr, len);
+}
+
 void libexec_patch_ptr(char stack[ARG_MAX], vir_bytes base)
 {
 /* When doing an exec(name, argv, envp) call, the user builds up a stack

View file

@@ -11,6 +11,11 @@ typedef int (*libexec_loadfunc_t)(struct exec_info *execi,
 typedef int (*libexec_clearfunc_t)(struct exec_info *execi,
 	off_t vaddr, size_t len);

+typedef int (*libexec_allocfunc_t)(struct exec_info *execi,
+	off_t vaddr, size_t len);
+
+typedef int (*libexec_procclearfunc_t)(struct exec_info *execi);
+
 struct exec_info {
     /* Filled in by libexec caller */
     endpoint_t proc_e;			/* Process endpoint */
@@ -21,11 +26,16 @@ struct exec_info {
     uid_t new_uid;			/* Process UID after exec */
     gid_t new_gid;			/* Process GID after exec */
     int allow_setuid;			/* Allow set{u,g}id execution? */
-    libexec_loadfunc_t load;		/* Load callback */
-    libexec_clearfunc_t clear;		/* Clear callback */
-    void *opaque;			/* Callback data */
     vir_bytes stack_size;		/* Desired stack size */

+    /* Callback pointers for use by libexec */
+    libexec_loadfunc_t copymem;		/* Copy callback */
+    libexec_clearfunc_t clearmem;	/* Clear callback */
+    libexec_allocfunc_t allocmem_prealloc;	/* Alloc callback */
+    libexec_allocfunc_t allocmem_ondemand;	/* Alloc callback */
+    libexec_procclearfunc_t clearproc;	/* Clear process callback */
+    void *opaque;			/* Callback data */
+
     /* Filled in by libexec load function */
     vir_bytes load_base;		/* Where executable is loaded */
     vir_bytes pc;			/* Entry point of exec file */
@@ -45,7 +55,11 @@ void libexec_patch_ptr(char stack[ARG_MAX], vir_bytes base);
 int libexec_pm_newexec(endpoint_t proc_e, struct exec_info *execi);

 typedef int (*libexec_exec_loadfunc_t)(struct exec_info *execi);
 int libexec_load_elf(struct exec_info *execi);
+
+int libexec_alloc_mmap_prealloc(struct exec_info *execi, off_t vaddr, size_t len);
+int libexec_alloc_mmap_ondemand(struct exec_info *execi, off_t vaddr, size_t len);
+int libexec_clearproc_vm_procctl(struct exec_info *execi);
+int libexec_clear_sys_memset(struct exec_info *execi, off_t vaddr, size_t len);
+
 #endif /* !_LIBEXEC_H_ */

View file

@@ -1,6 +1,7 @@
 #include "syslib.h"

-int sys_memset(unsigned long pattern, phys_bytes base, phys_bytes bytes)
+int sys_memset(endpoint_t who, unsigned long pattern,
+	phys_bytes base, phys_bytes bytes)
 {
 /* Zero a block of data. */
   message mess;
@@ -10,6 +11,7 @@ int sys_memset(unsigned long pattern, phys_bytes base, phys_bytes bytes)
   mess.MEM_PTR = (char *) base;
   mess.MEM_COUNT = bytes;
   mess.MEM_PATTERN = pattern;
+  mess.MEM_PROCESS = who;

   return(_kernel_call(SYS_MEMSET, &mess));
 }

View file

@@ -126,7 +126,13 @@ static int do_exec(int proc_e, char *exec, size_t exec_len, char *progname,
 	strncpy(execi.progname, progname, PROC_NAME_LEN-1);
 	execi.progname[PROC_NAME_LEN-1] = '\0';
 	execi.frame_len = frame_len;
-	execi.load = read_seg;
+
+	/* callback functions and data */
+	execi.copymem = read_seg;
+	execi.clearproc = libexec_clearproc_vm_procctl;
+	execi.clearmem = libexec_clear_sys_memset;
+	execi.allocmem_prealloc = libexec_alloc_mmap_prealloc;
+	execi.allocmem_ondemand = libexec_alloc_mmap_ondemand;

 	for(i = 0; exec_loaders[i].load_object != NULL; i++) {
 	    r = (*exec_loaders[i].load_object)(&execi);
@@ -140,6 +146,10 @@ static int do_exec(int proc_e, char *exec, size_t exec_len, char *progname,
 		return r;
 	}

+	/* Inform PM */
+	if((r = libexec_pm_newexec(execi.proc_e, &execi)) != OK)
+		return r;
+
 	/* Patch up stack and copy it from RS to new core image. */
 	vsp = execi.stack_high;
 	vsp -= frame_len;
@@ -194,7 +204,7 @@ size_t seg_bytes /* how much is to be transferred? */
 	if (off+seg_bytes > execi->hdr_len) return ENOEXEC;
 	if((r= sys_vircopy(SELF, D, ((vir_bytes)execi->hdr)+off,
 		execi->proc_e, D, seg_addr, seg_bytes)) != OK) {
-		printf("RS: exec read_seg: copy 0x%lx bytes into %d at 0x%lx failed: %d\n",
+		printf("RS: exec read_seg: copy 0x%x bytes into %d at 0x%lx failed: %d\n",
 			seg_bytes, execi->proc_e, seg_addr, r);
 	}
 	return r;

View file

@@ -296,9 +296,14 @@ int pm_exec(endpoint_t proc_e, vir_bytes path, size_t path_len,
 	Get_read_vp(execi, fullpath, 0, 0, &resolve, fp);
   }

+  /* callback functions and data */
+  execi.args.copymem = read_seg;
+  execi.args.clearproc = libexec_clearproc_vm_procctl;
+  execi.args.clearmem = libexec_clear_sys_memset;
+  execi.args.allocmem_prealloc = libexec_alloc_mmap_prealloc;
+  execi.args.allocmem_ondemand = libexec_alloc_mmap_ondemand;
   execi.args.opaque = &execi;
-  execi.args.load = &read_seg;
-  execi.args.clear = NULL;
+
   execi.args.proc_e = proc_e;
   execi.args.frame_len = frame_len;
@@ -310,6 +315,9 @@ int pm_exec(endpoint_t proc_e, vir_bytes path, size_t path_len,

   FAILCHECK(r);

+  /* Inform PM */
+  FAILCHECK(libexec_pm_newexec(proc_e, &execi.args));
+
   /* Save off PC */
   *pc = execi.args.pc;

View file

@@ -506,7 +506,7 @@ static phys_bytes alloc_pages(int pages, int memflags, phys_bytes *len)
 	if(memflags & PAF_CLEAR) {
 		int s;
-		if ((s= sys_memset(0, CLICK_SIZE*mem,
+		if ((s= sys_memset(NONE, 0, CLICK_SIZE*mem,
 			VM_PAGE_SIZE*pages)) != OK)
 			panic("alloc_mem: sys_memset failed: %d", s);
 	}
@@ -543,7 +543,7 @@ static void free_pages(phys_bytes pageno, int npages)
 	assert(!addr_search(&addravl, pageno, AVL_EQUAL));
 #if JUNKFREE
-	if(sys_memset(0xa5a5a5a5, VM_PAGE_SIZE * pageno,
+	if(sys_memset(NONE, 0xa5a5a5a5, VM_PAGE_SIZE * pageno,
 		VM_PAGE_SIZE * npages) != OK)
 		panic("free_pages: sys_memset failed");
 #endif

View file

@@ -42,11 +42,14 @@ int do_mmap(message *m)
 	int mfflags = 0;
 	vir_bytes addr;
 	struct vir_region *vr = NULL;
+	int execpriv = 0;
+
+	/* RS and VFS can do slightly more special mmap() things */
+	if(m->m_source == VFS_PROC_NR || m->m_source == RS_PROC_NR)
+		execpriv = 1;

 	if(m->VMM_FLAGS & MAP_THIRDPARTY) {
-		/* exec()ers, i.e. RS & VFS, can mmap() on behalf of anyone. */
-		if(m->m_source != VFS_PROC_NR && m->m_source != RS_PROC_NR)
-			return EPERM;
+		if(!execpriv) return EPERM;
 		if((r=vm_isokendpt(m->VMM_FORWHOM, &n)) != OK)
 			return ESRCH;
 	} else {
@@ -79,6 +82,10 @@ int do_mmap(message *m)
 		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
 		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
 		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
+		if(m->VMM_FLAGS & MAP_UNINITIALIZED) {
+			if(!execpriv) return EPERM;
+			vrflags |= VR_UNINITIALIZED;
+		}
 		if(m->VMM_FLAGS & MAP_IPC_SHARED) {
 			vrflags |= VR_SHARED;
 			/* Shared memory has to be preallocated. */

View file

@@ -583,6 +583,11 @@ USE(newregion,
 		}
 	}

+	/* Pre-allocations should be uninitialized, but after that it's a
+	 * different story.
+	 */
+	newregion->flags &= ~VR_UNINITIALIZED;
+
 	/* Link it. */
 	region_insert(&vmp->vm_regions_avl, newregion);

@@ -1266,6 +1271,11 @@ int write;
 	struct phys_region *physr, *nextphysr;
 	int changes = 0;
 	physr_iter iter;
+	u32_t allocflags = 0;
+
+	if(!(region->flags & VR_UNINITIALIZED)) {
+		allocflags = PAF_CLEAR;
+	}

 #define FREE_RANGE_HERE(er1, er2) { \
 	struct phys_region *r1 = (er1), *r2 = (er2); \
@@ -1277,7 +1287,7 @@ int write;
 	if(start < end) { \
 		SANITYCHECK(SCL_DETAIL); \
 		if(map_new_physblock(vmp, region, start, \
-			end-start, MAP_NONE, PAF_CLEAR, 0) != OK) { \
+			end-start, MAP_NONE, allocflags, 0) != OK) { \
 			SANITYCHECK(SCL_DETAIL); \
 			return ENOMEM; \
 		} \

View file

@@ -55,6 +55,7 @@ typedef struct vir_region {
 #define VR_LOWER1MB	0x010
 #define VR_CONTIG	0x020	/* Must be physically contiguous. */
 #define VR_SHARED	0x040
+#define VR_UNINITIALIZED 0x080	/* Do not clear after allocation */

 /* Mapping type: */
 #define VR_ANON		0x100	/* Memory to be cleared and allocated */