More system processes: the previous limit was not enough for the release script to run on some configurations

Erik van der Kouwe 2010-05-20 08:05:07 +00:00
parent 456a125e00
commit 5f15ec05b2
20 changed files with 337 additions and 4 deletions

View file

@@ -15,7 +15,7 @@
#endif
#define _NR_PROCS 256
-#define _NR_SYS_PROCS 32
+#define _NR_SYS_PROCS 64
/* Set the CHIP type based on the machine selected. The symbol CHIP is actually
* indicative of more than just the CPU. For example, machines for which

View file

@@ -130,4 +130,6 @@ typedef char *caddr_t;
#endif /* _MINIX || __minix */
+/*XXX*/ void vmmcall(u32_t eax, u32_t ebx, u32_t ecx);
#endif /* _TYPES_H */

View file

@@ -39,7 +39,9 @@ struct proc *p;
return OK;
case VMCTL_INCSP:
/* Increase process SP. */
+vmmcall(0x12345601, 0, 40);
p->p_reg.sp += m_ptr->SVMCTL_VALUE;
+vmmcall(0x12345601, 0, 41);
return OK;
case VMCTL_I386_KERNELLIMIT:
{
@@ -55,7 +57,9 @@ struct proc *p;
}
case VMCTL_FLUSHTLB:
{
+vmmcall(0x12345601, 0, 42);
reload_cr3();
+vmmcall(0x12345601, 0, 43);
return OK;
}
}

View file

@@ -43,11 +43,16 @@ FORWARD _PROTOTYPE( void vm_enable_paging, (void) );
PUBLIC void vm_init(struct proc *newptproc)
{
+vmmcall(0x12345602, vm_running, 5);
if(vm_running)
panic("vm_init: vm_running");
+vmmcall(0x12345602, (unsigned) newptproc, 6);
switch_address_space(newptproc);
+vmmcall(0x12345602, (unsigned) ptproc, 7);
assert(ptproc == newptproc);
+vmmcall(0x12345602, 0, 8);
vm_enable_paging();
+vmmcall(0x12345602, 0, 9);
vm_running = 1;
}
@@ -259,36 +264,49 @@ PRIVATE void vm_enable_paging(void)
u32_t cr0, cr4;
int pgeok;
+vmmcall(0x12345602, 0, 10);
psok = _cpufeature(_CPUF_I386_PSE);
+vmmcall(0x12345602, psok, 11);
pgeok = _cpufeature(_CPUF_I386_PGE);
+vmmcall(0x12345602, pgeok, 12);
cr0= read_cr0();
+vmmcall(0x12345602, cr0, 13);
cr4= read_cr4();
/* First clear PG and PGE flag, as PGE must be enabled after PG. */
+vmmcall(0x12345602, cr4, 14);
write_cr0(cr0 & ~I386_CR0_PG);
+vmmcall(0x12345602, 0, 15);
write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));
+vmmcall(0x12345602, 0, 16);
cr0= read_cr0();
+vmmcall(0x12345602, cr0, 17);
cr4= read_cr4();
/* Our first page table contains 4MB entries. */
if(psok)
cr4 |= I386_CR4_PSE;
+vmmcall(0x12345602, cr4, 18);
write_cr4(cr4);
/* First enable paging, then enable global page flag. */
+vmmcall(0x12345602, 0, 19);
cr0 |= I386_CR0_PG;
write_cr0(cr0 );
+vmmcall(0x12345602, 0, 20);
cr0 |= I386_CR0_WP;
write_cr0(cr0);
/* May we enable these features? */
+vmmcall(0x12345602, 0, 21);
if(pgeok)
cr4 |= I386_CR4_PGE;
write_cr4(cr4);
+vmmcall(0x12345602, 0, 22);
}
PUBLIC vir_bytes alloc_remote_segment(u32_t *selector,
@@ -1012,6 +1030,7 @@ PUBLIC int arch_enable_paging(struct proc * caller, const message * m_ptr)
/*
* copy the extra data associated with the call from userspace
*/
+vmmcall(0x12345602, 0, 23);
if((r=data_copy(caller->p_endpoint, (vir_bytes)m_ptr->SVMCTL_VALUE,
KERNEL, (vir_bytes) &ep_data, sizeof(ep_data))) != OK) {
printf("vmctl_enable_paging: data_copy failed! (%d)\n", r);
@@ -1022,21 +1041,26 @@ PUBLIC int arch_enable_paging(struct proc * caller, const message * m_ptr)
* when turning paging on i386 we also change the segment limits to make
* the special mappings requested by the kernel reachable
*/
+vmmcall(0x12345602, 0, 24);
if ((r = prot_set_kern_seg_limit(ep_data.data_seg_limit)) != OK)
return r;
/*
* install the new map provided by the call
*/
+vmmcall(0x12345602, 0, 25);
if (newmap(caller, caller, ep_data.mem_map) != OK)
panic("arch_enable_paging: newmap failed");
+vmmcall(0x12345602, 0, 26);
FIXLINMSG(caller);
+vmmcall(0x12345602, 0, 27);
assert(caller->p_delivermsg_lin == umap_local(caller, D,
caller->p_delivermsg_vir, sizeof(message)));
#ifdef CONFIG_APIC
/* if local APIC is enabled */
+vmmcall(0x12345602, 0, 28);
if (lapic_addr) {
lapic_addr = lapic_addr_vaddr;
lapic_eoi_addr = LAPIC_EOI;
@@ -1049,9 +1073,11 @@ PUBLIC int arch_enable_paging(struct proc * caller, const message * m_ptr)
* lapic address. Bad things would happen. It is unfortunate but such is
* life
*/
+vmmcall(0x12345602, 0, 29);
i386_watchdog_start();
#endif
+vmmcall(0x12345602, 0, 30);
return OK;
}

View file

@@ -270,25 +270,32 @@ PUBLIC void alloc_segments(register struct proc *rp)
phys_bytes data_bytes;
int privilege;
+/*XXX*/vmmcall(0x12345603, 0, 110);
data_bytes = (phys_bytes) (rp->p_memmap[S].mem_vir +
rp->p_memmap[S].mem_len) << CLICK_SHIFT;
+/*XXX*/vmmcall(0x12345603, 0, 111);
if (rp->p_memmap[T].mem_len == 0)
code_bytes = data_bytes; /* common I&D, poor protect */
else
code_bytes = (phys_bytes) rp->p_memmap[T].mem_len << CLICK_SHIFT;
+/*XXX*/vmmcall(0x12345603, 0, 112);
privilege = USER_PRIVILEGE;
+/*XXX*/vmmcall(0x12345603, 0, 113);
init_codeseg(&rp->p_seg.p_ldt[CS_LDT_INDEX],
(phys_bytes) rp->p_memmap[T].mem_phys << CLICK_SHIFT,
code_bytes, privilege);
+/*XXX*/vmmcall(0x12345603, 0, 114);
init_dataseg(&rp->p_seg.p_ldt[DS_LDT_INDEX],
(phys_bytes) rp->p_memmap[D].mem_phys << CLICK_SHIFT,
data_bytes, privilege);
+/*XXX*/vmmcall(0x12345603, 0, 115);
rp->p_reg.cs = (CS_LDT_INDEX * DESC_SIZE) | TI | privilege;
rp->p_reg.gs =
rp->p_reg.fs =
rp->p_reg.ss =
rp->p_reg.es =
rp->p_reg.ds = (DS_LDT_INDEX*DESC_SIZE) | TI | privilege;
+/*XXX*/vmmcall(0x12345603, 0, 116);
}
/*===========================================================================*
@@ -428,26 +435,34 @@ PUBLIC int prot_set_kern_seg_limit(const vir_bytes limit)
int orig_click;
int incr_clicks;
+vmmcall(0x12345603, limit, 31);
if(limit <= kinfo.data_base) {
+vmmcall(0x12345603, kinfo.data_base, 38);
printf("prot_set_kern_seg_limit: limit bogus\n");
return EINVAL;
}
/* Do actual increase. */
+vmmcall(0x12345603, 0, 32);
orig_click = kinfo.data_size / CLICK_SIZE;
kinfo.data_size = limit - kinfo.data_base;
incr_clicks = kinfo.data_size / CLICK_SIZE - orig_click;
+vmmcall(0x12345603, 0, 33);
prot_init();
/* Increase kernel processes too. */
+vmmcall(0x12345603, 0, 34);
for (rp = BEG_PROC_ADDR; rp < END_PROC_ADDR; ++rp) {
+vmmcall(0x12345603, 0, 35);
if (isemptyp(rp) || !iskernelp(rp))
continue;
rp->p_memmap[S].mem_len += incr_clicks;
+vmmcall(0x12345603, 0, 36);
alloc_segments(rp);
rp->p_memmap[S].mem_len -= incr_clicks;
}
+vmmcall(0x12345603, 0, 37);
return OK;
}

View file

@@ -20,11 +20,16 @@ PUBLIC int do_newmap(struct proc * caller, message * m_ptr)
struct mem_map *map_ptr; /* virtual address of map inside caller */
int proc_nr;
+/*XXX*/vmmcall(0x12345604, 0, 100);
map_ptr = (struct mem_map *) m_ptr->PR_MEM_PTR;
+/*XXX*/vmmcall(0x12345604, 0, 101);
if (! isokendpt(m_ptr->PR_ENDPT, &proc_nr)) return(EINVAL);
+/*XXX*/vmmcall(0x12345604, 0, 102);
if (iskerneln(proc_nr)) return(EPERM);
+/*XXX*/vmmcall(0x12345604, 0, 103);
rp = proc_addr(proc_nr);
+/*XXX*/vmmcall(0x12345604, 0, 104);
return newmap(caller, rp, map_ptr);
}
@@ -36,13 +41,16 @@ PUBLIC int newmap(struct proc *caller, struct proc *rp, struct mem_map *map_ptr)
{
int r;
/* Fetch the memory map. */
+/*XXX*/vmmcall(0x12345604, 0, 105);
if((r=data_copy(caller->p_endpoint, (vir_bytes) map_ptr,
KERNEL, (vir_bytes) rp->p_memmap, sizeof(rp->p_memmap))) != OK) {
printf("newmap: data_copy failed! (%d)\n", r);
return r;
}
+/*XXX*/vmmcall(0x12345604, 0, 106);
alloc_segments(rp);
+/*XXX*/vmmcall(0x12345604, 0, 107);
return(OK);
}

View file

@@ -18,7 +18,7 @@
*===========================================================================*/
PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
{
-int proc_nr;
+int proc_nr, r;
endpoint_t ep = m_ptr->SVMCTL_WHO;
struct proc *p, *rp, *target;
@@ -121,12 +121,18 @@ PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
return OK;
case VMCTL_ENABLE_PAGING:
+vmmcall(0x12345605, vm_running, 1);
if(vm_running)
panic("do_vmctl: paging already enabled");
+vmmcall(0x12345605, (unsigned) p, 2);
vm_init(p);
+vmmcall(0x12345605, vm_running, 3);
if(!vm_running)
panic("do_vmctl: paging enabling failed");
-return arch_enable_paging(caller, m_ptr);
+vmmcall(0x12345605, (unsigned) caller, 4);
+r = arch_enable_paging(caller, m_ptr);
+vmmcall(0x12345605, r, 39);
+return r;
case VMCTL_KERN_PHYSMAP:
{
int i = m_ptr->SVMCTL_VALUE;

View file

@@ -22,6 +22,7 @@ PUBLIC void panic(const char *fmt, ...)
{
va_list arg;
/* The system has run aground of a fatal kernel error. Terminate execution. */
+/*XXX*/vmmcall(0x12345610, ((unsigned *) &fmt)[-1], 1);
if (minix_panicing == ARE_PANICING) {
arch_monitor();
}
@@ -49,6 +50,7 @@ int c; /* character to append */
/* Accumulate a single character for a kernel message. Send a notification
* to the output driver if an END_OF_KMESS is encountered.
*/
+/*XXX*/vmmcall(0x12345612, c, 1);
if (c != END_OF_KMESS) {
if (do_serial_debug) {
if(c == '\n')

View file

@@ -53,3 +53,23 @@ flip:
xorl %edx, %eax /* See if the bit changed */
testl %ecx, %eax
ret
+.globl _vmmcall
+_vmmcall:
+push %ebp
+mov %esp, %ebp
+push %eax
+push %ebx
+push %ecx
+push %edx
+movl 8(%ebp), %eax
+movl 12(%ebp), %ebx
+movl 16(%ebp), %ecx
+movl 4(%ebp), %edx
+.byte 0x0F, 0x01, 0xD9
+pop %edx
+pop %ecx
+pop %ebx
+pop %eax
+pop %ebp
+ret

View file

@@ -19,6 +19,7 @@ void kputc(int c)
/* Accumulate another character. If 0 or buffer full, print it. */
static int buf_count; /* # characters in the buffer */
+/*XXX*/vmmcall(0x12345613, c, 1);
if ((c == 0 && buf_count > 0) || buf_count == sizeof(print_buf)) {
sys_sysctl(SYSCTL_CODE_DIAG, print_buf, buf_count);
buf_count = 0;

View file

@@ -21,6 +21,7 @@ PUBLIC void panic(const char *fmt, ...)
static int panicing= 0;
va_list args;
+/*XXX*/vmmcall(0x12345611, ((unsigned *) &fmt)[-1], 1);
if(panicing) return;
panicing= 1;

View file

@@ -41,7 +41,9 @@ PUBLIC void sef_startup()
int r, status;
/* Get information about self. */
+/*XXX*/vmmcall(0x12345606, 0, 1);
r = sys_whoami(&sef_self_endpoint, sef_self_name, SEF_SELF_NAME_MAXLEN);
+/*XXX*/vmmcall(0x12345606, r, 2);
if ( r != OK) {
sef_self_endpoint = SELF;
sprintf(sef_self_name, "%s", "Unknown");
@@ -49,6 +51,7 @@ PUBLIC void sef_startup()
#if INTERCEPT_SEF_INIT_REQUESTS
/* Intercept SEF Init requests. */
+/*XXX*/vmmcall(0x12345606, sef_self_endpoint, 3);
if(sef_self_endpoint == RS_PROC_NR) {
if((r = do_sef_rs_init()) != OK) {
panic("unable to complete init: %d", r);
@@ -57,20 +60,30 @@ PUBLIC void sef_startup()
else {
message m;
+/*XXX*/vmmcall(0x12345606, 0, 4);
r = receive(RS_PROC_NR, &m, &status);
+/*XXX*/vmmcall(0x12345606, r, 5);
+/*XXX*/vmmcall(0x12345606, status, 6);
+/*XXX*/vmmcall(0x12345606, m.m_type, 7);
if(r != OK) {
panic("unable to receive from RS: %d", r);
}
+/*XXX*/vmmcall(0x12345606, 0, 8);
if(IS_SEF_INIT_REQUEST(&m)) {
+/*XXX*/vmmcall(0x12345606, 0, 9);
if((r = do_sef_init_request(&m)) != OK) {
panic("unable to process init request: %d", r);
}
+/*XXX*/vmmcall(0x12345606, 0, 10);
}
else {
+/*XXX*/vmmcall(0x12345606, 0, 11);
panic("got an unexpected message type %d", m.m_type);
}
+/*XXX*/vmmcall(0x12345606, 0, 12);
}
#endif
+/*XXX*/vmmcall(0x12345606, 0, 13);
}
/*===========================================================================*
/*===========================================================================* /*===========================================================================*

View file

@@ -40,6 +40,7 @@ PUBLIC int do_sef_init_request(message *m_ptr)
sef_init_info_t info;
/* Debug. */
+/*XXX*/vmmcall(0x12345607, 0, 14);
#if SEF_INIT_DEBUG
sef_init_debug_begin();
sef_init_dprint("%s. Got a SEF Init request of type: %d. About to init.\n",
@@ -48,29 +49,40 @@ PUBLIC int do_sef_init_request(message *m_ptr)
#endif
/* Let the callback code handle the request. */
+/*XXX*/vmmcall(0x12345607, 0, 15);
type = m_ptr->RS_INIT_TYPE;
+/*XXX*/vmmcall(0x12345607, type, 16);
info.rproctab_gid = m_ptr->RS_INIT_RPROCTAB_GID;
info.old_endpoint = m_ptr->RS_INIT_OLD_ENDPOINT;
switch(type) {
case SEF_INIT_FRESH:
+/*XXX*/vmmcall(0x12345607, 0, 17);
r = sef_cbs.sef_cb_init_fresh(type, &info);
+/*XXX*/vmmcall(0x12345607, r, 18);
break;
case SEF_INIT_LU:
+/*XXX*/vmmcall(0x12345607, 0, 19);
r = sef_cbs.sef_cb_init_lu(type, &info);
+/*XXX*/vmmcall(0x12345607, r, 20);
break;
case SEF_INIT_RESTART:
+/*XXX*/vmmcall(0x12345607, 0, 21);
r = sef_cbs.sef_cb_init_restart(type, &info);
+/*XXX*/vmmcall(0x12345607, r, 22);
break;
default:
/* Not a valid SEF init type. */
+/*XXX*/vmmcall(0x12345607, 0, 23);
r = EINVAL;
break;
}
/* Report back to RS. */
m_ptr->RS_INIT_RESULT = r;
+/*XXX*/vmmcall(0x12345607, 0, 24);
r = sendrec(RS_PROC_NR, m_ptr);
+/*XXX*/vmmcall(0x12345607, 0, 25);
return r;
}
@@ -81,7 +93,9 @@ PUBLIC int do_sef_init_request(message *m_ptr)
PUBLIC void sef_setcb_init_fresh(sef_cb_init_t cb)
{
assert(cb != NULL);
+/*XXX*/vmmcall(0x12345607, (unsigned) cb, 26);
sef_cbs.sef_cb_init_fresh = cb;
+/*XXX*/vmmcall(0x12345607, 0, 27);
}
/*===========================================================================*
/*===========================================================================* /*===========================================================================*

View file

@@ -75,6 +75,7 @@ int main(void)
struct sigaction sa;
struct stat stb;
+/*XXX*/vmmcall(0x12345608, 0, 1);
#define OPENFDS \
if (fstat(0, &stb) < 0) { \
/* Open standard input, output & error. */ \
@@ -83,6 +84,7 @@ int main(void)
dup(1); \
}
+/*XXX*/vmmcall(0x12345608, 0, 2);
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
@@ -105,11 +107,15 @@ int main(void)
sigaction(SIGABRT, &sa, NULL);
/* Execute the /etc/rc file. */
+/*XXX*/vmmcall(0x12345608, 0, 3);
if ((pid = fork()) != 0) {
+/*XXX*/vmmcall(0x12345608, 0, 4);
/* Parent just waits. */
while (wait(NULL) != pid) {
+/*XXX*/vmmcall(0x12345608, 0, 5);
if (gotabrt) reboot(RBT_HALT);
}
+/*XXX*/vmmcall(0x12345608, 0, 6);
} else {
#if ! SYS_GETKENV
struct sysgetenv sysgetenv;
@@ -118,6 +124,7 @@ int main(void)
static char *rc_command[] = { "sh", "/etc/rc", NULL, NULL, NULL };
char **rcp = rc_command + 2;
+/*XXX*/vmmcall(0x12345608, 0, 7);
/* Get the boot options from the boot environment. */
sysgetenv.key = "bootopts";
sysgetenv.keylen = 8+1;
@@ -126,11 +133,14 @@ int main(void)
if (svrctl(MMGETPARAM, &sysgetenv) == 0) *rcp++ = bootopts;
*rcp = "start";
+/*XXX*/vmmcall(0x12345608, 0, 8);
execute(rc_command);
+/*XXX*/vmmcall(0x12345608, 0, 9);
report(2, "sh /etc/rc");
_exit(1); /* impossible, we hope */
}
+/*XXX*/vmmcall(0x12345608, 0, 10);
OPENFDS;
/* Clear /etc/utmp if it exists. */
@@ -148,7 +158,9 @@ int main(void)
check = 1;
while (1) {
+/*XXX*/vmmcall(0x12345608, 0, 11);
while ((pid = waitpid(-1, NULL, check ? WNOHANG : 0)) > 0) {
+/*XXX*/vmmcall(0x12345608, 0, 12);
/* Search to see which line terminated. */
for (linenr = 0; linenr < PIDSLOTS; linenr++) {
slotp = &slots[linenr];
@@ -160,6 +172,7 @@ int main(void)
}
}
}
+/*XXX*/vmmcall(0x12345608, 0, 13);
/* If a signal 1 (SIGHUP) is received, simply reset error counts. */
if (gothup) {
@@ -193,6 +206,7 @@ int main(void)
endttyent();
}
check = 0;
+/*XXX*/vmmcall(0x12345608, 0, 14);
}
}

View file

@@ -64,17 +64,22 @@ PUBLIC int main()
sigset_t sigset;
/* SEF local startup. */
+/*XXX*/vmmcall(0x12345609, 0, 1);
sef_local_startup();
+/*XXX*/vmmcall(0x12345609, 0, 2);
sched_init(); /* initialize user-space scheduling */
/* This is PM's main loop- get work and do it, forever and forever. */
+/*XXX*/vmmcall(0x12345609, 0, 3);
while (TRUE) {
int ipc_status;
/* Wait for the next message and extract useful information from it. */
+/*XXX*/vmmcall(0x12345609, 0, 4);
if (sef_receive_status(ANY, &m_in, &ipc_status) != OK)
panic("PM sef_receive_status error");
who_e = m_in.m_source; /* who sent the message */
+/*XXX*/vmmcall(0x12345609, m_in.m_type, 5);
if(pm_isokendpt(who_e, &who_p) != OK)
panic("PM got message from invalid endpoint: %d", who_e);
call_nr = m_in.m_type; /* system call number */
@@ -83,6 +88,7 @@ PUBLIC int main()
* calling. This can happen in case of synchronous alarms (CLOCK) or or
* event like pending kernel signals (SYSTEM).
*/
+/*XXX*/vmmcall(0x12345609, call_nr, 6);
mp = &mproc[who_p < 0 ? PM_PROC_NR : who_p];
if(who_p >= 0 && mp->mp_endpoint != who_e) {
panic("PM endpoint number out of sync with source: %d",
@@ -90,6 +96,7 @@ PUBLIC int main()
}
/* Drop delayed calls from exiting processes. */
+/*XXX*/vmmcall(0x12345609, 0, 7);
if (mp->mp_flags & EXITING)
continue;
@@ -110,6 +117,7 @@ PUBLIC int main()
continue;
}
+/*XXX*/vmmcall(0x12345609, 0, 8);
switch(call_nr)
{
case PM_SETUID_REPLY:
@@ -149,8 +157,10 @@ PUBLIC int main()
}
/* Send reply. */
+/*XXX*/vmmcall(0x12345609, result, 9);
if (result != SUSPEND) setreply(who_p, result);
sendreply();
+/*XXX*/vmmcall(0x12345609, 0, 10);
}
return(OK);
}

View file

@@ -51,11 +51,14 @@ PUBLIC int main(void)
int result; /* result to return */
/* SEF local startup. */
+/*XXX*/vmmcall(0x1234560a, 0, 1);
sef_local_startup();
/* Main loop - get work and do it, forever. */
+/*XXX*/vmmcall(0x1234560a, 0, 2);
while (TRUE) {
+/*XXX*/vmmcall(0x1234560a, 0, 3);
/* Wait for request message. */
get_work(&m, &ipc_status);
who_e = m.m_source;
@@ -63,6 +66,7 @@ PUBLIC int main(void)
panic("message from bogus source: %d", who_e);
}
+/*XXX*/vmmcall(0x1234560a, m.m_type, 4);
call_nr = m.m_type;
/* Now determine what to do. Four types of requests are expected:
@@ -75,6 +79,7 @@ PUBLIC int main(void)
/* Notification messages are control messages and do not need a reply.
* These include heartbeat messages and system notifications.
*/
+/*XXX*/vmmcall(0x1234560a, call_nr, 5);
if (is_ipc_notify(ipc_status)) {
switch (who_p) {
case CLOCK:
@@ -104,6 +109,7 @@ PUBLIC int main(void)
}
/* Handler functions are responsible for permission checking. */
+/*XXX*/vmmcall(0x1234560a, 0, 6);
switch(call_nr) {
/* User requests. */
case RS_UP: result = do_up(&m); break;
@@ -123,11 +129,13 @@ PUBLIC int main(void)
result = EINVAL;
}
+/*XXX*/vmmcall(0x1234560a, result, 7);
/* Finally send reply message, unless disabled. */
if (result != EDONTREPLY) {
m.m_type = result;
reply(who_e, &m);
}
+/*XXX*/vmmcall(0x1234560a, 0, 8);
}
}
}
@@ -138,14 +146,19 @@ PUBLIC int main(void)
PRIVATE void sef_local_startup()
{
/* Register init callbacks. */
+/*XXX*/vmmcall(0x1234560a, 9, 9);
sef_setcb_init_fresh(sef_cb_init_fresh); /* RS can only start fresh. */
/* Register signal callbacks. */
+/*XXX*/vmmcall(0x1234560a, 10, 10);
sef_setcb_signal_handler(sef_cb_signal_handler);
+/*XXX*/vmmcall(0x1234560a, 11, 11);
sef_setcb_signal_manager(sef_cb_signal_manager);
+/*XXX*/vmmcall(0x1234560a, 12, 12);
/* Let SEF perform startup. */
sef_startup();
+/*XXX*/vmmcall(0x1234560a, 13, 13);
}
/*===========================================================================*
@@ -167,19 +180,24 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
struct boot_image_dev *boot_image_dev;
/* See if we run in verbose mode. */
+/*XXX*/vmmcall(0x1234560a, 14, 14);
env_parse("rs_verbose", "d", 0, &rs_verbose, 0, 1);
+/*XXX*/vmmcall(0x1234560a, 15, 15);
if ((s = sys_getinfo(GET_HZ, &system_hz, sizeof(system_hz), 0, 0)) != OK)
panic("Cannot get system timer frequency\n");
/* Initialize the global init descriptor. */
+/*XXX*/vmmcall(0x1234560a, 16, 16);
rinit.rproctab_gid = cpf_grant_direct(ANY, (vir_bytes) rprocpub,
sizeof(rprocpub), CPF_READ);
+/*XXX*/vmmcall(0x1234560a, 17, 17);
if(!GRANT_VALID(rinit.rproctab_gid)) {
panic("unable to create rprocpub table grant: %d", rinit.rproctab_gid);
}
/* Initialize some global variables. */
+/*XXX*/vmmcall(0x1234560a, 18, 18);
rupdate.flags = 0;
shutting_down = FALSE;
@@ -187,6 +205,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
if ((s = sys_getimage(image)) != OK) {
panic("unable to get copy of boot image table: %d", s);
}
+/*XXX*/vmmcall(0x1234560a, 19, 19);
/* Determine the number of system services in the boot image table and
* compute the size required for the boot image buffer.
@@ -197,18 +216,21 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
ip = &image[i];
/* System services only. */
+/*XXX*/vmmcall(0x1234560a, 20, 20);
if(iskerneln(_ENDPOINT_P(ip->endpoint))) {
continue;
}
nr_image_srvs++;
/* Lookup the corresponding entry in the boot image sys table. */
+/*XXX*/vmmcall(0x1234560a, 21, 21);
boot_image_info_lookup(ip->endpoint, image,
NULL, NULL, &boot_image_sys, NULL);
/* If we must keep a copy of this system service, read the header
* and increase the size of the boot image buffer.
*/
+/*XXX*/vmmcall(0x1234560a, 22, 22);
if(boot_image_sys->flags & SF_USE_REPL) {
boot_image_sys->flags |= SF_USE_COPY;
}
@@ -219,11 +241,13 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
boot_image_buffer_size += header.a_hdrlen
+ header.a_text + header.a_data;
}
+/*XXX*/vmmcall(0x1234560a, 23, 23);
}
/* Determine the number of entries in the boot image priv table and make sure
* it matches the number of system services in the boot image table.
*/
+/*XXX*/vmmcall(0x1234560a, 24, 24);
nr_image_priv_srvs = 0;
for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
boot_image_priv = &boot_image_priv_table[i];
@@ -239,6 +263,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
}
/* Allocate boot image buffer. */
+/*XXX*/vmmcall(0x1234560a, 25, 25);
if(boot_image_buffer_size > 0) {
boot_image_buffer = rs_startup_sbrk(boot_image_buffer_size);
if(boot_image_buffer == (char *) -1) {
@@ -247,6 +272,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
}
/* Reset the system process table. */
+/*XXX*/vmmcall(0x1234560a, 26, 26);
for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
rp->r_flags = 0;
rp->r_pub = &rprocpub[rp - rproc];
@@ -260,10 +286,12 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
* In addition, set priviliges, sys properties, and dev properties (if any)
* for every system service.
*/
+/*XXX*/vmmcall(0x1234560a, 27, 27);
for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
boot_image_priv = &boot_image_priv_table[i];
/* System services only. */
+/*XXX*/vmmcall(0x1234560a, 28, 28);
if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
continue;
}
@@ -277,6 +305,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
/*
* Get a copy of the executable image if required.
*/
+/*XXX*/vmmcall(0x1234560a, 29, 29);
rp->r_exec_len = 0;
rp->r_exec = NULL;
if(boot_image_sys->flags & SF_USE_COPY) {
@@ -293,6 +322,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
rpub->period = boot_image_priv->period;
if(boot_image_priv->endpoint != RS_PROC_NR) {
+/*XXX*/vmmcall(0x1234560a, 30, 30);
/* Force a static priv id for system services in the boot image. */
rp->r_priv.s_id = static_priv_id(
_ENDPOINT_P(boot_image_priv->endpoint));
@@ -309,13 +339,16 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
rp->r_priv.s_k_call_mask, KERNEL_CALL, TRUE);
/* Set the privilege structure. */
+/*XXX*/vmmcall(0x1234560a, 31, 31);
if ((s = sys_privctl(ip->endpoint, SYS_PRIV_SET_SYS, &(rp->r_priv)))
!= OK) {
panic("unable to set privilege structure: %d", s);
}
+/*XXX*/vmmcall(0x1234560a, 32, 32);
}
/* Synch the privilege structure with the kernel. */
+/*XXX*/vmmcall(0x1234560a, 33, 33);
if ((s = sys_getpriv(&(rp->r_priv), ip->endpoint)) != OK) {
panic("unable to synch privilege structure: %d", s);
}
@@ -323,6 +356,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
/*
* Set sys properties.
*/
+/*XXX*/vmmcall(0x1234560a, 34, 34);
rpub->sys_flags = boot_image_sys->flags; /* sys flags */
/*
@@ -342,6 +376,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
build_cmd_dep(rp);
/* Initialize vm call mask bitmap from unordered set. */
+/*XXX*/vmmcall(0x1234560a, 35, 35);
fill_call_mask(boot_image_priv->vm_calls, NR_VM_CALLS,
rpub->vm_call_mask, VM_RQ_BASE, TRUE);
@@ -365,12 +400,15 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
rp->r_flags = RS_IN_USE | RS_ACTIVE;
rproc_ptr[_ENDPOINT_P(rpub->endpoint)]= rp;
rpub->in_use = TRUE;
+/*XXX*/vmmcall(0x1234560a, 36, 36);
}
/* - Step 2: allow every system service in the boot image to run.
*/
nr_uncaught_init_srvs = 0;
+/*XXX*/vmmcall(0x1234560a, 37, 37);
for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
+/*XXX*/vmmcall(0x1234560a, 38, 38);
boot_image_priv = &boot_image_priv_table[i];
/* System services only. */
@@ -388,6 +426,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
rpub = rp->r_pub;
/* Allow the service to run. */
+/*XXX*/vmmcall(0x1234560a, 39, 39);
if ((s = sys_privctl(rpub->endpoint, SYS_PRIV_ALLOW, NULL)) != OK) {
panic("unable to initialize privileges: %d", s);
}
@@ -395,10 +434,13 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
/* Initialize service. We assume every service will always get
* back to us here at boot time.
*/
+/*XXX*/vmmcall(0x1234560a, 40, 40);
if(boot_image_priv->flags & SYS_PROC) {
+/*XXX*/vmmcall(0x1234560a, 41, 41);
if ((s = init_service(rp, SEF_INIT_FRESH)) != OK) {
panic("unable to initialize service: %d", s);
}
+/*XXX*/vmmcall(0x1234560a, 42, 42);
if(rpub->sys_flags & SF_SYNCH_BOOT) {
/* Catch init ready message now to synchronize. */
catch_boot_init_ready(rpub->endpoint);
@@ -407,13 +449,17 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
/* Catch init ready message later. */
nr_uncaught_init_srvs++;
}
+/*XXX*/vmmcall(0x1234560a, 43, 43);
}
+/*XXX*/vmmcall(0x1234560a, 44, 44);
}
/* - Step 3: let every system service complete initialization by
* catching all the init ready messages left.
*/
+/*XXX*/vmmcall(0x1234560a, 45, 45);
while(nr_uncaught_init_srvs) {
+/*XXX*/vmmcall(0x1234560a, 46, 46);
catch_boot_init_ready(ANY);
nr_uncaught_init_srvs--;
}
@@ -422,11 +468,14 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
* Complete the initialization of the system process table in collaboration
* with other system services.
*/
+/*XXX*/vmmcall(0x1234560a, 47, 47);
if ((s = getsysinfo(PM_PROC_NR, SI_PROC_TAB, mproc)) != OK) {
panic("unable to get copy of PM process table: %d", s);
}
+/*XXX*/vmmcall(0x1234560a, 48, 48);
for (i=0; boot_image_priv_table[i].endpoint != NULL_BOOT_NR; i++) {
boot_image_priv = &boot_image_priv_table[i];
+/*XXX*/vmmcall(0x1234560a, 49, 49);
/* System services only. */
if(iskerneln(_ENDPOINT_P(boot_image_priv->endpoint))) {
@@ -456,6 +505,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
}
}
}
+/*XXX*/vmmcall(0x1234560a, 50, 50);
/*
* Now complete RS initialization process in collaboration with other
@@ -463,15 +513,18 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
*/
/* Let the rest of the system know about our dynamically allocated buffer. */
if(boot_image_buffer_size > 0) {
+/*XXX*/vmmcall(0x1234560a, 51, 51);
boot_image_buffer = rs_startup_sbrk_synch(boot_image_buffer_size);
if(boot_image_buffer == (char *) -1) {
panic("unable to synch boot image buffer");
}
}
+/*XXX*/vmmcall(0x1234560a, 52, 52);
/* Set alarm to periodically check service status. */
if (OK != (s=sys_setalarm(RS_DELTA_T, 0)))
panic("couldn't set alarm: %d", s);
+/*XXX*/vmmcall(0x1234560a, 53, 53);
/* Map out our own text and data. This is normally done in crtso.o
* but RS is an exception - we don't get to talk to VM so early on.
@@ -483,6 +536,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
*/
unmap_ok = 1;
_minix_unmapzero();
+/*XXX*/vmmcall(0x1234560a, 54, 54);
return(OK);
}

View file

@@ -293,32 +293,42 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
static int level = 0;
void *ret;
+/*XXX*/vmmcall(0x1234560e, 0, 86);
pt = &vmprocess->vm_pt;
assert(reason >= 0 && reason < VMP_CATEGORIES);
level++;
+/*XXX*/vmmcall(0x1234560e, level, 87);
assert(level >= 1);
assert(level <= 2);
+/*XXX*/vmmcall(0x1234560e, level, 88);
if(level > 1 || !(vmprocess->vm_flags & VMF_HASPT) || !meminit_done) {
int r;
void *s;
+/*XXX*/vmmcall(0x1234560e, level, 89);
s=vm_getsparepage(phys);
+/*XXX*/vmmcall(0x1234560e, level, 90);
level--;
if(!s) {
+/*XXX*/vmmcall(0x1234560e, level, 91);
util_stacktrace();
printf("VM: warning: out of spare pages\n");
}
+/*XXX*/vmmcall(0x1234560e, level, 92);
return s;
}
/* VM does have a pagetable, so get a page and map it in there.
* Where in our virtual address space can we put it?
*/
+/*XXX*/vmmcall(0x1234560e, level, 93);
loc = findhole(pt, arch_vir2map(vmprocess, vmprocess->vm_stacktop),
vmprocess->vm_arch.vm_data_top);
+/*XXX*/vmmcall(0x1234560e, loc, 94);
if(loc == NO_MEM) {
+/*XXX*/vmmcall(0x1234560e, level, 95);
level--;
printf("VM: vm_allocpage: findhole failed\n");
return NULL;
@@ -327,6 +337,7 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
/* Allocate page of memory for use by VM. As VM
* is trusted, we don't have to pre-clear it.
*/
+/*XXX*/vmmcall(0x1234560e, level, 96);
if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
level--;
printf("VM: vm_allocpage: alloc_mem failed\n");
@@ -336,22 +347,29 @@ PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
*phys = CLICK2ABS(newpage);
/* Map this page into our address space. */
+/*XXX*/vmmcall(0x1234560e, 0, 97);
if((r=pt_writemap(pt, loc, *phys, I386_PAGE_SIZE,
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
+/*XXX*/vmmcall(0x1234560e, 0, 98);
free_mem(newpage, CLICKSPERPAGE);
printf("vm_allocpage writemap failed\n");
level--;
return NULL;
}
+/*XXX*/vmmcall(0x1234560e, 0, 99);
if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+/*XXX*/vmmcall(0x1234560e, 0, 100);
panic("VMCTL_FLUSHTLB failed: %d", r);
}
+/*XXX*/vmmcall(0x1234560e, 0, 101);
level--;
/* Return user-space-ready pointer to it. */
+/*XXX*/vmmcall(0x1234560e, 0, 102);
ret = (void *) arch_map2vir(vmprocess, loc);
+/*XXX*/vmmcall(0x1234560e, ret, 103);
return ret;
}
@@ -684,12 +702,16 @@ PUBLIC int pt_new(pt_t *pt)
* mappings from in-kernel page tables pointing to
* the page directories (the page_directories data).
*/
+/*XXX*/vmmcall(0x1234560e, 0, 80);
if(!pt->pt_dir &&
!(pt->pt_dir = vm_allocpage(&pt->pt_dir_phys, VMP_PAGEDIR))) {
+/*XXX*/vmmcall(0x1234560e, 0, 81);
return ENOMEM;
}
+/*XXX*/vmmcall(0x1234560e, 0, 82);
for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
+/*XXX*/vmmcall(0x1234560e, 0, 83);
pt->pt_dir[i] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
pt->pt_pt[i] = NULL;
}
@@ -698,9 +720,11 @@ PUBLIC int pt_new(pt_t *pt)
pt->pt_virtop = 0;
/* Map in kernel. */
+/*XXX*/vmmcall(0x1234560e, 0, 84);
if(pt_mapkernel(pt) != OK)
panic("pt_new: pt_mapkernel failed");
+/*XXX*/vmmcall(0x1234560e, 0, 85);
return OK;
}
@@ -732,12 +756,15 @@ PUBLIC void pt_init(phys_bytes usedlimit)
newpt = &vmprocess->vm_pt;
/* Get ourselves spare pages. */
+/*XXX*/vmmcall(0x1234560e, 0, 54);
if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
panic("pt_init: aalloc for spare failed");
+/*XXX*/vmmcall(0x1234560e, 0, 55);
if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
panic("pt_init: sys_umap failed: %d", r);
+/*XXX*/vmmcall(0x1234560e, 0, 56);
for(s = 0; s < SPAREPAGES; s++) {
sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
@@ -748,6 +775,7 @@ PUBLIC void pt_init(phys_bytes usedlimit)
/* global bit and 4MB pages available? */
global_bit_ok = _cpufeature(_CPUF_I386_PGE);
bigpage_ok = _cpufeature(_CPUF_I386_PSE);
+/*XXX*/vmmcall(0x1234560e, 0, 57);
/* Set bit for PTE's and PDE's if available. */
if(global_bit_ok)
@@ -780,10 +808,12 @@ PUBLIC void pt_init(phys_bytes usedlimit)
/* Make new page table for ourselves, partly copied
* from the current one.
*/
+/*XXX*/vmmcall(0x1234560e, 0, 58);
if(pt_new(newpt) != OK)
panic("pt_init: pt_new failed");
/* Set up mappings for VM process. */
+/*XXX*/vmmcall(0x1234560e, 0, 59);
for(v = lo; v < hi; v += I386_PAGE_SIZE) {
phys_bytes addr;
u32_t flags;
@@ -791,12 +821,14 @@ PUBLIC void pt_init(phys_bytes usedlimit)
/* We have to write the new position in the PT,
* so we can move our segments.
*/
+/*XXX*/vmmcall(0x1234560e, 0, 60);
if(pt_writemap(newpt, v+moveup, v, I386_PAGE_SIZE,
I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
panic("pt_init: pt_writemap failed");
}
/* Move segments up too. */
+/*XXX*/vmmcall(0x1234560e, 0, 61);
vmprocess->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
vmprocess->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
vmprocess->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);
@@ -804,10 +836,12 @@ PUBLIC void pt_init(phys_bytes usedlimit)
/* Allocate us a page table in which to remember page directory
* pointers.
*/
+/*XXX*/vmmcall(0x1234560e, 0, 62);
if(!(page_directories = vm_allocpage(&page_directories_phys,
VMP_PAGETABLE)))
panic("no virt addr for vm mappings");
+/*XXX*/vmmcall(0x1234560e, 0, 63);
memset(page_directories, 0, I386_PAGE_SIZE);
/* Increase our hardware data segment to create virtual address
@@ -842,9 +876,11 @@ PUBLIC void pt_init(phys_bytes usedlimit)
kernmap_pde = free_pde++;
offset = kernmap_pde * I386_BIG_PAGE_SIZE;
+/*XXX*/vmmcall(0x1234560e, 0, 64);
while(sys_vmctl_get_mapping(index, &addr, &len,
&flags) == OK) {
vir_bytes vir;
+/*XXX*/vmmcall(0x1234560e, 0, 65);
if(index >= MAX_KERNMAPPINGS)
panic("VM: too many kernel mappings: %d", index);
kern_mappings[index].phys_addr = addr;
@@ -854,30 +890,36 @@ PUBLIC void pt_init(phys_bytes usedlimit)
kern_mappings[index].flags =
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
global_bit;
+/*XXX*/vmmcall(0x1234560e, 0, 66);
if(flags & VMMF_UNCACHED)
kern_mappings[index].flags |= PTF_NOCACHE;
if(addr % I386_PAGE_SIZE)
panic("VM: addr unaligned: %d", addr);
if(len % I386_PAGE_SIZE)
panic("VM: len unaligned: %d", len);
+/*XXX*/vmmcall(0x1234560e, 0, 67);
vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
if(sys_vmctl_reply_mapping(index, vir) != OK)
panic("VM: reply failed");
offset += len;
index++;
kernmappings++;
+/*XXX*/vmmcall(0x1234560e, 0, 68);
}
}
/* Find a PDE below processes available for mapping in the
* page directories (readonly).
*/
+/*XXX*/vmmcall(0x1234560e, 0, 69);
pagedir_pde = free_pde++;
pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
/* Tell kernel about free pde's. */
+/*XXX*/vmmcall(0x1234560e, 0, 70);
while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
+/*XXX*/vmmcall(0x1234560e, 0, 71);
if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
panic("VMCTL_I386_FREEPDE failed: %d", r);
}
@@ -887,20 +929,25 @@ PUBLIC void pt_init(phys_bytes usedlimit)
proc_pde = free_pde;
/* Give our process the new, copied, private page table. */
+/*XXX*/vmmcall(0x1234560e, 0, 72);
pt_mapkernel(newpt); /* didn't know about vm_dir pages earlier */
+/*XXX*/vmmcall(0x1234560e, 0, 73);
pt_bind(newpt, vmprocess);
/* new segment limit for the kernel after paging is enabled */
+/*XXX*/vmmcall(0x1234560e, 0, 74);
ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
/* the memory map which must be installed after paging is enabled */
ep_data.mem_map = vmprocess->vm_arch.vm_seg;
/* Now actually enable paging. */
+/*XXX*/vmmcall(0x1234560e, 0, 75);
if(sys_vmctl_enable_paging(&ep_data) != OK)
panic("pt_init: enable paging failed");
/* Back to reality - this is where the stack actually is. */
vmprocess->vm_arch.vm_seg[S].mem_len -= extra_clicks;
+/*XXX*/vmmcall(0x1234560e, 0, 76);
/* All OK. */
return;

View file

@@ -415,6 +415,7 @@ PUBLIC int proc_new(struct vmproc *vmp,
	int prealloc;
	struct vir_region *reg;
	/*XXX*/vmmcall(0x1234560b, 0, 1);
	assert(!(vstart % VM_PAGE_SIZE));
	assert(!(text_bytes % VM_PAGE_SIZE));
	assert(!(data_bytes % VM_PAGE_SIZE));
@@ -425,6 +426,7 @@ PUBLIC int proc_new(struct vmproc *vmp,
	assert((!text_start && !data_start) || (text_start && data_start));
	/* Place text at start of process. */
	/*XXX*/vmmcall(0x1234560b, 0, 2);
	vmp->vm_arch.vm_seg[T].mem_phys = ABS2CLICK(vstart);
	vmp->vm_arch.vm_seg[T].mem_vir = 0;
	vmp->vm_arch.vm_seg[T].mem_len = ABS2CLICK(text_bytes);
@@ -434,35 +436,50 @@ PUBLIC int proc_new(struct vmproc *vmp,
/* page mapping flags for code */
#define TEXTFLAGS (PTF_PRESENT | PTF_USER)
	SANITYCHECK(SCL_DETAIL);
	/*XXX*/vmmcall(0x1234560b, 0, 3);
	if(text_bytes > 0) {
		/*XXX*/vmmcall(0x1234560b, 0, 4);
		if(!(reg=map_page_region(vmp, vstart, 0, text_bytes,
			text_start ? text_start : MAP_NONE,
			VR_ANON | VR_WRITABLE, text_start ? 0 : MF_PREALLOC))) {
			/*XXX*/vmmcall(0x1234560b, 0, 5);
			SANITYCHECK(SCL_DETAIL);
			printf("VM: proc_new: map_page_region failed (text)\n");
			map_free_proc(vmp);
			SANITYCHECK(SCL_DETAIL);
			/*XXX*/vmmcall(0x1234560b, 0, 6);
			return(ENOMEM);
		}
		/*XXX*/vmmcall(0x1234560b, 0, 7);
		map_region_set_tag(reg, VRT_TEXT);
		/*XXX*/vmmcall(0x1234560b, 0, 8);
		SANITYCHECK(SCL_DETAIL);
		/*XXX*/vmmcall(0x1234560b, 0, 9);
	}
	/*XXX*/vmmcall(0x1234560b, 0, 10);
	SANITYCHECK(SCL_DETAIL);
	/*XXX*/vmmcall(0x1234560b, 0, 11);
	/* Allocate memory for data (including bss, but not including gap
	 * or stack), make sure it's cleared, and map it in after text
	 * (if any).
	 */
	/*XXX*/vmmcall(0x1234560b, 0, 12);
	if(!(vmp->vm_heap = map_page_region(vmp, vstart + text_bytes, 0,
		data_bytes, data_start ? data_start : MAP_NONE, VR_ANON | VR_WRITABLE,
		data_start ? 0 : MF_PREALLOC))) {
		/*XXX*/vmmcall(0x1234560b, 0, 13);
		printf("VM: exec: map_page_region for data failed\n");
		/*XXX*/vmmcall(0x1234560b, 0, 14);
		map_free_proc(vmp);
		/*XXX*/vmmcall(0x1234560b, 0, 15);
		SANITYCHECK(SCL_DETAIL);
		/*XXX*/vmmcall(0x1234560b, 0, 16);
		return ENOMEM;
	}
	/* Tag the heap so brk() call knows which region to extend. */
	/*XXX*/vmmcall(0x1234560b, 0, 17);
	map_region_set_tag(vmp->vm_heap, VRT_HEAP);
	/* How many address space clicks between end of data
@@ -470,17 +487,22 @@ PUBLIC int proc_new(struct vmproc *vmp,
	 * stacktop is the first address after the stack, as addressed
	 * from within the user process.
	 */
	/*XXX*/vmmcall(0x1234560b, 0, 18);
	hole_bytes = stacktop - data_bytes - stack_bytes - gap_bytes;
	/*XXX*/vmmcall(0x1234560b, 0, 19);
	if(!(reg=map_page_region(vmp,
		vstart + text_bytes + data_bytes + hole_bytes,
		0, stack_bytes + gap_bytes, MAP_NONE,
		VR_ANON | VR_WRITABLE, prealloc_stack ? MF_PREALLOC : 0)) != OK) {
		/*XXX*/vmmcall(0x1234560b, 0, 20);
		panic("map_page_region failed for stack");
	}
	/*XXX*/vmmcall(0x1234560b, 0, 21);
	map_region_set_tag(reg, VRT_STACK);
	/*XXX*/vmmcall(0x1234560b, 0, 22);
	vmp->vm_arch.vm_seg[D].mem_phys = ABS2CLICK(vstart + text_bytes);
	vmp->vm_arch.vm_seg[D].mem_vir = 0;
	vmp->vm_arch.vm_seg[D].mem_len = ABS2CLICK(data_bytes);
@@ -496,6 +518,7 @@ PUBLIC int proc_new(struct vmproc *vmp,
	vmp->vm_flags |= VMF_HASPT;
	/*XXX*/vmmcall(0x1234560b, 0, 23);
	if(vmp->vm_endpoint != NONE) {
		/* Pretend the stack is the full size of the data segment, so
@@ -503,22 +526,29 @@ PUBLIC int proc_new(struct vmproc *vmp,
		 * After sys_newmap(), change the stack to what we know the
		 * stack to be (up to stacktop).
		 */
		/*XXX*/vmmcall(0x1234560b, 0, 24);
		vmp->vm_arch.vm_seg[S].mem_len = (VM_DATATOP >> CLICK_SHIFT) -
			vmp->vm_arch.vm_seg[S].mem_vir - ABS2CLICK(vstart) - ABS2CLICK(text_bytes);
		/* What is the final size of the data segment in bytes? */
		/*XXX*/vmmcall(0x1234560b, 0, 25);
		vmp->vm_arch.vm_data_top =
			(vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;
		/*XXX*/vmmcall(0x1234560b, 0, 26);
		if((s=sys_newmap(vmp->vm_endpoint, vmp->vm_arch.vm_seg)) != OK)
			panic("sys_newmap (vm) failed: %d", s);
		/*XXX*/vmmcall(0x1234560b, 0, 27);
		if((s=pt_bind(&vmp->vm_pt, vmp)) != OK)
			panic("exec_newmem: pt_bind failed: %d", s);
		/*XXX*/vmmcall(0x1234560b, 0, 28);
	}
	/* No yielded memory blocks. */
	/*XXX*/vmmcall(0x1234560b, 0, 29);
	yielded_init(&vmp->vm_yielded_blocks);
	/*XXX*/vmmcall(0x1234560b, 0, 30);
	return OK;
}
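The hole computed just before the stack mapping above makes the layout come out so that, seen from the process (whose data segment starts right after the text), the stack region ends exactly at stacktop. A worked example with made-up figures:

/* Illustrative figures only, not taken from the source:
 * data_bytes = 0x6000, gap_bytes = 0x1000, stack_bytes = 0x2000, stacktop = 0x10000
 *
 * hole_bytes = stacktop - data_bytes - stack_bytes - gap_bytes
 *            = 0x10000 - 0x6000 - 0x2000 - 0x1000 = 0x7000
 *
 * The stack+gap region starts at vstart + text_bytes + data_bytes + hole_bytes,
 * is stack_bytes + gap_bytes long, and therefore ends at
 * vstart + text_bytes + stacktop, i.e. at user address stacktop. */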

View file

@@ -79,23 +79,30 @@ PUBLIC int main(void)
	int caller_slot;
	struct vmproc *vmp_caller;
	/*XXX*/vmmcall(0x1234560c, 0, 0);
	/* SEF local startup. */
	sef_local_startup();
	/*XXX*/vmmcall(0x1234560c, 0, 1);
	SANITYCHECK(SCL_TOP);
	/* This is VM's main loop. */
	/*XXX*/vmmcall(0x1234560c, 0, 2);
	while (TRUE) {
		int r, c;
		/*XXX*/vmmcall(0x1234560c, 0, 3);
		SANITYCHECK(SCL_TOP);
		if(missing_spares > 0) {
			pt_cycle();	/* pagetable code wants to be called */
		}
		/*XXX*/vmmcall(0x1234560c, 0, 4);
		if ((r=sef_receive_status(ANY, &msg, &rcv_sts)) != OK)
			panic("sef_receive_status() error: %d", r);
		/*XXX*/vmmcall(0x1234560c, msg.m_type, 5);
		if (is_ipc_notify(rcv_sts)) {
			/* Unexpected notify(). */
			printf("VM: ignoring notify() from %d\n", msg.m_source);
@@ -106,6 +113,7 @@ PUBLIC int main(void)
			panic("invalid caller", who_e);
		vmp_caller = &vmproc[caller_slot];
		c = CALLNUMBER(msg.m_type);
		/*XXX*/vmmcall(0x1234560c, c, 6);
		result = ENOSYS; /* Out of range or restricted calls return this. */
		if (msg.m_type == VM_PAGEFAULT) {
			if (!IPC_STATUS_FLAGS_TEST(rcv_sts, IPC_FLG_MSG_FROM_KERNEL)) {
@@ -131,6 +139,7 @@ PUBLIC int main(void)
				SANITYCHECK(SCL_FUNCTIONS);
			}
		}
		/*XXX*/vmmcall(0x1234560c, result, 7);
		/* Send reply message, unless the return code is SUSPEND,
		 * which is a pseudo-result suppressing the reply message.
@@ -143,7 +152,9 @@ PUBLIC int main(void)
				panic("send() error");
			}
		}
		/*XXX*/vmmcall(0x1234560c, 0, 8);
	}
	/*XXX*/vmmcall(0x1234560c, 0, 9);
	return(OK);
}
@@ -153,16 +164,21 @@ PUBLIC int main(void)
PRIVATE void sef_local_startup()
{
	/* Register init callbacks. */
	/*XXX*/vmmcall(0x1234560c, 10, 10);
	sef_setcb_init_fresh(sef_cb_init_fresh);
	/*XXX*/vmmcall(0x1234560c, 11, 11);
	sef_setcb_init_restart(sef_cb_init_fail);
	/* No live update support for now. */
	/* Register signal callbacks. */
	/*XXX*/vmmcall(0x1234560c, 12, 12);
	sef_setcb_signal_handler(sef_cb_signal_handler);
	/* Let SEF perform startup. */
	/*XXX*/vmmcall(0x1234560c, 13, 13);
	sef_startup();
	/*XXX*/vmmcall(0x1234560c, 14, 14);
}
/*===========================================================================*
@@ -183,6 +199,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
	incheck = nocheck = 0;
#endif
	/*XXX*/vmmcall(0x1234560c, 0, 27);
	vm_paged = 1;
	env_parse("vm_paged", "d", 0, &vm_paged, 0, 1);
#if SANITYCHECKS
@@ -190,18 +207,22 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
#endif
	/* Get chunks of available memory. */
	/*XXX*/vmmcall(0x1234560c, 0, 28);
	get_mem_chunks(mem_chunks);
	/* Initialize VM's process table. Request a copy of the system
	 * image table that is defined at the kernel level to see which
	 * slots to fill in.
	 */
	/*XXX*/vmmcall(0x1234560c, 0, 28);
	if (OK != (s=sys_getimage(image)))
		panic("couldn't get image table: %d", s);
	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	/*XXX*/vmmcall(0x1234560c, 0, 29);
	memset(vmproc, 0, sizeof(vmproc));
	/*XXX*/vmmcall(0x1234560c, 0, 30);
	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}
@@ -209,10 +230,12 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
	/* Walk through boot-time system processes that are alive
	 * now and make valid slot entries for them.
	 */
	/*XXX*/vmmcall(0x1234560c, 0, 31);
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		phys_bytes proclimit;
		struct vmproc *vmp;
		/*XXX*/vmmcall(0x1234560c, 0, 32);
		if(ip->proc_nr >= _NR_PROCS) { panic("proc: %d", ip->proc_nr); }
		if(ip->proc_nr < 0 && ip->proc_nr != SYSTEM) continue;
@@ -228,22 +251,28 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
		/* Initialize normal process table slot or special SYSTEM
		 * table slot. Kernel memory is already reserved.
		 */
		/*XXX*/vmmcall(0x1234560c, 0, 33);
		GETVMP(vmp, ip->proc_nr);
		/* reset fields as if exited */
		/*XXX*/vmmcall(0x1234560c, 0, 34);
		clear_proc(vmp);
		/* Get memory map for this process from the kernel. */
		/*XXX*/vmmcall(0x1234560c, 0, 35);
		if ((s=get_mem_map(ip->proc_nr, vmp->vm_arch.vm_seg)) != OK)
			panic("couldn't get process mem_map: %d", s);
		/* Remove this memory from the free list. */
		/*XXX*/vmmcall(0x1234560c, 0, 36);
		reserve_proc_mem(mem_chunks, vmp->vm_arch.vm_seg);
		/* Set memory limit. */
		/*XXX*/vmmcall(0x1234560c, 0, 37);
		proclimit = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
			vmp->vm_arch.vm_seg[S].mem_len) - 1;
		/*XXX*/vmmcall(0x1234560c, 0, 38);
		if(proclimit > limit)
			limit = proclimit;
@@ -255,21 +284,26 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
		if (vmp->vm_arch.vm_seg[T].mem_len != 0)
			vmp->vm_flags |= VMF_SEPARATE;
		/*XXX*/vmmcall(0x1234560c, 0, 39);
	}
	/* Architecture-dependent initialization. */
	/*XXX*/vmmcall(0x1234560c, 0, 40);
	pt_init(limit);
	/* Initialize tables to all physical memory. */
	/*XXX*/vmmcall(0x1234560c, 0, 41);
	mem_init(mem_chunks);
	meminit_done = 1;
	/* Give these processes their own page table. */
	/*XXX*/vmmcall(0x1234560c, 0, 42);
	for (ip = &image[0]; ip < &image[NR_BOOT_PROCS]; ip++) {
		int s;
		struct vmproc *vmp;
		vir_bytes old_stacktop, old_stack;
		/*XXX*/vmmcall(0x1234560c, 0, 43);
		if(ip->proc_nr < 0) continue;
		GETVMP(vmp, ip->proc_nr);
@@ -282,20 +316,26 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
			vmp->vm_arch.vm_seg[S].mem_len -
			vmp->vm_arch.vm_seg[D].mem_len;
		/*XXX*/vmmcall(0x1234560c, 0, 44);
		if(pt_new(&vmp->vm_pt) != OK)
			panic("VM: no new pagetable");
#define BASICSTACK VM_PAGE_SIZE
		/*XXX*/vmmcall(0x1234560c, 0, 77);
		old_stacktop = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_vir +
			vmp->vm_arch.vm_seg[S].mem_len);
		/*XXX*/vmmcall(0x1234560c, old_stacktop, 78);
		if(sys_vmctl(vmp->vm_endpoint, VMCTL_INCSP,
			VM_STACKTOP - old_stacktop) != OK) {
			/*XXX*/vmmcall(0x1234560c, 0, 79);
			panic("VM: vmctl for new stack failed");
		}
		/*XXX*/vmmcall(0x1234560c, 0, 45);
		free_mem(vmp->vm_arch.vm_seg[D].mem_phys +
			vmp->vm_arch.vm_seg[D].mem_len,
			old_stack);
		/*XXX*/vmmcall(0x1234560c, 0, 46);
		if(proc_new(vmp,
			VM_PROCSTART,
			CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_len),
@@ -309,6 +349,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
			VM_STACKTOP, 0) != OK) {
			panic("failed proc_new for boot process");
		}
		/*XXX*/vmmcall(0x1234560c, 0, 47);
	}
	/* Set up table of calls. */
@@ -322,6 +363,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	/*XXX*/vmmcall(0x1234560c, 0, 48);
	memset(vm_calls, 0, sizeof(vm_calls));
	/* Basic VM calls. */
@@ -359,17 +401,21 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
	CALLMAP(VM_YIELDBLOCKGETBLOCK, do_yieldblockgetblock);
	/* Sanity checks */
	/*XXX*/vmmcall(0x1234560c, 0, 49);
	if(find_kernel_top() >= VM_PROCSTART)
		panic("kernel loaded too high");
	/* Initialize the structures for queryexit */
	/*XXX*/vmmcall(0x1234560c, 0, 50);
	init_query_exit();
	/* Unmap our own low pages. */
	/*XXX*/vmmcall(0x1234560c, 0, 51);
	unmap_ok = 1;
	_minix_unmapzero();
	/* Map all the services in the boot image. */
	/*XXX*/vmmcall(0x1234560c, 0, 52);
	if((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK) {
		panic("sys_safecopyfrom failed: %d", s);
@@ -381,6 +427,7 @@ PRIVATE int sef_cb_init_fresh(int type, sef_init_info_t *info)
			}
		}
	}
	/*XXX*/vmmcall(0x1234560c, 0, 53);
	return(OK);
}
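For orientation while reading these hunks: the magic first argument of each trace call identifies the instrumented function, and the checkpoint numbers count up within that function. The summary below is reconstructed from the call sites in this diff; the struct layout and the labels are illustrative, not something the commit defines.

/* Illustrative summary (not in the source) of the trace channels used here. */
struct vm_trace_channel { unsigned long magic; const char *where; };
static const struct vm_trace_channel vm_trace_channels[] = {
	{ 0x1234560bUL, "proc_new()" },
	{ 0x1234560cUL, "main(), sef_local_startup(), sef_cb_init_fresh()" },
	{ 0x1234560dUL, "map_page_region()" },
	{ 0x1234560eUL, "pt_init()" },
};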

View file

@@ -439,21 +439,25 @@ int mapflags;
	struct phys_region *ph;
	physr_avl *phavl;
	/*XXX*/vmmcall(0x1234560d, 0, 31);
	assert(!(length % VM_PAGE_SIZE));
	SANITYCHECK(SCL_FUNCTIONS);
	/*XXX*/vmmcall(0x1234560d, 0, 32);
	startv = region_find_slot(vmp, minv, maxv, length, &prevregion);
	if (startv == (vir_bytes) -1)
		return NULL;
	/* Now we want a new region. */
	/*XXX*/vmmcall(0x1234560d, 0, 33);
	if(!SLABALLOC(newregion)) {
		printf("VM: map_page_region: allocating region failed\n");
		return NULL;
	}
	/* Fill in node details. */
	/*XXX*/vmmcall(0x1234560d, 0, 34);
USE(newregion,
	newregion->vaddr = startv;
	newregion->length = length;
@@ -461,23 +465,29 @@ USE(newregion,
	newregion->tag = VRT_NONE;
	newregion->parent = vmp;);
	/*XXX*/vmmcall(0x1234560d, 0, 35);
	SLABALLOC(phavl);
	if(!phavl) {
		/*XXX*/vmmcall(0x1234560d, 0, 36);
		printf("VM: map_page_region: allocating phys avl failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);
	/*XXX*/vmmcall(0x1234560d, 0, 37);
	physr_init(newregion->phys);
	/* If we know what we're going to map to, map it right away. */
	/*XXX*/vmmcall(0x1234560d, 0, 38);
	if(what != MAP_NONE) {
		assert(!(what % VM_PAGE_SIZE));
		assert(!(startv % VM_PAGE_SIZE));
		assert(!(mapflags & MF_PREALLOC));
		/*XXX*/vmmcall(0x1234560d, 0, 39);
		if(map_new_physblock(vmp, newregion, 0, length,
			what, PAF_CLEAR, 0) != OK) {
			/*XXX*/vmmcall(0x1234560d, 0, 40);
			printf("VM: map_new_physblock failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
@@ -486,8 +496,11 @@ USE(newregion,
		}
	}
	/*XXX*/vmmcall(0x1234560d, 0, 41);
	if((flags & VR_ANON) && (mapflags & MF_PREALLOC)) {
		/*XXX*/vmmcall(0x1234560d, 0, 42);
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			/*XXX*/vmmcall(0x1234560d, 0, 43);
			printf("VM: map_page_region: prealloc failed\n");
			USE(newregion,
				SLABFREE(newregion->phys););
@@ -497,14 +510,18 @@ USE(newregion,
	}
	/* Link it. */
	/*XXX*/vmmcall(0x1234560d, 0, 44);
	if(prevregion) {
		/*XXX*/vmmcall(0x1234560d, 0, 45);
		assert(prevregion->vaddr < newregion->vaddr);
		USE(newregion, newregion->next = prevregion->next;);
		USE(prevregion, prevregion->next = newregion;);
	} else {
		/*XXX*/vmmcall(0x1234560d, 0, 46);
		USE(newregion, newregion->next = vmp->vm_regions;);
		vmp->vm_regions = newregion;
	}
	/*XXX*/vmmcall(0x1234560d, 0, 47);
#if SANITYCHECKS
	assert(startv == newregion->vaddr);
@@ -513,8 +530,10 @@ USE(newregion,
	}
#endif
	/*XXX*/vmmcall(0x1234560d, 0, 48);
	SANITYCHECK(SCL_FUNCTIONS);
	/*XXX*/vmmcall(0x1234560d, 0, 49);
	return newregion;
}
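The hypercall itself is only meaningful when a monitoring VMM is present. A minimal sketch of a fallback stub that would let the instrumented code build and run without one follows; the header include, the parameter names and the idea of stubbing the call out are assumptions, not part of this commit.

#include <sys/types.h>	/* assumed source of u32_t on MINIX */

/* Hypothetical no-op fallback: every trace point vanishes at run time. */
void vmmcall(u32_t channel, u32_t value, u32_t checkpoint)
{
	(void) channel;	/* per-function magic, e.g. 0x1234560d above */
	(void) value;	/* datum reported at this checkpoint */
	(void) checkpoint;	/* sequence number within the function */
}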