rename of mode/context switching functions

- this patch only renames schedcheck() to switch_to_user(),
  cycles_accounting_stop() to context_stop(), and restart() to
  restore_user_context()

- the motivation is that since the introduction of schedcheck() it has
  been abused for many things, so it deserves a better name. The name
  should express the fact that from the moment we call the function we
  are in the process of switching to user

- cycles_accounting_stop() was originally a single-purpose function.
  Because it is called at very convenient places, it has come to be
  used for other things too, e.g. (un)locking the kernel. Thus it
  deserves a better name too

- unlike the old restart(), restore_user_context() does not call
  switch_to_user(); instead, the former calls to restart() are replaced
  by calls to switch_to_user(), which in turn calls
  restore_user_context() (see the sketch below)
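
The resulting control flow, pieced together from the scheduler hunks
below (a minimal C sketch; the process-selection details are elided):

    /* every interrupt/trap/call handler now ends in a call to
     * switch_to_user() instead of a jmp to restart */
    PUBLIC void switch_to_user(void)
    {
            /* ... pick the next runnable process, check its flags ... */
            proc_ptr = arch_finish_switch_to_user();
            context_stop(proc_addr(KERNEL)); /* stop kernel cycle accounting */

            /* the actual mode switch to userspace; does not return */
            restore_user_context(proc_ptr);
            NOT_REACHABLE;
    }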
Tomas Hruby 2010-05-18 13:00:39 +00:00
parent 9ba65d2ea8
commit b90c2d7026
8 changed files with 53 additions and 68 deletions


@@ -38,15 +38,15 @@
\
SAVE_PROCESS_CTX(0) ;\
push %ebp ;\
-call cycles_accounting_stop ;\
+call context_stop ;\
add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
APIC_IRQ_HANDLER(irq) ;\
-jmp restart ;\
+jmp switch_to_user ;\
\
0: \
pusha ;\
-call cycles_accounting_stop_idle ;\
+call context_stop_idle ;\
APIC_IRQ_HANDLER(irq) ;\
CLEAR_IF(10*4(%esp)) ;\
popa ;\
@@ -149,15 +149,15 @@ apic_hwint15:
\
SAVE_PROCESS_CTX(0) ;\
push %ebp ;\
-call cycles_accounting_stop ;\
+call context_stop ;\
add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
LAPIC_INTR_HANDLER(func) ;\
-jmp restart ;\
+jmp switch_to_user ;\
\
0: \
pusha ;\
-call cycles_accounting_stop_idle ;\
+call context_stop_idle ;\
LAPIC_INTR_HANDLER(func) ;\
CLEAR_IF(10*4(%esp)) ;\
popa ;\


@@ -131,7 +131,7 @@ PUBLIC void cycles_accounting_init(void)
read_tsc_64(&tsc_ctr_switch);
}
-PUBLIC void cycles_accounting_stop(struct proc * p)
+PUBLIC void context_stop(struct proc * p)
{
u64_t tsc;
@@ -140,7 +140,7 @@ PUBLIC void cycles_accounting_stop(struct proc * p)
tsc_ctr_switch = tsc;
}
-PUBLIC void cycles_accounting_stop_idle(void)
+PUBLIC void context_stop_idle(void)
{
-cycles_accounting_stop(proc_addr(IDLE));
+context_stop(proc_addr(IDLE));
}
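
The body elided between the two hunks above charges the TSC cycles that
elapsed since the last switch to the process passed in, then restarts
the measurement from tsc_ctr_switch. A minimal sketch, assuming the
per-process counter is p->p_cycles and the usual u64 helpers (add64,
sub64) are available alongside read_tsc_64():

    PUBLIC void context_stop(struct proc * p)
    {
            u64_t tsc;

            read_tsc_64(&tsc);
            /* charge the cycles since the last switch to process p */
            p->p_cycles = add64(p->p_cycles, sub64(tsc, tsc_ctr_switch));
            tsc_ctr_switch = tsc; /* restart the measurement here */
    }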


@@ -423,7 +423,7 @@ PUBLIC void arch_do_syscall(struct proc *proc)
do_ipc(proc->p_reg.cx, proc->p_reg.retreg, proc->p_reg.bx);
}
-PUBLIC struct proc * arch_finish_schedcheck(void)
+PUBLIC struct proc * arch_finish_switch_to_user(void)
{
char * stk;
stk = (char *)tss.sp0;


@@ -5,29 +5,12 @@
* interrupt handlers. It cooperates with the code in "start.c" to set up a
* good environment for main().
*
-* Every transition to the kernel goes through this file. Transitions to the
-* kernel may be nested. The initial entry may be with a system call (i.e.,
-* send or receive a message), an exception or a hardware interrupt; kernel
-* reentries may only be made by hardware interrupts. The count of reentries
-* is kept in "k_reenter". It is important for deciding whether to switch to
-* the kernel stack and for protecting the message passing code in "proc.c".
-*
-* For the message passing trap, most of the machine state is saved in the
-* proc table. (Some of the registers need not be saved.) Then the stack is
-* switched to "k_stack", and interrupts are reenabled. Finally, the system
-* call handler (in C) is called. When it returns, interrupts are disabled
-* again and the code falls into the restart routine, to finish off held-up
-* interrupts and run the process or task whose pointer is in "proc_ptr".
-*
-* Hardware interrupt handlers do the same, except (1) The entire state must
-* be saved. (2) There are too many handlers to do this inline, so the save
-* routine is called. A few cycles are saved by pushing the address of the
-* appropiate restart routine for a return later. (3) A stack switch is
-* avoided when the stack is already switched. (4) The (master) 8259 interrupt
-* controller is reenabled centrally in save(). (5) Each interrupt handler
-* masks its interrupt line using the 8259 before enabling (other unmasked)
-* interrupts, and unmasks it after servicing the interrupt. This limits the
-* nest level to the number of lines and protects the handler from itself.
+* The kernel is entered because of kernel-calls, ipc-calls, interrupts or
+* exceptions. TSS is set so that the kernel stack is loaded. The user context
+* is saved to the proc table and the handler of the event is called. Once the
+* handler is done, the switch_to_user() function is called to pick a new
+* process, finish what needs to be done for the next process to run, set its
+* context and switch to userspace.
*
* For communication with the boot monitor at startup time some constant
* data are compiled into the beginning of the text segment. This facilitates
@@ -81,7 +64,7 @@ begbss:
* the entity.
*/
-.globl restart
+.globl restore_user_context
.globl reload_cr3
.globl divide_error
@@ -106,7 +89,7 @@ begbss:
.globl params_size
.globl params_offset
.globl mon_ds
-.globl schedcheck
+.globl switch_to_user
.globl lazy_fpu
.globl hwint00 /* handlers for hardware interrupts */
@@ -250,17 +233,17 @@ csinit:
\
SAVE_PROCESS_CTX(0) ;\
push %ebp ;\
-call cycles_accounting_stop ;\
+call context_stop ;\
add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
-jmp restart ;\
+jmp switch_to_user ;\
\
0: \
pusha ;\
-call cycles_accounting_stop_idle ;\
+call context_stop_idle ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
@@ -318,18 +301,18 @@ hwint07:
\
SAVE_PROCESS_CTX(0) ;\
push %ebp ;\
-call cycles_accounting_stop ;\
+call context_stop ;\
add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
outb $INT2_CTL /* reenable slave 8259 */ ;\
-jmp restart ;\
+jmp switch_to_user ;\
\
0: \
pusha ;\
-call cycles_accounting_stop_idle ;\
+call context_stop_idle ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
@@ -402,7 +385,7 @@ ipc_entry:
/* stop user process cycles */
push %ebp
-call cycles_accounting_stop
+call context_stop
add $4, %esp
/* for stack trace */
@@ -415,7 +398,7 @@ ipc_entry:
pop %esi
mov %eax, AXREG(%esi)
-jmp restart
+jmp switch_to_user
/*
@@ -439,7 +422,7 @@ kernel_call_entry:
/* stop user process cycles */
push %ebp
-call cycles_accounting_stop
+call context_stop
add $4, %esp
/* for stack trace */
@@ -450,7 +433,7 @@ kernel_call_entry:
/* restore the current process pointer and save the return value */
add $8, %esp
-jmp restart
+jmp switch_to_user
.balign 16
@@ -472,7 +455,7 @@ exception_entry_from_user:
/* stop user process cycles */
push %ebp
-call cycles_accounting_stop
+call context_stop
add $4, %esp
/* for stack trace clear %ebp */
@@ -487,7 +470,7 @@ exception_entry_from_user:
push $0 /* it's not a nested exception */
call exception_handler
-jmp restart
+jmp switch_to_user
exception_entry_nested:
@@ -508,11 +491,8 @@ exception_entry_nested:
/*===========================================================================*/
/* restart */
/*===========================================================================*/
-restart:
-call schedcheck
-/* %eax is set by schedcheck() to the process to run */
-mov %eax, %ebp /* will assume P_STACKBASE == 0 */
+restore_user_context:
+mov 4(%esp), %ebp /* will assume P_STACKBASE == 0 */
/* reconstruct the stack for iret */
movl SSREG(%ebp), %eax
@@ -617,7 +597,7 @@ copr_not_available:
SAVE_PROCESS_CTX_NON_LAZY(0)
/* stop user process cycles */
push %ebp
-call cycles_accounting_stop
+call context_stop
pop %ebp
lea P_MISC_FLAGS(%ebp), %ebx
movw (%ebx), %cx
@@ -639,7 +619,7 @@ fp_l_no_fxsr:
frstor (%eax)
copr_return:
orw $MF_USED_FPU, (%ebx) /* fpu was used during last execution */
-jmp restart
+jmp switch_to_user
copr_not_available_in_kernel:
movl $0, (%esp)

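Note the calling-convention change in the assembly hunks above:
restore_user_context() now receives the process pointer as an ordinary
stack argument (mov 4(%esp), %ebp) rather than consuming schedcheck()'s
return value from %eax, which is what lets the C code below call it as
restore_user_context(proc_ptr).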

@@ -257,7 +257,7 @@ PUBLIC void main(void)
assert(runqueues_ok());
-restart();
+switch_to_user();
NOT_REACHABLE;
}


@@ -89,7 +89,7 @@ PRIVATE void idle(void)
*/
/* start accounting for the idle time */
-cycles_accounting_stop(proc_addr(KERNEL));
+context_stop(proc_addr(KERNEL));
halt_cpu();
/*
* end of accounting for the idle task does not happen here, the kernel
@@ -98,9 +98,9 @@
}
/*===========================================================================*
-* schedcheck *
+* switch_to_user *
*===========================================================================*/
-PUBLIC struct proc * schedcheck(void)
+PUBLIC void switch_to_user(void)
{
/* This function is called an instant before proc_ptr is
* to be scheduled again.
@@ -232,12 +232,17 @@
#endif
-proc_ptr = arch_finish_schedcheck();
+proc_ptr = arch_finish_switch_to_user();
assert(proc_ptr->p_ticks_left > 0);
-cycles_accounting_stop(proc_addr(KERNEL));
+context_stop(proc_addr(KERNEL));
-return proc_ptr;
+/*
+ * restore_user_context() carries out the actual mode switch from kernel
+ * to userspace. This function does not return
+ */
+restore_user_context(proc_ptr);
+NOT_REACHABLE;
}
/*
@@ -1398,7 +1403,7 @@ PRIVATE void notify_scheduler(struct proc *p)
/*
* If a scheduler is scheduling itself or has no scheduler, and
* runs out of quantum, we don't send a message. The
-* RTS_NO_QUANTUM flag will be removed by schedcheck in proc.c.
+* RTS_NO_QUANTUM flag will be removed in switch_to_user.
*/
}
else {


@@ -25,9 +25,9 @@ _PROTOTYPE( void cycles_accounting_init, (void) );
* possible before returning to userspace. This function is architecture
* dependent
*/
-_PROTOTYPE( void cycles_accounting_stop, (struct proc * p) );
+_PROTOTYPE( void context_stop, (struct proc * p) );
/* this is a wrapper to make calling it from assembly easier */
-_PROTOTYPE( void cycles_accounting_stop_idle, (void) );
+_PROTOTYPE( void context_stop_idle, (void) );
/* main.c */
_PROTOTYPE( void main, (void) );
@@ -40,8 +40,8 @@ _PROTOTYPE( int do_ipc, (reg_t r1, reg_t r2, reg_t r3) );
_PROTOTYPE( int mini_notify, (const struct proc *src, endpoint_t dst) );
_PROTOTYPE( void enqueue, (struct proc *rp) );
_PROTOTYPE( void dequeue, (const struct proc *rp) );
-_PROTOTYPE( struct proc * schedcheck, (void) );
-_PROTOTYPE( struct proc * arch_finish_schedcheck, (void) );
+_PROTOTYPE( void switch_to_user, (void) );
+_PROTOTYPE( struct proc * arch_finish_switch_to_user, (void) );
_PROTOTYPE( struct proc *endpoint_lookup, (endpoint_t ep) );
#if DEBUG_ENABLE_IPC_WARNINGS
_PROTOTYPE( int isokendpt_f, (const char *file, int line, endpoint_t e, int *p, int f));
@@ -155,7 +155,7 @@ _PROTOTYPE( void ser_putc, (char) );
_PROTOTYPE( void arch_shutdown, (int) );
_PROTOTYPE( void arch_monitor, (void) );
_PROTOTYPE( void arch_get_aout_headers, (int i, struct exec *h) );
-_PROTOTYPE( void restart, (void) );
+_PROTOTYPE( void restore_user_context, (struct proc * p) );
_PROTOTYPE( void read_tsc, (unsigned long *high, unsigned long *low) );
_PROTOTYPE( int arch_init_profile_clock, (u32_t freq) );
_PROTOTYPE( void arch_stop_profile_clock, (void) );


@@ -130,7 +130,7 @@ PUBLIC void kernel_call(message *m_user, struct proc * caller)
caller->p_delivermsg_vir = (vir_bytes) m_user;
/*
* the ldt and cr3 of the caller process are loaded because it has just trapped
-* into the kernel or was already set in schedcheck() before we resume
+* into the kernel or was already set in switch_to_user() before we resume
* execution of an interrupted kernel call
*/
if (copy_msg_from_user(caller, m_user, &msg) == 0) {