diff --git a/kernel/arch/i386/apic_asm.S b/kernel/arch/i386/apic_asm.S
index d014928e9..b387333f5 100644
--- a/kernel/arch/i386/apic_asm.S
+++ b/kernel/arch/i386/apic_asm.S
@@ -38,15 +38,15 @@
 							\
 	SAVE_PROCESS_CTX(0)				;\
 	push	%ebp					;\
-	call	cycles_accounting_stop			;\
+	call	context_stop				;\
 	add	$4, %esp				;\
 	movl	$0, %ebp	/* for stack trace */	;\
 	APIC_IRQ_HANDLER(irq)				;\
-	jmp	restart					;\
+	jmp	switch_to_user				;\
 							\
 0:							\
 	pusha						;\
-	call	cycles_accounting_stop_idle		;\
+	call	context_stop_idle			;\
 	APIC_IRQ_HANDLER(irq)				;\
 	CLEAR_IF(10*4(%esp))				;\
 	popa						;\
@@ -149,15 +149,15 @@ apic_hwint15:
 							\
 	SAVE_PROCESS_CTX(0)				;\
 	push	%ebp					;\
-	call	cycles_accounting_stop			;\
+	call	context_stop				;\
 	add	$4, %esp				;\
 	movl	$0, %ebp	/* for stack trace */	;\
 	LAPIC_INTR_HANDLER(func)			;\
-	jmp	restart					;\
+	jmp	switch_to_user				;\
 							\
 0:							\
 	pusha						;\
-	call	cycles_accounting_stop_idle		;\
+	call	context_stop_idle			;\
 	LAPIC_INTR_HANDLER(func)			;\
 	CLEAR_IF(10*4(%esp))				;\
 	popa						;\
diff --git a/kernel/arch/i386/arch_clock.c b/kernel/arch/i386/arch_clock.c
index 9f1d66e08..b3bb9cdfa 100644
--- a/kernel/arch/i386/arch_clock.c
+++ b/kernel/arch/i386/arch_clock.c
@@ -131,7 +131,7 @@ PUBLIC void cycles_accounting_init(void)
 	read_tsc_64(&tsc_ctr_switch);
 }
 
-PUBLIC void cycles_accounting_stop(struct proc * p)
+PUBLIC void context_stop(struct proc * p)
 {
 	u64_t tsc;
 
@@ -140,7 +140,7 @@ PUBLIC void cycles_accounting_stop(struct proc * p)
 	tsc_ctr_switch = tsc;
 }
 
-PUBLIC void cycles_accounting_stop_idle(void)
+PUBLIC void context_stop_idle(void)
 {
-	cycles_accounting_stop(proc_addr(IDLE));
+	context_stop(proc_addr(IDLE));
 }
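The rename in arch_clock.c is mechanical: context_stop() still charges the TSC cycles elapsed since the last switch to the process being stopped, and context_stop_idle() remains the wrapper that makes it easy to call from assembly. The hunks only show the declaration, the final tsc_ctr_switch update and the wrapper, so the charging step in the sketch below is an assumption (add64()/sub64() are MINIX's u64_t helpers):

	/* Sketch of context_stop(); the middle of the function falls outside
	 * the hunk context, so the add64()/sub64() charging is assumed. */
	PUBLIC void context_stop(struct proc * p)
	{
		u64_t tsc;

		read_tsc_64(&tsc);	/* read the current cycle counter */
		/* charge the cycles since the last switch to process p */
		p->p_cycles = add64(p->p_cycles, sub64(tsc, tsc_ctr_switch));
		tsc_ctr_switch = tsc;	/* the next interval starts now */
	}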
diff --git a/kernel/arch/i386/arch_system.c b/kernel/arch/i386/arch_system.c
index 33340f471..1b9e45ef9 100644
--- a/kernel/arch/i386/arch_system.c
+++ b/kernel/arch/i386/arch_system.c
@@ -423,7 +423,7 @@ PUBLIC void arch_do_syscall(struct proc *proc)
 	do_ipc(proc->p_reg.cx, proc->p_reg.retreg, proc->p_reg.bx);
 }
 
-PUBLIC struct proc * arch_finish_schedcheck(void)
+PUBLIC struct proc * arch_finish_switch_to_user(void)
 {
 	char * stk;
 	stk = (char *)tss.sp0;
diff --git a/kernel/arch/i386/mpx.S b/kernel/arch/i386/mpx.S
index f441d79af..7c0290946 100644
--- a/kernel/arch/i386/mpx.S
+++ b/kernel/arch/i386/mpx.S
@@ -5,29 +5,12 @@
  * interrupt handlers. It cooperates with the code in "start.c" to set up a
  * good environment for main().
  *
- * Every transition to the kernel goes through this file. Transitions to the
- * kernel may be nested. The initial entry may be with a system call (i.e.,
- * send or receive a message), an exception or a hardware interrupt; kernel
- * reentries may only be made by hardware interrupts. The count of reentries
- * is kept in "k_reenter". It is important for deciding whether to switch to
- * the kernel stack and for protecting the message passing code in "proc.c".
- *
- * For the message passing trap, most of the machine state is saved in the
- * proc table. (Some of the registers need not be saved.) Then the stack is
- * switched to "k_stack", and interrupts are reenabled. Finally, the system
- * call handler (in C) is called. When it returns, interrupts are disabled
- * again and the code falls into the restart routine, to finish off held-up
- * interrupts and run the process or task whose pointer is in "proc_ptr".
- *
- * Hardware interrupt handlers do the same, except (1) The entire state must
- * be saved. (2) There are too many handlers to do this inline, so the save
- * routine is called. A few cycles are saved by pushing the address of the
- * appropiate restart routine for a return later. (3) A stack switch is
- * avoided when the stack is already switched. (4) The (master) 8259 interrupt
- * controller is reenabled centrally in save(). (5) Each interrupt handler
- * masks its interrupt line using the 8259 before enabling (other unmasked)
- * interrupts, and unmasks it after servicing the interrupt. This limits the
- * nest level to the number of lines and protects the handler from itself.
+ * The kernel is entered either because of kernel calls, IPC calls, interrupts
+ * or exceptions. The TSS is set up so that the kernel stack gets loaded. The
+ * user context is saved into the proc table and the handler of the event is
+ * called. Once the handler is done, switch_to_user() is called to pick a new
+ * process, finish whatever needs to be done for the next process to run, set
+ * up its context and switch to userspace.
 *
 * For communication with the boot monitor at startup time some constant
 * data are compiled into the beginning of the text segment. This facilitates
@@ -81,7 +64,7 @@ begbss:
  * the entity.
  */
 
-.globl restart
+.globl restore_user_context
 .globl reload_cr3
 
 .globl divide_error
@@ -106,7 +89,7 @@ begbss:
 .globl params_size
 .globl params_offset
 .globl mon_ds
-.globl schedcheck
+.globl switch_to_user
 .globl lazy_fpu
 
 .globl hwint00	/* handlers for hardware interrupts */
@@ -250,17 +233,17 @@ csinit:
 								\
 	SAVE_PROCESS_CTX(0)					;\
 	push	%ebp						;\
-	call	cycles_accounting_stop				;\
+	call	context_stop					;\
 	add	$4, %esp					;\
 	movl	$0, %ebp	/* for stack trace */		;\
 	PIC_IRQ_HANDLER(irq)					;\
 	movb	$END_OF_INT, %al				;\
 	outb	$INT_CTL	/* reenable interrupts in master pic */	;\
-	jmp	restart						;\
+	jmp	switch_to_user					;\
 								\
 0:								\
 	pusha							;\
-	call	cycles_accounting_stop_idle			;\
+	call	context_stop_idle				;\
 	PIC_IRQ_HANDLER(irq)					;\
 	movb	$END_OF_INT, %al				;\
 	outb	$INT_CTL	/* reenable interrupts in master pic */	;\
@@ -318,18 +301,18 @@ hwint07:
 								\
 	SAVE_PROCESS_CTX(0)					;\
 	push	%ebp						;\
-	call	cycles_accounting_stop				;\
+	call	context_stop					;\
 	add	$4, %esp					;\
 	movl	$0, %ebp	/* for stack trace */		;\
 	PIC_IRQ_HANDLER(irq)					;\
 	movb	$END_OF_INT, %al				;\
 	outb	$INT_CTL	/* reenable interrupts in master pic */	;\
 	outb	$INT2_CTL	/* reenable slave 8259 */	;\
-	jmp	restart						;\
+	jmp	switch_to_user					;\
 								\
 0:								\
 	pusha							;\
-	call	cycles_accounting_stop_idle			;\
+	call	context_stop_idle				;\
 	PIC_IRQ_HANDLER(irq)					;\
 	movb	$END_OF_INT, %al				;\
 	outb	$INT_CTL	/* reenable interrupts in master pic */	;\
@@ -402,7 +385,7 @@ ipc_entry:
 
 	/* stop user process cycles */
 	push	%ebp
-	call	cycles_accounting_stop
+	call	context_stop
 	add	$4, %esp
 
 	/* for stack trace */
@@ -415,7 +398,7 @@ ipc_entry:
 	pop	%esi
 	mov	%eax, AXREG(%esi)
 
-	jmp	restart
+	jmp	switch_to_user
 
 
 /*
@@ -439,7 +422,7 @@ kernel_call_entry:
 
 	/* stop user process cycles */
 	push	%ebp
-	call	cycles_accounting_stop
+	call	context_stop
 	add	$4, %esp
 
 	/* for stack trace */
@@ -450,7 +433,7 @@ kernel_call_entry:
 	/* restore the current process pointer and save the return value */
 	add	$8, %esp
 
-	jmp	restart
+	jmp	switch_to_user
 
 
 .balign	16
@@ -472,7 +455,7 @@ exception_entry_from_user:
 
 	/* stop user process cycles */
 	push	%ebp
-	call	cycles_accounting_stop
+	call	context_stop
 	add	$4, %esp
 
 	/* for stack trace clear %ebp */
@@ -487,7 +470,7 @@ exception_entry_from_user:
 	push	$0	/* it's not a nested exception */
 	call	exception_handler
 
-	jmp	restart
+	jmp	switch_to_user
 
 
 exception_entry_nested:
@@ -508,11 +491,8 @@ exception_entry_nested:
 /*===========================================================================*/
 /*				restart					     */
 /*===========================================================================*/
-restart:
-	call	schedcheck
-
-	/* %eax is set by schedcheck() to the process to run */
-	mov	%eax, %ebp	/* will assume P_STACKBASE == 0 */
+restore_user_context:
+	mov	4(%esp), %ebp	/* will assume P_STACKBASE == 0 */
 
 	/* reconstruct the stack for iret */
 	movl	SSREG(%ebp), %eax
@@ -617,7 +597,7 @@ copr_not_available:
 	SAVE_PROCESS_CTX_NON_LAZY(0)
 	/* stop user process cycles */
 	push	%ebp
-	call	cycles_accounting_stop
+	call	context_stop
 	pop	%ebp
 	lea	P_MISC_FLAGS(%ebp), %ebx
 	movw	(%ebx), %cx
@@ -639,7 +619,7 @@ fp_l_no_fxsr:
 	frstor	(%eax)
 copr_return:
 	orw	$MF_USED_FPU, (%ebx)	/* fpu was used during last execution */
-	jmp	restart
+	jmp	switch_to_user
 
 copr_not_available_in_kernel:
 	movl	$0, (%esp)
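The change at the old restart label is the core of this patch. restart used to call schedcheck() and receive the chosen process in %eax; restore_user_context() is now an ordinary one-argument function, so mov 4(%esp), %ebp fetches the struct proc pointer its C caller pushed (the return address occupies 0(%esp)). Condensed from the proc.c hunk later in this patch, the new contract looks roughly like this:

	/* The control flow is inverted: C code now drives the exit to
	 * userspace instead of the assembly stub calling back into C. */
	_PROTOTYPE( void restore_user_context, (struct proc * p) );

	PUBLIC void switch_to_user(void)
	{
		/* ... pick the next process and finish its bookkeeping ... */
		proc_ptr = arch_finish_switch_to_user();
		context_stop(proc_addr(KERNEL));
		restore_user_context(proc_ptr);	/* mode switch; never returns */
		NOT_REACHABLE;
	}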
diff --git a/kernel/main.c b/kernel/main.c
index e0ad78cdb..f00dc602e 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -257,7 +257,7 @@ PUBLIC void main(void)
 
 	assert(runqueues_ok());
 
-	restart();
+	switch_to_user();
 	NOT_REACHABLE;
 }
 
diff --git a/kernel/proc.c b/kernel/proc.c
index da3e04e5b..0a1420d34 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -89,7 +89,7 @@ PRIVATE void idle(void)
 	 */
 
 	/* start accounting for the idle time */
-	cycles_accounting_stop(proc_addr(KERNEL));
+	context_stop(proc_addr(KERNEL));
 	halt_cpu();
 	/*
 	 * end of accounting for the idle task does not happen here, the kernel
@@ -98,9 +98,9 @@ PRIVATE void idle(void)
 }
 
 /*===========================================================================*
- *				schedcheck				     *
+ *				switch_to_user				     *
 *===========================================================================*/
-PUBLIC struct proc * schedcheck(void)
+PUBLIC void switch_to_user(void)
 {
 	/* This function is called an instant before proc_ptr is
 	 * to be scheduled again.
@@ -232,12 +232,17 @@ check_misc_flags:
 #endif
 
-	proc_ptr = arch_finish_schedcheck();
+	proc_ptr = arch_finish_switch_to_user();
 
 	assert(proc_ptr->p_ticks_left > 0);
 
-	cycles_accounting_stop(proc_addr(KERNEL));
+	context_stop(proc_addr(KERNEL));
 
-	return proc_ptr;
+	/*
+	 * restore_user_context() carries out the actual mode switch from kernel
+	 * to userspace. This function does not return.
+	 */
+	restore_user_context(proc_ptr);
+	NOT_REACHABLE;
 }
 
 /*
@@ -1398,7 +1403,7 @@ PRIVATE void notify_scheduler(struct proc *p)
 	/*
 	 * If a scheduler is scheduling itself or has no scheduler, and
 	 * runs out of quantum, we don't send a message. The
-	 * RTS_NO_QUANTUM flag will be removed by schedcheck in proc.c.
+	 * RTS_NO_QUANTUM flag will be removed in switch_to_user.
 	 */
 	}
 	else {
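One subtlety the idle() hunk preserves: idle time is still accounted in two halves. idle() opens the interval by charging the KERNEL pseudo-process and halting; the interval is closed by the low-level interrupt stubs above, which call context_stop_idle() when they interrupt an idle CPU. Condensed from the hunks in this patch:

	/* How an idle interval is opened and closed under the new names. */
	PRIVATE void idle(void)
	{
		context_stop(proc_addr(KERNEL));  /* open: kernel stops running */
		halt_cpu();                       /* sleep until an interrupt */
		/* the interrupt stub calls context_stop_idle() to close it */
	}

	PUBLIC void context_stop_idle(void)
	{
		context_stop(proc_addr(IDLE));    /* charge the halt to IDLE */
	}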
diff --git a/kernel/proto.h b/kernel/proto.h
index 7f58ac821..0aa7e677d 100644
--- a/kernel/proto.h
+++ b/kernel/proto.h
@@ -25,9 +25,9 @@ _PROTOTYPE( void cycles_accounting_init, (void) );
  * possible before returning to userspace. These functions are architecture
  * dependent
  */
-_PROTOTYPE( void cycles_accounting_stop, (struct proc * p) );
+_PROTOTYPE( void context_stop, (struct proc * p) );
 /* this is a wrapper to make calling it from assembly easier */
-_PROTOTYPE( void cycles_accounting_stop_idle, (void) );
+_PROTOTYPE( void context_stop_idle, (void) );
 
 /* main.c */
 _PROTOTYPE( void main, (void) );
@@ -40,8 +40,8 @@ _PROTOTYPE( int do_ipc, (reg_t r1, reg_t r2, reg_t r3) );
 _PROTOTYPE( int mini_notify, (const struct proc *src, endpoint_t dst) );
 _PROTOTYPE( void enqueue, (struct proc *rp) );
 _PROTOTYPE( void dequeue, (const struct proc *rp) );
-_PROTOTYPE( struct proc * schedcheck, (void) );
-_PROTOTYPE( struct proc * arch_finish_schedcheck, (void) );
+_PROTOTYPE( void switch_to_user, (void) );
+_PROTOTYPE( struct proc * arch_finish_switch_to_user, (void) );
 _PROTOTYPE( struct proc *endpoint_lookup, (endpoint_t ep) );
 #if DEBUG_ENABLE_IPC_WARNINGS
 _PROTOTYPE( int isokendpt_f, (const char *file, int line, endpoint_t e, int *p, int f));
@@ -155,7 +155,7 @@ _PROTOTYPE( void ser_putc, (char) );
 _PROTOTYPE( void arch_shutdown, (int) );
 _PROTOTYPE( void arch_monitor, (void) );
 _PROTOTYPE( void arch_get_aout_headers, (int i, struct exec *h) );
-_PROTOTYPE( void restart, (void) );
+_PROTOTYPE( void restore_user_context, (struct proc * p) );
 _PROTOTYPE( void read_tsc, (unsigned long *high, unsigned long *low) );
 _PROTOTYPE( int arch_init_profile_clock, (u32_t freq) );
 _PROTOTYPE( void arch_stop_profile_clock, (void) );
diff --git a/kernel/system.c b/kernel/system.c
index 5510112c3..c2f7233ee 100644
--- a/kernel/system.c
+++ b/kernel/system.c
@@ -130,7 +130,7 @@ PUBLIC void kernel_call(message *m_user, struct proc * caller)
 	caller->p_delivermsg_vir = (vir_bytes) m_user;
 	/*
 	 * the ldt and cr3 of the caller process are loaded because it has just trapped
-	 * into the kernel or was already set in schedcheck() before we resume
+	 * into the kernel or they were already set in switch_to_user() before we resume
 	 * execution of an interrupted kernel call
 	 */
 	if (copy_msg_from_user(caller, m_user, &msg) == 0) {
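The system.c comment documents the invariant that keeps this path safe: kernel_call() dereferences a user pointer directly, which only works while the caller's address space (ldt/cr3) is the active one, either because the caller just trapped in or because switch_to_user() installed it before resuming an interrupted call. A sketch of the visible part of that path; the dispatch body is elided by the hunk, so the inner comment is an assumption:

	PUBLIC void kernel_call(message *m_user, struct proc * caller)
	{
		message msg;

		caller->p_delivermsg_vir = (vir_bytes) m_user;
		/* safe only while the caller's ldt/cr3 are the active ones */
		if (copy_msg_from_user(caller, m_user, &msg) == 0) {
			/* ... dispatch the kernel call with the copied message ... */
		}
	}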