From e87d29171f327714a119600053a4f0386b31c22a Mon Sep 17 00:00:00 2001
From: Tomas Hruby
Date: Wed, 15 Sep 2010 14:11:03 +0000
Subject: [PATCH] SMP - Compiles for both single and multi processor again

- this patch adds various fixes, as some of the previous patches break
  compilation when CONFIG_SMP is not set

---
 kernel/arch/i386/arch_clock.c | 51 ++++++++++++++++++-----------------
 kernel/arch/i386/klib.S       |  1 -
 kernel/proc.c                 |  2 ++
 kernel/system.c               |  6 +++++
 servers/pm/glo.h              |  2 ++
 servers/sched/schedproc.h     |  4 +++
 servers/sched/schedule.c      |  4 +++
 7 files changed, 45 insertions(+), 25 deletions(-)

diff --git a/kernel/arch/i386/arch_clock.c b/kernel/arch/i386/arch_clock.c
index ef0389b04..36c28b10d 100644
--- a/kernel/arch/i386/arch_clock.c
+++ b/kernel/arch/i386/arch_clock.c
@@ -194,30 +194,7 @@ PUBLIC void context_stop(struct proc * p)
 	u64_t tsc, tsc_delta;
 	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
 
-	read_tsc_64(&tsc);
-	tsc_delta = sub64(tsc, *__tsc_ctr_switch);
-	p->p_cycles = add64(p->p_cycles, tsc_delta);
-
-	/*
-	 * deduct the just consumed cpu cycles from the cpu time left for this
-	 * process during its current quantum. Skip IDLE and other pseudo kernel
-	 * tasks
-	 */
-	if (p->p_endpoint >= 0) {
-#if DEBUG_RACE
-		make_zero64(p->p_cpu_time_left);
-#else
-		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
-		if (tsc_delta.hi < p->p_cpu_time_left.hi ||
-				(tsc_delta.hi == p->p_cpu_time_left.hi &&
-				 tsc_delta.lo < p->p_cpu_time_left.lo))
-			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
-		else {
-			make_zero64(p->p_cpu_time_left);
-		}
-#endif
-	}
-
+#ifdef CONFIG_SMP
 	/*
 	 * This function is called only if we switch from kernel to user or idle
 	 * or back. Therefore this is a perfect location to place the big kernel
@@ -251,7 +228,33 @@ PUBLIC void context_stop(struct proc * p)
 		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
 		p->p_cycles = add64(p->p_cycles, tmp);
 	}
+#else
+	read_tsc_64(&tsc);
+	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
+#endif
+	tsc_delta = sub64(tsc, *__tsc_ctr_switch);
+
+	/*
+	 * deduct the just consumed cpu cycles from the cpu time left for this
+	 * process during its current quantum. Skip IDLE and other pseudo kernel
+	 * tasks
+	 */
+	if (p->p_endpoint >= 0) {
+#if DEBUG_RACE
+		make_zero64(p->p_cpu_time_left);
+#else
+		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
+		if (tsc_delta.hi < p->p_cpu_time_left.hi ||
+				(tsc_delta.hi == p->p_cpu_time_left.hi &&
+				 tsc_delta.lo < p->p_cpu_time_left.lo))
+			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
+		else {
+			make_zero64(p->p_cpu_time_left);
+		}
+#endif
+	}
+
 	*__tsc_ctr_switch = tsc;
 }
diff --git a/kernel/arch/i386/klib.S b/kernel/arch/i386/klib.S
index b0519631f..decb55a7d 100644
--- a/kernel/arch/i386/klib.S
+++ b/kernel/arch/i386/klib.S
@@ -16,7 +16,6 @@
  * kernel.
  */
 
-
 /*
  * The routines only guarantee to preserve the registers the C compiler
  * expects to be preserved (ebx, esi, edi, ebp, esp, segment registers, and
diff --git a/kernel/proc.c b/kernel/proc.c
index be4ac1f67..afa58cd23 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -188,10 +188,12 @@ PRIVATE void idle(void)
 
 	switch_address_space_idle();
 
+#ifdef CONFIG_SMP
 	/* we don't need to keep time on APs as it is handled on the BSP */
 	if (cpuid != bsp_cpu_id)
 		arch_stop_local_timer();
 	get_cpulocal_var(cpu_is_idle) = 1;
+#endif
 
 	/* start accounting for the idle time */
 	context_stop(proc_addr(KERNEL));
diff --git a/kernel/system.c b/kernel/system.c
index 14454009e..0c79ae7a6 100644
--- a/kernel/system.c
+++ b/kernel/system.c
@@ -650,8 +650,10 @@ PUBLIC int sched_proc(struct proc *p,
 	if (quantum < 1 && quantum != -1)
 		return(EINVAL);
 
+#ifdef CONFIG_SMP
 	if ((cpu < 0 && cpu != -1) || (cpu > 0 && (unsigned) cpu >= ncpus))
 		return(EINVAL);
+#endif
 
 	/* In some cases, we might be rescheduling a runnable process. In such
 	 * a case (i.e. if we are updating the priority) we set the NO_QUANTUM
@@ -663,11 +665,13 @@ PUBLIC int sched_proc(struct proc *p,
 	/* FIXME this is a problem for SMP if the processes currently runs on a
 	 * different CPU */
 	if (proc_is_runnable(p)) {
+#ifdef CONFIG_SMP
 		if (p->p_cpu != cpuid && cpu != -1 && cpu != p->p_cpu) {
 			printf("WARNING : changing cpu of a runnable process %d "
 					"on a different cpu!\n", p->p_endpoint);
 			return(EINVAL);
 		}
+#endif
 
 		RTS_SET(p, RTS_NO_QUANTUM);
 	}
@@ -681,8 +685,10 @@ PUBLIC int sched_proc(struct proc *p,
 		p->p_quantum_size_ms = quantum;
 		p->p_cpu_time_left = ms_2_cpu_time(quantum);
 	}
+#ifdef CONFIG_SMP
 	if (cpu != -1)
 		p->p_cpu = cpu;
+#endif
 
 	/* Clear the scheduling bit and enqueue the process */
 	RTS_UNSET(p, RTS_NO_QUANTUM);
diff --git a/servers/pm/glo.h b/servers/pm/glo.h
index 46e4b449b..76feac5c1 100644
--- a/servers/pm/glo.h
+++ b/servers/pm/glo.h
@@ -28,4 +28,6 @@ EXTERN int abort_flag;
 EXTERN char monitor_code[256];
 EXTERN struct machine machine;	/* machine info */
 
+#ifdef CONFIG_SMP
 EXTERN unsigned cpu_proc[CONFIG_MAX_CPUS];
+#endif
diff --git a/servers/sched/schedproc.h b/servers/sched/schedproc.h
index 77e31b638..69f99e4c2 100644
--- a/servers/sched/schedproc.h
+++ b/servers/sched/schedproc.h
@@ -11,6 +11,10 @@
 #define EXTERN
 #endif
 
+#ifndef CONFIG_SMP
+#define CONFIG_MAX_CPUS 1
+#endif
+
 /**
  * We might later want to add more information to this table, such as the
  * process owner, process group or cpumask.
diff --git a/servers/sched/schedule.c b/servers/sched/schedule.c
index 4e1e24ca3..e73e3ac4e 100644
--- a/servers/sched/schedule.c
+++ b/servers/sched/schedule.c
@@ -120,7 +120,9 @@ PUBLIC int do_stop_scheduling(message *m_ptr)
 	}
 
 	rmp = &schedproc[proc_nr_n];
+#ifdef CONFIG_SMP
 	cpu_proc[rmp->cpu]--;
+#endif
 	rmp->flags = 0; /*&= ~IN_USE;*/
 
 	return OK;
@@ -172,8 +174,10 @@ PUBLIC int do_start_scheduling(message *m_ptr)
 		 * changed that yet either, we can be sure that BSP is the
 		 * processor where the processes run now.
 		 */
+#ifdef CONFIG_SMP
 		rmp->cpu = machine.bsp_id;
 		/* FIXME set the cpu mask */
+#endif
 	}
 
 	switch (m_ptr->m_type) {