SMP - CPU local variables

- most global variables carry information that is specific to the
  local CPU, so each CPU must have its own copy

- a CPU local variable must be declared in cpulocals.h between the
  DECLARE_CPULOCAL_START and DECLARE_CPULOCAL_END markers using the
  DECLARE_CPULOCAL macro

- to access the CPU local data, the provided macros must be used
  (see the usage sketch after this list):

	get_cpu_var(cpu, name)
	get_cpu_var_ptr(cpu, name)

	get_cpulocal_var(name)
	get_cpulocal_var_ptr(name)

- using these macros makes future changes to the implementation
  possible

- switching to ELF will make the declaration of CPU local data much
  simpler, e.g.

  CPULOCAL int blah;

  anywhere in the kernel source code
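
- for illustration only (the variable name below is hypothetical, not
  part of this commit), a per-CPU counter would be declared once in
  cpulocals.h and then accessed through the macros:

	DECLARE_CPULOCAL_START
	DECLARE_CPULOCAL(int, irq_depth);	/* hypothetical example */
	DECLARE_CPULOCAL_END

	/* on the CPU that owns the data */
	get_cpulocal_var(irq_depth)++;

	/* addressing a particular CPU explicitly */
	*get_cpu_var_ptr(cpu, irq_depth) = 0;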
Tomas Hruby 2010-09-15 14:09:46 +00:00
parent 2a2a19e542
commit 13a0d5fa5e
17 changed files with 208 additions and 102 deletions


@@ -39,7 +39,7 @@ struct fpu_state_s {
 char fpu_image[527];
 };
-#define INMEMORY(p) (!p->p_seg.p_cr3 || ptproc == p)
+#define INMEMORY(p) (!p->p_seg.p_cr3 || get_cpulocal_var(ptproc) == p)
 #endif /* #ifndef _I386_TYPES_H */


@@ -7,7 +7,7 @@ PROG= kernel
 SRCS= mpx.S
 SRCS+= start.c table.c main.c proc.c \
 system.c clock.c utility.c debug.c profile.c interrupt.c \
-watchdog.c
+watchdog.c cpulocals.c
 DPADD+= ${LIBTIMERS} ${LIBSYS}
 LDADD+= -ltimers -lsys


@@ -30,7 +30,7 @@ struct proc *p;
 p->p_seg.p_cr3 = m_ptr->SVMCTL_PTROOT;
 p->p_seg.p_cr3_v = (u32_t *) m_ptr->SVMCTL_PTROOT_V;
 p->p_misc_flags |= MF_FULLVM;
-if(p == ptproc) {
+if(p == get_cpulocal_var(ptproc)) {
 write_cr3(p->p_seg.p_cr3);
 }
 } else {


@@ -81,8 +81,8 @@ PUBLIC __dead void arch_shutdown(int how)
 /* We're panicing? Then retrieve and decode currently
 * loaded segment selectors.
 */
-printseg("cs: ", 1, proc_ptr, read_cs());
-printseg("ds: ", 0, proc_ptr, read_ds());
+printseg("cs: ", 1, get_cpulocal_var(proc_ptr), read_cs());
+printseg("ds: ", 0, get_cpulocal_var(proc_ptr), read_ds());
 if(read_ds() != read_ss()) {
 printseg("ss: ", 0, NULL, read_ss());
 }
@@ -536,7 +536,7 @@ PUBLIC int arch_set_params(char *params, int size)
 PUBLIC void arch_do_syscall(struct proc *proc)
 {
 /* do_ipc assumes that it's running because of the current process */
-assert(proc == proc_ptr);
+assert(proc == get_cpulocal_var(proc_ptr));
 /* Make the system call, for real this time. */
 proc->p_reg.retreg =
 do_ipc(proc->p_reg.cx, proc->p_reg.retreg, proc->p_reg.bx);
@@ -545,11 +545,13 @@ PUBLIC void arch_do_syscall(struct proc *proc)
 PUBLIC struct proc * arch_finish_switch_to_user(void)
 {
 char * stk;
+struct proc * p;
 stk = (char *)tss.sp0;
 /* set pointer to the process to run on the stack */
-*((reg_t *)stk) = (reg_t) proc_ptr;
-return proc_ptr;
+p = get_cpulocal_var(proc_ptr);
+*((reg_t *)stk) = (reg_t) p;
+return p;
 }
 PUBLIC void fpu_sigcontext(struct proc *pr, struct sigframe *fr, struct sigcontext *sc)


@@ -136,7 +136,7 @@ PUBLIC void exception_handler(int is_nested, struct exception_frame * frame)
 struct proc *saved_proc;
 /* Save proc_ptr, because it may be changed by debug statements. */
-saved_proc = proc_ptr;
+saved_proc = get_cpulocal_var(proc_ptr);
 ep = &ex_data[frame->vector];


@@ -95,6 +95,11 @@ _PROTOTYPE( void frstor, (void *));
 _PROTOTYPE( unsigned short fnstsw, (void));
 _PROTOTYPE( void fnstcw, (unsigned short* cw));
+_PROTOTYPE(void __switch_address_space, (struct proc * p,
+struct proc ** __ptproc));
+#define switch_address_space(proc) \
+__switch_address_space(proc, get_cpulocal_var_ptr(ptproc))
 /* protect.c */
 struct tss_s {
 reg_t backlink;


@@ -812,15 +812,15 @@ ENTRY(reload_ds)
 ret
 /*===========================================================================*/
-/* switch_address_space */
+/* __switch_address_space */
 /*===========================================================================*/
-/* PUBLIC void switch_address_space(struct proc *p)
+/* PUBLIC void __switch_address_space(struct proc *p, struct ** ptproc)
 *
 * sets the %cr3 register to the supplied value if it is not already set to the
 * same value in which case it would only result in an extra TLB flush which is
 * not desirable
 */
-ENTRY(switch_address_space)
+ENTRY(__switch_address_space)
 /* read the process pointer */
 mov 4(%esp), %edx
 /* enable process' segment descriptors */
@@ -839,7 +839,9 @@ ENTRY(switch_address_space)
 cmp %ecx, %eax
 je 0f
 mov %eax, %cr3
-mov %edx, _C_LABEL(ptproc)
+/* get ptproc */
+mov 8(%esp), %eax
+mov %edx, (%eax)
 0:
 ret
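
In C terms, the routine above behaves roughly as follows (an editorial
sketch, not code from this commit; read_cr3()/write_cr3() are the same
helpers used elsewhere in this kernel):

	void __switch_address_space(struct proc * p, struct proc ** ptproc)
	{
		/* reload %cr3 only when it actually changes; a redundant
		 * reload would just cause a needless TLB flush */
		if (read_cr3() != p->p_seg.p_cr3) {
			write_cr3(p->p_seg.p_cr3);
			/* record whose page tables are now loaded */
			*ptproc = p;
		}
	}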


@@ -81,7 +81,7 @@ PRIVATE phys_bytes createpde(
 pde = freepdes[free_pde_idx];
 assert(pde >= 0 && pde < 1024);
-if(pr && ((pr == ptproc) || !HASPT(pr))) {
+if(pr && ((pr == get_cpulocal_var(ptproc)) || !HASPT(pr))) {
 /* Process memory is requested, and
 * it's a process that is already in current page table, or
 * a process that is in every page table.
@@ -109,9 +109,9 @@ PRIVATE phys_bytes createpde(
 * can access, into the currently loaded page table so it becomes
 * visible.
 */
-assert(ptproc->p_seg.p_cr3_v);
-if(ptproc->p_seg.p_cr3_v[pde] != pdeval) {
-ptproc->p_seg.p_cr3_v[pde] = pdeval;
+assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);
+if(get_cpulocal_var(ptproc)->p_seg.p_cr3_v[pde] != pdeval) {
+get_cpulocal_var(ptproc)->p_seg.p_cr3_v[pde] = pdeval;
 *changed = 1;
 }
@@ -139,18 +139,18 @@ PRIVATE int lin_lin_copy(const struct proc *srcproc, vir_bytes srclinaddr,
 assert(vm_running);
 assert(nfreepdes >= 3);
-assert(ptproc);
-assert(proc_ptr);
-assert(read_cr3() == ptproc->p_seg.p_cr3);
+assert(get_cpulocal_var(ptproc));
+assert(get_cpulocal_var(proc_ptr));
+assert(read_cr3() == get_cpulocal_var(ptproc)->p_seg.p_cr3);
-procslot = ptproc->p_nr;
+procslot = get_cpulocal_var(ptproc)->p_nr;
 assert(procslot >= 0 && procslot < I386_VM_DIR_ENTRIES);
 if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
 if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
-assert(!RTS_ISSET(ptproc, RTS_SLOT_FREE));
-assert(ptproc->p_seg.p_cr3_v);
+assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
+assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);
 while(bytes > 0) {
 phys_bytes srcptr, dstptr;
@@ -190,8 +190,8 @@ PRIVATE int lin_lin_copy(const struct proc *srcproc, vir_bytes srclinaddr,
 if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
 if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
-assert(!RTS_ISSET(ptproc, RTS_SLOT_FREE));
-assert(ptproc->p_seg.p_cr3_v);
+assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
+assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);
 return OK;
 }
@@ -682,7 +682,7 @@ int vm_phys_memset(phys_bytes ph, const u8_t c, phys_bytes bytes)
 assert(nfreepdes >= 3);
-assert(ptproc->p_seg.p_cr3_v);
+assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);
 /* With VM, we have to map in the physical memory.
 * We can do this 4MB at a time.
@@ -702,7 +702,7 @@ int vm_phys_memset(phys_bytes ph, const u8_t c, phys_bytes bytes)
 ph += chunk;
 }
-assert(ptproc->p_seg.p_cr3_v);
+assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);
 return OK;
 }


@@ -60,7 +60,7 @@ PRIVATE clock_t next_timeout; /* realtime that next timer expires */
 PRIVATE clock_t realtime = 0; /* real time clock */
 /*
-* The boot processor timer interrupt handler. In addition to non-boot cpus it
+* The boot processos timer interrupt handler. In addition to non-boot cpus it
 * keeps real time and notifies the clock task if need be
 */
 extern unsigned ooq_msg;
@@ -195,9 +195,8 @@ PUBLIC int ap_timer_int_handler(void)
 * user's system time.
 */
-/* FIXME prepared for get_cpu_local_var() */
-p = proc_ptr;
-billp = bill_ptr;
+p = get_cpulocal_var(proc_ptr);
+billp = get_cpulocal_var(bill_ptr);
 p->p_user_time += ticks;

kernel/cpulocals.c (new file)

@@ -0,0 +1,3 @@
+#include "kernel.h"
+
+DEFINE_CPULOCAL_VARS;

kernel/cpulocals.h (new file)

@@ -0,0 +1,75 @@
+/* Implementation of CPU local variables generics */
+#ifndef __CPULOCALS_H__
+#define __CPULOCALS_H__
+#ifndef __ASSEMBLY__
+#include "kernel.h"
+#ifdef CONFIG_SMP
+/* SMP */
+#define CPULOCAL_ARRAY [CONFIG_MAX_CPUS]
+#define get_cpu_var(cpu, name) CPULOCAL_STRUCT[cpu].name
+#define get_cpu_var_ptr(cpu, name) (&(get_cpu_var(cpu, name)))
+#define get_cpulocal_var(name) get_cpu_var(cpuid, name)
+#define get_cpulocal_var_ptr(name) get_cpu_var_ptr(cpuid, name)
+/* FIXME - padd the structure so that items in the array do not share cacheline
+* with other cpus */
+#else
+/* single CPU */
+#define CPULOCAL_ARRAY
+#define get_cpulocal_var(name) CPULOCAL_STRUCT.name
+#define get_cpulocal_var_ptr(name) &(get_cpulocal_var(name))
+#define get_cpu_var(cpu, name) get_cpulocal_var(name)
+#endif
+#define DECLARE_CPULOCAL(type, name) type name
+#define CPULOCAL_STRUCT __cpu_local_vars
+#define ___CPULOCAL_START struct CPULOCAL_STRUCT {
+#define ___CPULOCAL_END } CPULOCAL_STRUCT CPULOCAL_ARRAY;
+#define DECLARE_CPULOCAL_START extern ___CPULOCAL_START
+#define DECLARE_CPULOCAL_END ___CPULOCAL_END
+#define DEFINE_CPULOCAL_VARS struct CPULOCAL_STRUCT CPULOCAL_STRUCT CPULOCAL_ARRAY
+/*
+* The global cpu local variables in use
+*/
+DECLARE_CPULOCAL_START
+/* Process scheduling information and the kernel reentry count. */
+DECLARE_CPULOCAL(struct proc *,proc_ptr);/* pointer to currently running process */
+DECLARE_CPULOCAL(struct proc *,bill_ptr);/* process to bill for clock ticks */
+/*
+* signal whether pagefault is already being handled to detect recursive
+* pagefaults
+*/
+DECLARE_CPULOCAL(int, pagefault_handled);
+/*
+* which processpage tables are loaded right now. We need to know this because
+* some processes are loaded in each process pagetables and don't have their own
+* pagetables. Therefore we cannot use the proc_ptr pointer
+*/
+DECLARE_CPULOCAL(struct proc *, ptproc);
+DECLARE_CPULOCAL_END
+#endif /* __ASSEMBLY__ */
+#endif /* __CPULOCALS_H__ */
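
For illustration, the macros above expand roughly as follows (an
editorial sketch of the preprocessor output; cpuid is assumed to hold
the current CPU's id, as in the SMP branch):

	/* SMP: one slot per CPU in a global array of structs */
	struct __cpu_local_vars __cpu_local_vars[CONFIG_MAX_CPUS];
	/* get_cpulocal_var(ptproc)   -> __cpu_local_vars[cpuid].ptproc */
	/* get_cpu_var_ptr(2, ptproc) -> (&(__cpu_local_vars[2].ptproc)) */

	/* single CPU: a plain struct, no array subscript */
	struct __cpu_local_vars __cpu_local_vars;
	/* get_cpulocal_var(ptproc)   -> __cpu_local_vars.ptproc */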


@@ -26,8 +26,6 @@ EXTERN struct k_randomness krandom; /* gather kernel random information */
 EXTERN struct loadinfo kloadinfo; /* status of load average */
 /* Process scheduling information and the kernel reentry count. */
-EXTERN struct proc *proc_ptr; /* pointer to currently running process */
-EXTERN struct proc *bill_ptr; /* process to bill for clock ticks */
 EXTERN struct proc *vmrequest; /* first process on vmrequest queue */
 EXTERN unsigned lost_ticks; /* clock ticks counted outside clock task */
 EXTERN char *ipc_call_names[IPCNO_HIGHEST+1]; /* human-readable call names */
@@ -67,7 +65,6 @@ EXTERN u64_t cpu_hz[CONFIG_MAX_CPUS];
 /* VM */
 EXTERN int vm_running;
 EXTERN int catch_pagefaults;
-EXTERN struct proc *ptproc;
 /* Timing */
 EXTERN util_timingdata_t timingdata[TIMING_CATEGORIES];


@@ -2,7 +2,9 @@
 #define KERNEL_H
 /* APIC is turned on by default */
+#ifndef CONFIG_APIC
 #define CONFIG_APIC
+#endif
 /* boot verbose */
 #define CONFIG_BOOT_VERBOSE
 /*
@@ -52,6 +54,7 @@
 #include "profile.h" /* system profiling */
 #include "perf.h" /* performance-related definitions */
 #include "debug.h" /* debugging, MUST be last kernel header */
+#include "cpulocals.h"
 #endif /* __ASSEMBLY__ */


@@ -180,7 +180,8 @@ PUBLIC int main(void)
 }
 /* scheduling functions depend on proc_ptr pointing somewhere. */
-if(!proc_ptr) proc_ptr = rp;
+if(!get_cpulocal_var(proc_ptr))
+get_cpulocal_var(proc_ptr) = rp;
 /* If this process has its own page table, VM will set the
 * PT up and manage it. VM will signal the kernel when it has
@@ -218,7 +219,7 @@ PUBLIC int main(void)
 /* MINIX is now ready. All boot image processes are on the ready queue.
 * Return to the assembly code to start running the current process.
 */
-bill_ptr = proc_addr(IDLE); /* it has to point somewhere */
+get_cpulocal_var(bill_ptr) = proc_addr(IDLE); /* it has to point somewhere */
 announce(); /* print MINIX startup banner */
 /*


@@ -43,6 +43,8 @@
 #include "vm.h"
 #include "clock.h"
+#include "arch_proto.h"
+
 /* Scheduling and message passing functions */
 FORWARD _PROTOTYPE( void idle, (void));
 /**
@@ -135,12 +137,14 @@ PUBLIC void switch_to_user(void)
 /* This function is called an instant before proc_ptr is
 * to be scheduled again.
 */
+struct proc * p;
+p = get_cpulocal_var(proc_ptr);
 /*
 * if the current process is still runnable check the misc flags and let
 * it run unless it becomes not runnable in the meantime
 */
-if (proc_is_runnable(proc_ptr))
+if (proc_is_runnable(p))
 goto check_misc_flags;
 /*
 * if a process becomes not runnable while handling the misc flags, we
@@ -148,13 +152,13 @@
 * current process wasn' runnable, we pick a new one here
 */
 not_runnable_pick_new:
-if (proc_is_preempted(proc_ptr)) {
-proc_ptr->p_rts_flags &= ~RTS_PREEMPTED;
-if (proc_is_runnable(proc_ptr)) {
-if (!is_zero64(proc_ptr->p_cpu_time_left))
-enqueue_head(proc_ptr);
+if (proc_is_preempted(p)) {
+p->p_rts_flags &= ~RTS_PREEMPTED;
+if (proc_is_runnable(p)) {
+if (!is_zero64(p->p_cpu_time_left))
+enqueue_head(p);
 else
-enqueue(proc_ptr);
+enqueue(p);
 }
 }
@@ -164,104 +168,111 @@ not_runnable_pick_new:
 * timer interrupt the execution resumes here and we can pick another
 * process. If there is still nothing runnable we "schedule" IDLE again
 */
-while (!(proc_ptr = pick_proc())) {
-proc_ptr = proc_addr(IDLE);
-if (priv(proc_ptr)->s_flags & BILLABLE)
-bill_ptr = proc_ptr;
+while (!(p = pick_proc())) {
+p = get_cpulocal_var(proc_ptr) = proc_addr(IDLE);
+if (priv(p)->s_flags & BILLABLE)
+get_cpulocal_var(bill_ptr) = p;
 idle();
 }
-switch_address_space(proc_ptr);
+/* update the global variable */
+get_cpulocal_var(proc_ptr) = p;
+switch_address_space(p);
 check_misc_flags:
-assert(proc_ptr);
-assert(proc_is_runnable(proc_ptr));
-while (proc_ptr->p_misc_flags &
+assert(p);
+assert(proc_is_runnable(p));
+while (p->p_misc_flags &
 (MF_KCALL_RESUME | MF_DELIVERMSG |
 MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {
-assert(proc_is_runnable(proc_ptr));
-if (proc_ptr->p_misc_flags & MF_KCALL_RESUME) {
-kernel_call_resume(proc_ptr);
+assert(proc_is_runnable(p));
+if (p->p_misc_flags & MF_KCALL_RESUME) {
+kernel_call_resume(p);
 }
-else if (proc_ptr->p_misc_flags & MF_DELIVERMSG) {
+else if (p->p_misc_flags & MF_DELIVERMSG) {
 TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
-proc_ptr->p_name, proc_ptr->p_endpoint););
-delivermsg(proc_ptr);
+p->p_name, p->p_endpoint););
+delivermsg(p);
 }
-else if (proc_ptr->p_misc_flags & MF_SC_DEFER) {
+else if (p->p_misc_flags & MF_SC_DEFER) {
 /* Perform the system call that we deferred earlier. */
-assert (!(proc_ptr->p_misc_flags & MF_SC_ACTIVE));
+assert (!(p->p_misc_flags & MF_SC_ACTIVE));
-arch_do_syscall(proc_ptr);
+arch_do_syscall(p);
 /* If the process is stopped for signal delivery, and
 * not blocked sending a message after the system call,
 * inform PM.
 */
-if ((proc_ptr->p_misc_flags & MF_SIG_DELAY) &&
-!RTS_ISSET(proc_ptr, RTS_SENDING))
-sig_delay_done(proc_ptr);
+if ((p->p_misc_flags & MF_SIG_DELAY) &&
+!RTS_ISSET(p, RTS_SENDING))
+sig_delay_done(p);
 }
-else if (proc_ptr->p_misc_flags & MF_SC_TRACE) {
+else if (p->p_misc_flags & MF_SC_TRACE) {
 /* Trigger a system call leave event if this was a
 * system call. We must do this after processing the
 * other flags above, both for tracing correctness and
 * to be able to use 'break'.
 */
-if (!(proc_ptr->p_misc_flags & MF_SC_ACTIVE))
+if (!(p->p_misc_flags & MF_SC_ACTIVE))
 break;
-proc_ptr->p_misc_flags &=
+p->p_misc_flags &=
 ~(MF_SC_TRACE | MF_SC_ACTIVE);
 /* Signal the "leave system call" event.
 * Block the process.
 */
-cause_sig(proc_nr(proc_ptr), SIGTRAP);
+cause_sig(proc_nr(p), SIGTRAP);
 }
-else if (proc_ptr->p_misc_flags & MF_SC_ACTIVE) {
+else if (p->p_misc_flags & MF_SC_ACTIVE) {
 /* If MF_SC_ACTIVE was set, remove it now:
 * we're leaving the system call.
 */
-proc_ptr->p_misc_flags &= ~MF_SC_ACTIVE;
+p->p_misc_flags &= ~MF_SC_ACTIVE;
 break;
 }
-if (!proc_is_runnable(proc_ptr))
-break;
+/*
+* the selected process might not be runnable anymore. We have
+* to checkit and schedule another one
+*/
+if (!proc_is_runnable(p))
+goto not_runnable_pick_new;
 }
+/*
+* check the quantum left before it runs again. We must do it only here
+* as we are sure that a possible out-of-quantum message to the
+* scheduler will not collide with the regular ipc
+*/
-if (is_zero64(proc_ptr->p_cpu_time_left))
-proc_no_time(proc_ptr);
+if (is_zero64(p->p_cpu_time_left))
+proc_no_time(p);
 /*
 * After handling the misc flags the selected process might not be
 * runnable anymore. We have to checkit and schedule another one
 */
-if (!proc_is_runnable(proc_ptr))
+if (!proc_is_runnable(p))
 goto not_runnable_pick_new;
 TRACE(VF_SCHEDULING, printf("starting %s / %d\n",
-proc_ptr->p_name, proc_ptr->p_endpoint););
+p->p_name, p->p_endpoint););
 #if DEBUG_TRACE
-proc_ptr->p_schedules++;
+p->p_schedules++;
 #endif
-proc_ptr = arch_finish_switch_to_user();
-assert(!is_zero64(proc_ptr->p_cpu_time_left));
+p = arch_finish_switch_to_user();
+assert(!is_zero64(p->p_cpu_time_left));
 context_stop(proc_addr(KERNEL));
 /* If the process isn't the owner of FPU, enable the FPU exception */
-if(fpu_owner != proc_ptr)
+if(fpu_owner != p)
 enable_fpu_exception();
 else
 disable_fpu_exception();
@@ -269,13 +280,13 @@ check_misc_flags:
 /* If MF_CONTEXT_SET is set, don't clobber process state within
 * the kernel. The next kernel entry is OK again though.
 */
-proc_ptr->p_misc_flags &= ~MF_CONTEXT_SET;
+p->p_misc_flags &= ~MF_CONTEXT_SET;
 /*
 * restore_user_context() carries out the actual mode switch from kernel
 * to userspace. This function does not return
 */
-restore_user_context(proc_ptr);
+restore_user_context(p);
 NOT_REACHABLE;
 }
@@ -404,7 +415,7 @@ PRIVATE int do_sync_ipc(struct proc * caller_ptr, /* who made the call */
 PUBLIC int do_ipc(reg_t r1, reg_t r2, reg_t r3)
 {
-struct proc * caller_ptr = proc_ptr; /* always the current process */
+struct proc *const caller_ptr = get_cpulocal_var(proc_ptr); /* get pointer to caller */
 int call_nr = (int) r1;
 assert(!RTS_ISSET(caller_ptr, RTS_SLOT_FREE));
@@ -1199,6 +1210,7 @@ PUBLIC void enqueue(
 * defined in sched() and pick_proc().
 */
 int q = rp->p_priority; /* scheduling queue to use */
+struct proc * p;
 #if DEBUG_RACE
 /* With DEBUG_RACE, schedule everyone at the same priority level. */
@@ -1225,10 +1237,11 @@
 * preempted. The current process must be preemptible. Testing the priority
 * also makes sure that a process does not preempt itself
 */
-assert(proc_ptr && proc_ptr_ok(proc_ptr));
-if ((proc_ptr->p_priority > rp->p_priority) &&
-(priv(proc_ptr)->s_flags & PREEMPTIBLE))
-RTS_SET(proc_ptr, RTS_PREEMPTED); /* calls dequeue() */
+p = get_cpulocal_var(proc_ptr);
+assert(p);
+if((p->p_priority > rp->p_priority) &&
+(priv(p)->s_flags & PREEMPTIBLE))
+RTS_SET(p, RTS_PREEMPTED); /* calls dequeue() */
 #if DEBUG_SANITYCHECKS
 assert(runqueues_ok());
@@ -1372,7 +1385,7 @@ PRIVATE struct proc * pick_proc(void)
 rp->p_name, rp->p_endpoint, q););
 assert(proc_is_runnable(rp));
 if (priv(rp)->s_flags & BILLABLE)
-bill_ptr = rp; /* bill for system time */
+get_cpulocal_var(bill_ptr) = rp; /* bill for system time */
 return rp;
 }
 return NULL;
@@ -1485,6 +1498,7 @@ PUBLIC void proc_no_time(struct proc * p)
 PUBLIC void copr_not_available_handler(void)
 {
+struct proc * p;
 /*
 * Disable the FPU exception (both for the kernel and for the process
 * once it's scheduled), and initialize or restore the FPU state.
@@ -1492,9 +1506,11 @@
 disable_fpu_exception();
+p = get_cpulocal_var(proc_ptr);
 /* if FPU is not owned by anyone, do not store anything */
 if (fpu_owner != NULL) {
-assert(fpu_owner != proc_ptr);
+assert(fpu_owner != p);
 save_fpu(fpu_owner);
 }
@@ -1502,10 +1518,10 @@
 * restore the current process' state and let it run again, do not
 * schedule!
 */
-restore_fpu(proc_ptr);
-fpu_owner = proc_ptr;
+restore_fpu(p);
+fpu_owner = p;
 context_stop(proc_addr(KERNEL));
-restore_user_context(proc_ptr);
+restore_user_context(p);
 NOT_REACHABLE;
 }


@@ -65,6 +65,7 @@ PUBLIC void stop_profile_clock()
 *===========================================================================*/
 PRIVATE int profile_clock_handler(irq_hook_t *hook)
 {
+struct proc * p;
 /* This executes on every tick of the CMOS timer. */
 /* Are we profiling, and profiling memory not full? */
@@ -76,25 +77,27 @@ PRIVATE int profile_clock_handler(irq_hook_t *hook)
 return(1);
 }
+p = get_cpulocal_var(proc_ptr);
 /* All is OK */
 /* Idle process? */
-if (priv(proc_ptr)->s_proc_nr == IDLE) {
+if (priv(p)->s_proc_nr == IDLE) {
 sprof_info.idle_samples++;
 } else
 /* Runnable system process? */
-if (priv(proc_ptr)->s_flags & SYS_PROC && proc_is_runnable(proc_ptr)) {
+if (priv(p)->s_flags & SYS_PROC && proc_is_runnable(p)) {
 /* Note: k_reenter is always 0 here. */
 /* Store sample (process name and program counter). */
-data_copy(KERNEL, (vir_bytes) proc_ptr->p_name,
+data_copy(KERNEL, (vir_bytes) p->p_name,
 sprof_ep, sprof_data_addr_vir + sprof_info.mem_used,
-strlen(proc_ptr->p_name));
+strlen(p->p_name));
-data_copy(KERNEL, (vir_bytes) &proc_ptr->p_reg.pc, sprof_ep,
+data_copy(KERNEL, (vir_bytes) &p->p_reg.pc, sprof_ep,
 (vir_bytes) (sprof_data_addr_vir + sprof_info.mem_used +
-sizeof(proc_ptr->p_name)),
-(vir_bytes) sizeof(proc_ptr->p_reg.pc));
+sizeof(p->p_name)),
+(vir_bytes) sizeof(p->p_reg.pc));
 sprof_info.mem_used += sizeof(sprof_sample);


@@ -76,7 +76,7 @@ PUBLIC int do_update(struct proc * caller, message * m_ptr)
 proc_stacktrace(src_rp);
 proc_stacktrace(dst_rp);
-printf("do_update: curr ptproc %d\n", ptproc->p_endpoint);
+printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
 #endif
 /* Let destination inherit the target mask from source. */
@@ -107,7 +107,7 @@ PUBLIC int do_update(struct proc * caller, message * m_ptr)
 adjust_priv_slot(priv(dst_rp), &orig_dst_priv);
 /* Swap global process slot addresses. */
-swap_proc_slot_pointer(&ptproc, src_rp, dst_rp);
+swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);
 /* Fix segments. */
 alloc_segments(src_rp);
@@ -121,7 +121,7 @@ PUBLIC int do_update(struct proc * caller, message * m_ptr)
 proc_stacktrace(src_rp);
 proc_stacktrace(dst_rp);
-printf("do_update: curr ptproc %d\n", ptproc->p_endpoint);
+printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
 #endif
 return OK;