Busy idle loop when profiling
- the Intel architecture cycle counter (performance counter) does not count when the CPU is idle (halted), therefore when profiling we use a busy loop instead of halting the CPU when there is nothing to schedule
- the downside is that handling interrupts may be accounted as idle time if a sample is taken before we get out of the nested trap and pick a new process

parent d2b56f60da
commit ef92583c3a
5 changed files with 31 additions and 1 deletion
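
The mechanism the commit message describes shows up end to end in the hunks below: context_stop_idle() sets a per-CPU idle_interrupted flag from the interrupt path, and idle() spins on that flag instead of halting while sprofiling is set. The following is a minimal userland sketch of the same pattern, not the kernel code itself; the SIGALRM handler stands in for the profiling interrupt and all names in it are illustrative only.

/*
 * Sketch of the busy-idle pattern: spin on a volatile flag instead of
 * halting, so a periodic "profiling" interrupt can observe the CPU and
 * then break the idle loop.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t idle_interrupted;	/* set from the "interrupt" */

static void profile_tick(int sig)
{
	(void)sig;
	idle_interrupted = 1;	/* the kernel does this in context_stop_idle() */
}

int main(void)
{
	signal(SIGALRM, profile_tick);
	alarm(1);			/* one "sample", a second from now */

	/* Instead of halting (which would stop the cycle counter), spin
	 * until the interrupt marks the idle period as interrupted. */
	while (!idle_interrupted)
		;			/* the kernel calls arch_pause() here */

	idle_interrupted = 0;
	printf("busy-idle loop interrupted by a profiling sample\n");
	return 0;
}

The flag must be volatile (sig_atomic_t here, a per-CPU volatile int in the kernel) so the load inside the spin loop is not hoisted out by the compiler.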

@@ -11,6 +11,7 @@
 #include "kernel/interrupt.h"
 #include <minix/u64.h>
 #include "glo.h"
+#include "profile.h"
 
 
 #ifdef CONFIG_APIC

@@ -272,6 +273,9 @@ PUBLIC void context_stop_idle(void)
 
 	if (is_idle)
 		restart_local_timer();
+
+	if (sprofiling)
+		get_cpulocal_var(idle_interrupted) = 1;
 }
 
 PUBLIC u64_t ms_2_cpu_time(unsigned ms)

@@ -1030,6 +1030,14 @@ ENTRY(read_ebp)
 	mov	%ebp, %eax
 	ret
 
+ENTRY(interrupts_enable)
+	sti
+	ret
+
+ENTRY(interrupts_disable)
+	cli
+	ret
+
 
 /*
  * void switch_k_stack(void * esp, void (* continuation)(void));
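
For readers who do not read x86 assembly: the two new entry points above simply execute the sti and cli instructions, i.e. they set and clear the processor's interrupt flag. A hedged C sketch of equivalent wrappers, using GCC inline assembly and shown only to illustrate what the assembly does (they have to run at kernel privilege; the names mirror the new prototypes):

/* Illustrative equivalents of the entry points above. The "memory"
 * clobber keeps the compiler from moving memory accesses across the
 * interrupt-flag change. */
static inline void interrupts_enable(void)
{
	__asm__ volatile ("sti" ::: "memory");	/* allow maskable interrupts */
}

static inline void interrupts_disable(void)
{
	__asm__ volatile ("cli" ::: "memory");	/* mask maskable interrupts */
}

The idle() change further down relies on exactly this pairing: interrupts are enabled only for the duration of the spin so the profiling interrupt can fire, and are disabled again before the flag is cleared.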

@@ -76,6 +76,9 @@ DECLARE_CPULOCAL(struct proc *, run_q_head[NR_SCHED_QUEUES]); /* ptrs to ready l
 DECLARE_CPULOCAL(struct proc *, run_q_tail[NR_SCHED_QUEUES]); /* ptrs to ready list tails */
 DECLARE_CPULOCAL(int, cpu_is_idle); /* let the others know that you are idle */
 
+DECLARE_CPULOCAL(volatile int, idle_interrupted); /* to interrupt busy-idle
+						      while profiling */
+
 DECLARE_CPULOCAL(u64_t ,tsc_ctr_switch); /* when did we switched time accounting */
 
 /* last values read from cpu when sending ooq msg to scheduler */

@@ -43,6 +43,7 @@
 #include "vm.h"
 #include "clock.h"
 #include "spinlock.h"
+#include "profile.h"
 
 #include "arch_proto.h"
 

@@ -214,7 +215,18 @@ PRIVATE void idle(void)
 
 	/* start accounting for the idle time */
 	context_stop(proc_addr(KERNEL));
-	halt_cpu();
+	if (!sprofiling)
+		halt_cpu();
+	else {
+		volatile int * v;
+
+		v = get_cpulocal_var_ptr(idle_interrupted);
+		interrupts_enable();
+		while (!*v)
+			arch_pause();
+		interrupts_disable();
+		*v = 0;
+	}
 	/*
 	 * end of accounting for the idle task does not happen here, the kernel
 	 * is handling stuff for quite a while before it gets back here!

@@ -103,6 +103,9 @@ _PROTOTYPE( void rm_irq_handler, (const irq_hook_t *hook) );
 _PROTOTYPE( void enable_irq, (const irq_hook_t *hook) );
 _PROTOTYPE( int disable_irq, (const irq_hook_t *hook) );
 
+_PROTOTYPE(void interrupts_enable, (void));
+_PROTOTYPE(void interrupts_disable, (void));
+
 /* debug.c */
 _PROTOTYPE( int runqueues_ok, (void) );
 #ifndef CONFIG_SMP