/* ARM-specific clock functions. */

#include "kernel/kernel.h"
#include "kernel/clock.h"
#include "kernel/proc.h"
#include "kernel/interrupt.h"
#include <minix/u64.h>
#include "kernel/glo.h"
#include "kernel/profile.h"
#include "kernel/spinlock.h"

#ifdef CONFIG_SMP
#include "kernel/smp.h"
#endif

#include "omap_timer.h"
#include "omap_intr.h"
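
/* Cycle-counter ticks per millisecond, per CPU; set in init_local_timer(). */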
static unsigned tsc_per_ms[CONFIG_MAX_CPUS];
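/*
 * Start the OMAP timer at the given tick frequency, plus the free-running
 * clock that serves as this port's cycle counter ("TSC").
 */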
int init_local_timer(unsigned freq)
{
	omap3_timer_init(freq);
	omap3_frclock_init();

	/*
	 * Board-specific calibration, in free-running clock ticks per
	 * millisecond: 16250 ticks/ms is a 16.25 MHz clock (DM37XX),
	 * 15000 ticks/ms a 15 MHz clock (AM335X).
	 */
#ifdef DM37XX
	tsc_per_ms[0] = 16250;
#endif
#ifdef AM335X
	tsc_per_ms[0] = 15000;
#endif

	return 0;
}
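
/*
 * A compiled-out sketch of how the constants above could be cross-checked at
 * runtime. Everything below is illustrative only: the helper and its
 * elapsed_ms parameter are hypothetical, not part of the kernel interface.
 */
#if 0
static void tsc_calibration_check(unsigned elapsed_ms)
{
	static u64_t last;
	u64_t now;

	read_tsc_64(&now);
	if (last != 0 && elapsed_ms > 0) {
		/* Observed ticks per ms; compare against tsc_per_ms[cpuid]. */
		unsigned observed = div64u(now - last, elapsed_ms);

		(void)observed;
	}
	last = now;
}
#endif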
void stop_local_timer(void)
{
	omap3_timer_stop();
}
void arch_timer_int_handler(void)
{
	omap3_timer_int_handler();
}
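/*
 * Prime the cycle accounting: remember the current cycle counter as the
 * last context-switch point and zero the per-CPU idle statistics.
 */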
void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	make_zero64(get_cpu_var(cpu, cpu_last_tsc));
	make_zero64(get_cpu_var(cpu, cpu_last_idle));
}
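/*
 * Account for the cycles spent since the last context switch. The full delta
 * is charged to the process being switched out, and, when the kernel did IPC
 * or kernel-call work on some process's behalf (kbill_ipc/kbill_kcall), the
 * same delta is also recorded against that process's kernel-time counters.
 */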
void context_stop(struct proc * p)
{
	u64_t tsc;
	u32_t tsc_delta;	/* the delta between two switches fits in 32 bits */
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);

	read_tsc_64(&tsc);
	tsc_delta = tsc - *__tsc_ctr_switch;
	p->p_cycles += tsc_delta;

	if (kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if (kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * Deduct the just-consumed CPU cycles from the CPU time left for this
	 * process during its current quantum. Skip IDLE and other pseudo
	 * kernel tasks, which have negative endpoint numbers.
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left)
			p->p_cpu_time_left -= tsc_delta;
		else
			p->p_cpu_time_left = 0;
#endif
	}

	*__tsc_ctr_switch = tsc;
}
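/*
 * Stop accounting for the idle process. The cpu_is_idle flag tells whether
 * this CPU was actually halted in idle; if so, the local timer is restarted
 * (a no-op on this port, see restart_local_timer() below). With statistical
 * profiling enabled, note that the idle loop was interrupted.
 */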
void context_stop_idle(void)
{
	int is_idle;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	is_idle = get_cpu_var(cpu, cpu_is_idle);
	get_cpu_var(cpu, cpu_is_idle) = 0;

	context_stop(get_cpulocal_var_ptr(idle_proc));

	if (is_idle)
		restart_local_timer();
#if SPROFILE
	if (sprofiling)
		get_cpulocal_var(idle_interrupted) = 1;
#endif
}
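/*
 * Nothing to do here: the timer set up in init_local_timer() keeps running
 * on its own (it is not a one-shot timer that must be re-armed), so this
 * function exists only to satisfy the architecture interface.
 */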
void restart_local_timer(void)
{
}
int register_local_timer_handler(const irq_handler_t handler)
{
	return omap3_register_timer_handler(handler);
}
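/* Convert milliseconds to a cycle count, using the per-CPU calibration. */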
u64_t ms_2_cpu_time(unsigned ms)
{
	return mul64u(tsc_per_ms[cpuid], ms);
}
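/* The inverse conversion: a cycle count to whole milliseconds (truncating). */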
unsigned cpu_time_2_ms(u64_t cpu_time)
{
	return div64u(cpu_time, tsc_per_ms[cpuid]);
}
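
/*
 * A compiled-out usage sketch (names illustrative): converting milliseconds
 * to cycles and back round-trips exactly, because tsc_per_ms is an integer;
 * the cycles-to-milliseconds direction truncates any fractional remainder.
 */
#if 0
static void conversion_example(void)
{
	u64_t cycles = ms_2_cpu_time(10);	/* 10 ms as a cycle count */
	unsigned ms = cpu_time_2_ms(cycles);	/* back to exactly 10 */

	(void)ms;
}
#endif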
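/*
 * CPU load estimation is not implemented on this port; return zero so that
 * callers get a valid, if uninformative, answer.
 */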
short cpu_load(void)
{
	return 0;
}