#include "kernel/kernel.h"
#include "kernel/watchdog.h"
#include "arch_proto.h"
#include "glo.h"
#include <minix/minlib.h>
#include <minix/u64.h>

#include "apic.h"

#define CPUID_UNHALTED_CORE_CYCLES_AVAILABLE 0

/*
 * Intel architecture performance counters watchdog
 */

static struct arch_watchdog intel_arch_watchdog;
static struct arch_watchdog amd_watchdog;
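
/*
 * The Intel watchdog uses architectural performance counter 0: it is
 * programmed to count unhalted core cycles and to raise the (NMI-routed)
 * performance counter interrupt when it overflows. The counter is loaded
 * with a negative value derived from the CPU frequency, so the overflow
 * NMI arrives roughly every 0.5-1s while the CPU is running.
 */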
static void intel_arch_watchdog_init(const unsigned cpu)
{
	u64_t cpuf;
	u32_t val;

	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, 0);

	/* Int, OS, USR, Core cycles */
	val = 1 << 20 | 1 << 17 | 1 << 16 | 0x3c;
	ia32_msr_write(INTEL_MSR_PERFMON_SEL0, 0, val);

	/*
	 * Should give us a tick approx. every 0.5-1s; the perf counter has
	 * only the lowest 31 bits writable :(
	 */
	cpuf = cpu_get_freq(cpu);
	while (ex64hi(cpuf) || ex64lo(cpuf) > 0x7fffffffU)
		cpuf = div64u64(cpuf, 2);
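	/*
	 * The counter counts up and fires the interrupt on overflow, so load
	 * it with the two's complement of the desired period: it then wraps
	 * to zero after that many cycles.
	 */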
	cpuf = make64(-ex64lo(cpuf), ex64hi(cpuf));
	watchdog->resetval = watchdog->watchdog_resetval = cpuf;

	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, ex64lo(cpuf));

	ia32_msr_write(INTEL_MSR_PERFMON_SEL0, 0,
			val | INTEL_MSR_PERFMON_SEL0_ENABLE);

	/* unmask the performance counter interrupt */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
}
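
/*
 * Re-arm the watchdog after the NMI has fired: reload the counter and
 * unmask the performance counter LVT entry again (the APIC sets its mask
 * bit when the interrupt is delivered).
 */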
static void intel_arch_watchdog_reinit(const unsigned cpu)
{
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, ex64lo(watchdog->resetval));
}
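
/*
 * Probe the CPU and pick a watchdog implementation. On Intel, CPUID leaf
 * 0xA describes the architectural performance monitoring facilities: EBX
 * bit 0 is set when the "unhalted core cycles" event is NOT available, and
 * EAX[15:8] gives the number of general-purpose counters, which must be
 * non-zero. The performance counter LVT entry is set up to deliver an NMI,
 * but left masked until the per-CPU init routine unmasks it.
 */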
int arch_watchdog_init(void)
{
	u32_t eax, ebx, ecx, edx;
	unsigned cpu = cpuid;

	if (!lapic_addr) {
		printf("ERROR: Cannot use NMI watchdog if APIC is not enabled\n");
		return -1;
	}

	if (cpu_info[cpu].vendor == CPU_VENDOR_INTEL) {
		eax = 0xA;

		_cpuid(&eax, &ebx, &ecx, &edx);

		/*
		 * FIXME currently we only support a watchdog based on the
		 * Intel architectural performance counters. Some Intel CPUs
		 * don't have this feature.
		 */
		if (ebx & (1 << CPUID_UNHALTED_CORE_CYCLES_AVAILABLE))
			return -1;
		if (((eax >> 8) & 0xff) == 0)
			return -1;

		watchdog = &intel_arch_watchdog;
	} else if (cpu_info[cpu].vendor == CPU_VENDOR_AMD) {
		if (cpu_info[cpu].family != 6 &&
				cpu_info[cpu].family != 15 &&
				cpu_info[cpu].family != 16 &&
				cpu_info[cpu].family != 17)
			return -1;
		else
			watchdog = &amd_watchdog;
	} else
		return -1;

	/* Setup PC overflow as NMI for watchdog, it is masked for now */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_INT_MASK | APIC_ICR_DM_NMI);
	(void) lapic_read(LAPIC_LVTPCR);

	/* double check if LAPIC is enabled */
	if (lapic_addr && watchdog->init) {
		watchdog->init(cpuid);
	}

	return 0;
}

void arch_watchdog_stop(void)
{
}
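
/*
 * Report a lockup detected by the NMI watchdog: dump the register state
 * captured in the NMI frame and panic.
 */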
void arch_watchdog_lockup(const struct nmi_frame * frame)
{
	printf("KERNEL LOCK UP\n"
			"eax    0x%08x\n"
			"ecx    0x%08x\n"
			"edx    0x%08x\n"
			"ebx    0x%08x\n"
			"ebp    0x%08x\n"
			"esi    0x%08x\n"
			"edi    0x%08x\n"
			"gs     0x%08x\n"
			"fs     0x%08x\n"
			"es     0x%08x\n"
			"ds     0x%08x\n"
			"pc     0x%08x\n"
			"cs     0x%08x\n"
			"eflags 0x%08x\n",
			frame->eax,
			frame->ecx,
			frame->edx,
			frame->ebx,
			frame->ebp,
			frame->esi,
			frame->edi,
			frame->gs,
			frame->fs,
			frame->es,
			frame->ds,
			frame->pc,
			frame->cs,
			frame->eflags
			);
	panic("Kernel lockup");
}
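
/*
 * Enable the NMI watchdog on this CPU; on failure the watchdog is left
 * disabled and -1 is returned.
 */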
int i386_watchdog_start(void)
{
	if (arch_watchdog_init()) {
		printf("WARNING watchdog initialization "
				"failed! Disabled\n");
		watchdog_enabled = 0;
		return -1;
	}
	else
		BOOT_VERBOSE(printf("Watchdog enabled\n"););

	return 0;
}
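
/*
 * Compute the counter reload value for kernel profiling at the requested
 * sampling frequency: the CPU frequency divided by freq gives the sample
 * period in cycles, which is then negated so the counter overflows (and the
 * NMI fires) once per sample.
 */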
static int intel_arch_watchdog_profile_init(const unsigned freq)
{
	u64_t cpuf;

	/* FIXME works only if all CPUs have the same freq */
	cpuf = cpu_get_freq(cpuid);
	cpuf = div64u64(cpuf, freq);

	/*
	 * If freq is too low and the CPU frequency too high, the result may
	 * exceed the 31-bit range the perf counter can handle.
	 */
	if (ex64hi(cpuf) != 0 || ex64lo(cpuf) > 0x7fffffffU) {
		printf("ERROR: nmi watchdog ticks exceed 31 bits, use a higher frequency\n");
		return EINVAL;
	}

	cpuf = make64(-ex64lo(cpuf), ex64hi(cpuf));
	watchdog->profile_resetval = cpuf;

	return OK;
}

static struct arch_watchdog intel_arch_watchdog = {
	/*.init = */ intel_arch_watchdog_init,
	/*.reinit = */ intel_arch_watchdog_reinit,
	/*.profile_init = */ intel_arch_watchdog_profile_init
};
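
/*
 * AMD watchdog, based on the legacy performance monitoring MSRs: these are
 * the PerfEvtSel0/PerfCtr0 MSR addresses and the enable bit in the event
 * select register.
 */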
#define AMD_MSR_EVENT_SEL0 0xc0010000
#define AMD_MSR_EVENT_CTR0 0xc0010004
#define AMD_MSR_EVENT_SEL0_ENABLE (1 << 22)
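
/*
 * The AMD path mirrors the Intel one: event 0x76 (CPU clocks not halted) is
 * counted with the interrupt, OS and USR bits set, and the counter is loaded
 * with the negated CPU frequency so it overflows about once a second. The
 * AMD counters are wider than 31 bits, so the value is not clamped here.
 */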
static void amd_watchdog_init(const unsigned cpu)
{
	u64_t cpuf;
	u32_t val;

	ia32_msr_write(AMD_MSR_EVENT_CTR0, 0, 0);

	/* Int, OS, USR, Cycles cpu is running */
	val = 1 << 20 | 1 << 17 | 1 << 16 | 0x76;
	ia32_msr_write(AMD_MSR_EVENT_SEL0, 0, val);

	cpuf = cpu_get_freq(cpu);
	neg64(cpuf);
	watchdog->resetval = watchdog->watchdog_resetval = cpuf;

	ia32_msr_write(AMD_MSR_EVENT_CTR0,
			ex64hi(watchdog->resetval), ex64lo(watchdog->resetval));

	ia32_msr_write(AMD_MSR_EVENT_SEL0, 0,
			val | AMD_MSR_EVENT_SEL0_ENABLE);

	/* unmask the performance counter interrupt */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
}

static void amd_watchdog_reinit(const unsigned cpu)
{
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
	ia32_msr_write(AMD_MSR_EVENT_CTR0,
			ex64hi(watchdog->resetval), ex64lo(watchdog->resetval));
}
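
/*
 * Like the Intel variant, but since the AMD counter is not limited to 31
 * bits there is no range check on the computed reload value.
 */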
static int amd_watchdog_profile_init(const unsigned freq)
{
	u64_t cpuf;

	/* FIXME works only if all CPUs have the same freq */
	cpuf = cpu_get_freq(cpuid);
	cpuf = div64u64(cpuf, freq);

	neg64(cpuf);
	watchdog->profile_resetval = cpuf;

	return OK;
}

static struct arch_watchdog amd_watchdog = {
	/*.init = */ amd_watchdog_init,
	/*.reinit = */ amd_watchdog_reinit,
	/*.profile_init = */ amd_watchdog_profile_init
};