minix/kernel/arch/i386/sconst.h
Tomas Hruby 5efa92f754 NMI watchdog is an awesome feature for debugging locked-up kernels.
There is not that much use for it on a single CPU; however, a deadlock
between the kernel and the system task can be detected, as can a runaway loop.

If the kernel gets locked up, timer interrupts don't occur (as all
interrupts are disabled in kernel mode). The only chance is to
interrupt the kernel with a non-maskable interrupt (NMI).

This patch generates NMIs using performance counters. It uses the most
widely available performance counters. As performance counters are
highly model-specific, this patch is not guaranteed to work on every
machine. Unfortunately this is also true for KVM :-/ On the other
hand, adding support for other CPU models is not extremely difficult,
and the framework hopefully makes it easy enough.
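
To give a rough idea of the mechanism, here is an illustrative sketch
only, based on the architectural Intel perfmon MSRs and local APIC
registers, not necessarily the code in this patch; ia32_msr_write()
and lapic_write() are assumed helper names:

    /* sketch: program performance counter 0 to count unhalted core
     * cycles and deliver an NMI when it overflows */
    #define MSR_PERFEVTSEL0  0x186     /* event select for counter 0 */
    #define MSR_PERFCTR0     0x0c1     /* the counter itself */
    #define LAPIC_LVTPC      0x340     /* LVT performance counter entry */
    #define APIC_DM_NMI      (4 << 8)  /* delivery mode: NMI */

    extern void lapic_write(unsigned reg, unsigned val);                /* assumed */
    extern void ia32_msr_write(unsigned msr, unsigned hi, unsigned lo); /* assumed */

    void watchdog_arm(unsigned reload)
    {
        /* deliver a counter overflow as an NMI, not as a normal vector */
        lapic_write(LAPIC_LVTPC, APIC_DM_NMI);

        /* the counter counts up; start it 'reload' cycles before overflow */
        ia32_msr_write(MSR_PERFCTR0, 0, -reload);

        /* event 0x3c = unhalted core cycles; count in user (bit 16) and
         * kernel (bit 17) mode, enable the overflow interrupt (bit 20)
         * and the counter itself (bit 22) */
        ia32_msr_write(MSR_PERFEVTSEL0, 0,
            0x3c | (1 << 16) | (1 << 17) | (1 << 20) | (1 << 22));
    }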

Depending on the frequency of the CPU, an NMI is generated at most
about every 0.5s. If the CPU's speed is less than 2GHz, it is generated
at most every 1s. In general an NMI is generated much less often, as
the performance counter only counts cycles while the CPU is not idle.
Therefore the overhead of this feature is fairly minimal even if the
load is high.
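
As an illustration of the arithmetic only (assuming that just 31 bits
of the counter can be written reliably; the patch may pick its reload
value differently): halve the CPU frequency until it fits into 31 bits
and use the result as the cycle count between NMIs, so e.g. a 2.8GHz
CPU ends up with 1.4e9 cycles (~0.5s) and a 1GHz CPU with 1e9 cycles
(~1s).

    /* sketch: derive the counter reload value from the CPU frequency */
    unsigned watchdog_reload_value(unsigned long long cpu_freq_hz)
    {
        while (cpu_freq_hz > 0x7fffffffULL)
            cpu_freq_hz /= 2;          /* >= ~2.1GHz: period of 0.5s or less */
        return (unsigned) cpu_freq_hz; /* below that: period of up to 1s */
    }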

Upon detecting that the kernel is locked up, the kernel dumps the
state of its registers and panics.

Local APIC must be enabled for the watchdog to work.

The code is _always_ compiled in; however, it is only enabled if
watchdog=<non-zero> is set in the boot monitor.

One corner case is serial console debugging. As dumping a lot of data
to the serial link may take a long time, the watchdog does not report
a lockup right away during this time, as that would result in too many
false positives; 10 NMIs have to be handled before the lockup is
detected. This means something between roughly 5s and 10s.
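
The detection itself then boils down to counting consecutive NMIs that
find the CPU still inside the kernel. A rough sketch with made-up
names (serial_debug_active and the two entry points are placeholders,
not the patch's actual interface):

    extern void panic(const char *msg, ...);   /* kernel panic, assumed */

    static int stuck_nmis;

    /* called from the NMI handler when the NMI interrupted kernel code;
     * too many consecutive hits mean the kernel is locked up */
    void nmi_watchdog_kernel_hit(int serial_debug_active)
    {
        int limit = serial_debug_active ? 10 : 1;  /* ~5-10s vs. one period */

        if (++stuck_nmis >= limit)
            panic("kernel locked up"); /* the real code also dumps the
                                        * kernel registers at this point */
    }

    /* an NMI that interrupted user space proves the kernel is not stuck */
    void nmi_watchdog_user_hit(void)
    {
        stuck_nmis = 0;
    }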

Another corner case is that the watchdog is enabled only after paging
is enabled, as it would be pure madness to try to get it right before
that.
2010-01-16 20:53:55 +00:00

#ifndef __SCONST_H__
#define __SCONST_H__
#include "../../const.h"
/* Miscellaneous constants used in assembler code. */
W = _WORD_SIZE /* Machine word size. */
/* Offsets in struct proc. They MUST match proc.h; see the layout sketch below. */
P_STACKBASE = 0
GSREG = P_STACKBASE
FSREG = GSREG+2 /* 386 introduces FS and GS segments */
ESREG = FSREG+2
DSREG = ESREG+2
DIREG = DSREG+2
SIREG = DIREG+W
BPREG = SIREG+W
STREG = BPREG+W /* hole for another SP */
BXREG = STREG+W
DXREG = BXREG+W
CXREG = DXREG+W
AXREG = CXREG+W
RETADR = AXREG+W /* return address for save() call */
PCREG = RETADR+W
CSREG = PCREG+W
PSWREG = CSREG+W
SPREG = PSWREG+W
SSREG = SPREG+W
P_STACKTOP = SSREG+W
FP_SAVE_AREA_P = P_STACKTOP
P_LDT_SEL = FP_SAVE_AREA_P + 532
P_CR3 = P_LDT_SEL+W
P_LDT = P_CR3+W
P_MISC_FLAGS = P_LDT + 50
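/*
 * For illustration only: the offsets above assume that the register save
 * area at the start of struct proc is laid out roughly like the hypothetical
 * struct below (assuming W = 4 on i386; the authoritative definition lives
 * in proc.h and the arch headers, and the field names here are made up):
 *
 *    struct stackframe_sketch {
 *        u16_t gs, fs, es, ds;          offsets  0,  2,  4,  6
 *        u32_t di, si, bp, st;                   8, 12, 16, 20
 *        u32_t bx, dx, cx, ax;                  24, 28, 32, 36
 *        u32_t retadr, pc, cs, psw;             40, 44, 48, 52
 *        u32_t sp, ss;                          56, 60  (P_STACKTOP = 64)
 *    };
 */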
Msize = 9 /* size of a message in 32-bit words */
/*
 * offset to the current process pointer right after a trap; we assume there
 * is always an error code on the stack
 */
#define CURR_PROC_PTR 20
/*
 * tests whether the interrupt was triggered in the kernel. If so, jump to the
 * label. The displacement tells the macro how far the CS value saved by the
 * trap is from the current %esp. The kernel code segment selector has the
 * lower 3 bits (RPL and the TI bit) zeroed, so comparing the saved CS against
 * CS_SELECTOR matches only traps that came from kernel mode.
 */
#define TEST_INT_IN_KERNEL(displ, label) \
	cmpl $CS_SELECTOR, displ(%esp) ;\
	je label ;
/*
* saves the basic interrupt context (no error code) to the process structure
*
* displ is the displacement of %esp from the original stack after trap
* pptr is the process structure pointer
* tmp is an available temporary register
*/
#define SAVE_TRAP_CTX(displ, pptr, tmp) \
	movl (0 + displ)(%esp), tmp ;\
	movl tmp, PCREG(pptr) ;\
	movl (4 + displ)(%esp), tmp ;\
	movl tmp, CSREG(pptr) ;\
	movl (8 + displ)(%esp), tmp ;\
	movl tmp, PSWREG(pptr) ;\
	movl (12 + displ)(%esp), tmp ;\
	movl tmp, SPREG(pptr) ;\
	movl tmp, STREG(pptr) ;\
	movl (16 + displ)(%esp), tmp ;\
	movl tmp, SSREG(pptr) ;
/* save the segment registers of the interrupted context into the process
 * structure; the %ss: override is used because %ds may not hold the kernel
 * data selector at this point */
#define SAVE_SEGS(pptr) \
	mov %ds, %ss:DSREG(pptr) ;\
	mov %es, %ss:ESREG(pptr) ;\
	mov %fs, %ss:FSREG(pptr) ;\
	mov %gs, %ss:GSREG(pptr) ;
/* restore the segment registers from the process structure */
#define RESTORE_SEGS(pptr) \
	movw %ss:DSREG(pptr), %ds ;\
	movw %ss:ESREG(pptr), %es ;\
	movw %ss:FSREG(pptr), %fs ;\
	movw %ss:GSREG(pptr), %gs ;
/*
 * restore the kernel segments. %ss is the kernel data segment, %cs is already
 * set, and %fs and %gs are not used by the kernel, so they are loaded with
 * the null selector
 */
#define RESTORE_KERNEL_SEGS \
	mov %ss, %si ;\
	mov %si, %ds ;\
	mov %si, %es ;\
	movw $0, %si ;\
	mov %si, %gs ;\
	mov %si, %fs ;
/* save the general purpose registers into the process structure */
#define SAVE_GP_REGS(pptr) \
	mov %eax, %ss:AXREG(pptr) ;\
	mov %ecx, %ss:CXREG(pptr) ;\
	mov %edx, %ss:DXREG(pptr) ;\
	mov %ebx, %ss:BXREG(pptr) ;\
	mov %esi, %ss:SIREG(pptr) ;\
	mov %edi, %ss:DIREG(pptr) ;
/* restore the general purpose registers from the process structure */
#define RESTORE_GP_REGS(pptr) \
	movl %ss:AXREG(pptr), %eax ;\
	movl %ss:CXREG(pptr), %ecx ;\
	movl %ss:DXREG(pptr), %edx ;\
	movl %ss:BXREG(pptr), %ebx ;\
	movl %ss:SIREG(pptr), %esi ;\
	movl %ss:DIREG(pptr), %edi ;
/*
 * save the context of the interrupted process to the structure in the process
 * table. It pushes %ebp to the stack to get a scratch register. After %esi is
 * saved, we can use it to fetch the saved %ebp from the stack and store it in
 * its final location.
 *
 * displ is the stack displacement. In case of an exception, there are two
 * extra values on the stack - the error code and the exception number
 */
#define SAVE_PROCESS_CTX_NON_LAZY(displ) \
	push %ebp ;\
	;\
	movl (CURR_PROC_PTR + 4 + displ)(%esp), %ebp /* %ebp = current proc */ ;\
	;\
	/* save the segment registers */ \
	SAVE_SEGS(%ebp) ;\
	\
	SAVE_GP_REGS(%ebp) ;\
	pop %esi /* get the orig %ebp and save it */ ;\
	mov %esi, %ss:BPREG(%ebp) ;\
	\
	RESTORE_KERNEL_SEGS ;\
	SAVE_TRAP_CTX(displ, %ebp, %esi) ;
/* as SAVE_PROCESS_CTX_NON_LAZY, but additionally let lazy_fpu() handle the
 * FPU context of the interrupted process */
#define SAVE_PROCESS_CTX(displ) \
	SAVE_PROCESS_CTX_NON_LAZY(displ) ;\
	push %ebp ;\
	call lazy_fpu ;\
	add $4, %esp ;
#endif /* __SCONST_H__ */