diff --git a/defs.h b/defs.h
index b9e0e98..4d2cba9 100644
--- a/defs.h
+++ b/defs.h
@@ -92,6 +92,7 @@ int pipewrite(struct pipe*, char*, int);

 // proc.c
 struct proc* copyproc(struct proc*);
+struct proc* curproc();
 void exit(void);
 int growproc(int);
 int kill(int);
@@ -114,6 +115,8 @@ void getcallerpcs(void*, uint*);
 int holding(struct spinlock*);
 void initlock(struct spinlock*, char*);
 void release(struct spinlock*);
+void splhi();
+void spllo();

 // string.c
 int memcmp(const void*, const void*, uint);
diff --git a/dot-bochsrc b/dot-bochsrc
index 8c609bd..ddd5d93 100755
--- a/dot-bochsrc
+++ b/dot-bochsrc
@@ -107,7 +107,7 @@ romimage: file=$BXSHARE/BIOS-bochs-latest
 #   650Mhz Athlon K-7 with Linux 2.4.4/egcs-2.91.66  2 to 2.5 Mips
 #   400Mhz Pentium II with Linux 2.0.36/egcs-1.0.3   1 to 1.8 Mips
 #=======================================================================
-cpu: count=2, ips=10000000, quantum=1
+cpu: count=2, ips=10000000, quantum=1, reset_on_triple_fault=0

 #=======================================================================
 # MEGS
diff --git a/lapic.c b/lapic.c
index d62c8ea..a7878fd 100644
--- a/lapic.c
+++ b/lapic.c
@@ -2,7 +2,10 @@
 // See Chapter 8 & Appendix C of Intel processor manual volume 3.

 #include "types.h"
+#include "defs.h"
 #include "traps.h"
+#include "mmu.h"
+#include "x86.h"

 // Local APIC registers, divided by 4 for use as uint[] indices.
 #define ID (0x0020/4) // ID
@@ -84,6 +87,25 @@ lapic_init(int c)
 int
 cpu(void)
 {
+  // Cannot call cpu when interrupts are enabled:
+  // result not guaranteed to last long enough to be used!
+  // Would prefer to panic but even printing is chancy here:
+  // everything, including cprintf, calls cpu, at least indirectly
+  // through acquire and release.
+  if(read_eflags()&FL_IF){
+    static int n;
+    int i;
+    uint pcs[10];
+
+    if(n++%999 == 0){
+      getcallerpcs((uint*)read_ebp() + 2, pcs);
+      cprintf("cpu called from %x with interrupts enabled: stk", pcs[0]);
+      for(i=0; i<10 && pcs[i] && pcs[i] != -1; i++)
+        cprintf(" %x", pcs[i]);
+      cprintf("\n");
+    }
+  }
+
   if(lapic)
     return lapic[ID]>>24;
   return 0;
diff --git a/main.c b/main.c
index 87dfd3e..9e33708 100644
--- a/main.c
+++ b/main.c
@@ -18,9 +18,9 @@ main(void)
   // clear BSS
   memset(edata, 0, end - edata);

-  // Prevent release() from enabling interrupts.
+  // splhi() every processor during bootstrap.
   for(i=0; i<NCPU; i++)
-    cpus[i].nlock = 1;
+    cpus[i].nsplhi = 1;
diff --git a/proc.c b/proc.c
--- a/proc.c
+++ b/proc.c
   cp->mem = newmem;
   kfree(oldmem, cp->sz);
   cp->sz += n;
+  setupsegs(cp);
   return cp->sz - n;
 }
@@ -71,6 +71,7 @@ setupsegs(struct proc *p)
 {
   struct cpu *c;

+  splhi();
   c = &cpus[cpu()];
   c->ts.ss0 = SEG_KDATA << 3;
   if(p)
@@ -93,6 +94,7 @@ setupsegs(struct proc *p)

   lgdt(c->gdt, sizeof(c->gdt));
   ltr(SEG_TSS << 3);
+  spllo();
 }

 // Create a new process copying p as the parent.
@@ -176,6 +178,19 @@ userinit(void)
   initproc = p;
 }

+// Return currently running process.
+// XXX comment better
+struct proc*
+curproc(void)
+{
+  struct proc *p;
+
+  splhi();
+  p = cpus[cpu()].curproc;
+  spllo();
+  return p;
+}
+
 //PAGEBREAK: 42
 // Per-CPU process scheduler.
 // Each CPU calls scheduler() after setting itself up.
@@ -188,12 +203,14 @@ void
 scheduler(void)
 {
   struct proc *p;
+  struct cpu *c;
   int i;

   for(;;){
     // Loop over process table looking for process to run.
     acquire(&proc_table_lock);
-
+
+    c = &cpus[cpu()];
     for(i = 0; i < NPROC; i++){
       p = &proc[i];
       if(p->state != RUNNABLE)
@@ -202,14 +219,14 @@ scheduler(void)
       // Switch to chosen process. It is the process's job
       // to release proc_table_lock and then reacquire it
       // before jumping back to us.
-      cp = p;
+      c->curproc = p;
       setupsegs(p);
       p->state = RUNNING;
-      swtch(&cpus[cpu()].context, &p->context);
+      swtch(&c->context, &p->context);

       // Process is done running for now.
       // It should have changed its p->state before coming back.
-      cp = 0;
+      c->curproc = 0;
       setupsegs(0);
     }
@@ -222,11 +239,13 @@
 void
 sched(void)
 {
+  if(read_eflags()&FL_IF)
+    panic("sched interruptible");
   if(cp->state == RUNNING)
     panic("sched running");
   if(!holding(&proc_table_lock))
     panic("sched proc_table_lock");
-  if(cpus[cpu()].nlock != 1)
+  if(cpus[cpu()].nsplhi != 1)
     panic("sched locks");

   swtch(&cp->context, &cpus[cpu()].context);
diff --git a/proc.h b/proc.h
index f76e264..ad37271 100644
--- a/proc.h
+++ b/proc.h
@@ -49,25 +49,23 @@ struct proc {
 //   fixed-size stack
 //   expandable heap

-// Arrange that cp point to the struct proc that this
-// CPU is currently running. Such preprocessor
-// subterfuge can be confusing, but saves a lot of typing.
-extern struct proc *curproc[NCPU];  // Current (running) process per CPU
-#define cp (curproc[cpu()])  // Current process on this CPU
-
-
 #define MPSTACK 512

 // Per-CPU state
 struct cpu {
   uchar apicid;               // Local APIC ID
+  struct proc *curproc;       // Process currently running.
   struct context context;     // Switch here to enter scheduler
   struct taskstate ts;        // Used by x86 to find stack for interrupt
   struct segdesc gdt[NSEGS];  // x86 global descriptor table
   char mpstack[MPSTACK];      // Per-CPU startup stack
   volatile int booted;        // Has the CPU started?
-  int nlock;                  // Number of locks currently held
+  int nsplhi;                 // Depth of splhi nesting.
 };

 extern struct cpu cpus[NCPU];
 extern int ncpu;
+
+// "cp" is a short alias for curproc().
+// It gets used enough to make this worthwhile.
+#define cp curproc()
diff --git a/spinlock.c b/spinlock.c
index 891f72c..61ae093 100644
--- a/spinlock.c
+++ b/spinlock.c
@@ -25,13 +25,10 @@ initlock(struct spinlock *lock, char *name)
 void
 acquire(struct spinlock *lock)
 {
+  splhi();
   if(holding(lock))
     panic("acquire");

-  if(cpus[cpu()].nlock == 0)
-    cli();
-  cpus[cpu()].nlock++;
-
   while(cmpxchg(0, 1, &lock->locked) == 1)
     ;

@@ -62,8 +59,7 @@ release(struct spinlock *lock)
   cpuid(0, 0, 0, 0, 0);  // memory barrier (see Ch 7, IA-32 manual vol 3)
   lock->locked = 0;

-  if(--cpus[cpu()].nlock == 0)
-    sti();
+  spllo();
 }

 // Record the current call stack in pcs[] by following the %ebp chain.
@@ -91,3 +87,26 @@ holding(struct spinlock *lock)
   return lock->locked && lock->cpu == cpu() + 10;
 }
+
+
+// XXX!
+// Better names? Better functions?
+
+void
+splhi(void)
+{
+  cli();
+  cpus[cpu()].nsplhi++;
+}
+
+void
+spllo(void)
+{
+  if(read_eflags()&FL_IF)
+    panic("spllo - interruptible");
+  if(--cpus[cpu()].nsplhi < 0)
+    panic("spllo");
+  if(cpus[cpu()].nsplhi == 0)
+    sti();
+}
+
diff --git a/sysproc.c b/sysproc.c
index 8d7d9cc..4a9c8de 100644
--- a/sysproc.c
+++ b/sysproc.c
@@ -54,7 +54,6 @@ sys_sbrk(void)
     return -1;
   if((addr = growproc(n)) < 0)
     return -1;
-  setupsegs(cp);
   return addr;
 }

diff --git a/trap.c b/trap.c
index f6a6ce6..9e8d09a 100644
--- a/trap.c
+++ b/trap.c
@@ -44,8 +44,8 @@ trap(struct trapframe *tf)
     return;
   }

-  // Make sure interrupts stay off during handler.
-  cpus[cpu()].nlock++;
+  // No interrupts during interrupt handling.
+  splhi();

   switch(tf->trapno){
   case IRQ_OFFSET + IRQ_TIMER:
@@ -84,7 +84,13 @@ trap(struct trapframe *tf)
     cp->killed = 1;
   }

-  cpus[cpu()].nlock--;
+  // Undo splhi but do not enable interrupts.
+  // If you change this to spllo() you can get a
+  // triple fault by just typing too fast at the prompt.
+  // An interrupt stops us right here, and when that
+  // interrupt tries to return, somehow the segment
+  // registers are all invalid.
+  --cpus[cpu()].nsplhi;

   // Force process exit if it has been killed and is in user space.
   // (If it is still executing in the kernel, let it keep running
diff --git a/x86.h b/x86.h
index 2ac51e5..a24214d 100644
--- a/x86.h
+++ b/x86.h
@@ -39,6 +39,15 @@ outsl(int port, const void *addr, int cnt)
                "cc");
 }

+static inline uint
+read_ebp(void)
+{
+  uint ebp;
+
+  asm volatile("movl %%ebp, %0" : "=a" (ebp));
+  return ebp;
+}
+
 struct segdesc;

 static inline void
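The invariant behind this patch: acquire() and release() can simply call splhi()/spllo() because the per-CPU nsplhi counter makes the calls nest, so interrupts are re-enabled only when the outermost spllo() runs, and sched() can assert nsplhi == 1 while holding exactly proc_table_lock. Below is a minimal host-side model of that nesting: cli()/sti() are stubbed with a flag and a plain int stands in for cpus[cpu()].nsplhi; the stub functions and the main() driver are illustrative only, not kernel code.

/* Host-side model of the splhi/spllo nesting used by acquire/release.
 * cli()/sti() are stubs that only record the would-be IF flag;
 * nsplhi stands in for cpus[cpu()].nsplhi. Illustrative only. */
#include <stdio.h>

static int nsplhi;          /* depth of splhi nesting */
static int interrupts = 1;  /* models the x86 IF flag */

static void cli(void) { interrupts = 0; }
static void sti(void) { interrupts = 1; }

static void
splhi(void)
{
  cli();
  nsplhi++;
}

static void
spllo(void)
{
  if(interrupts)
    printf("panic: spllo - interruptible\n");
  if(--nsplhi < 0)
    printf("panic: spllo\n");
  if(nsplhi == 0)
    sti();
}

int
main(void)
{
  splhi();   /* e.g. acquire(&proc_table_lock) */
  splhi();   /* e.g. acquire of a nested lock */
  spllo();   /* inner release: interrupts stay off */
  printf("after inner spllo: interrupts=%d\n", interrupts);
  spllo();   /* outermost release: interrupts back on */
  printf("after outer spllo: interrupts=%d\n", interrupts);
  return 0;
}

Built with any C compiler, this prints interrupts=0 after the inner spllo() and interrupts=1 after the outer one, which is the behavior the kernel relies on when locks are held across nested acquire/release pairs.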