initproc, usegment, swtch tweaks

Russ Cox 2009-07-11 19:28:29 -07:00
parent b121486c3f
commit 2c5f7aba38
4 changed files with 45 additions and 59 deletions

defs.h (3 changed lines)

@@ -109,7 +109,7 @@ void wakeup(void*);
 void yield(void);
 
 // swtch.S
-void swtch(struct context**, struct context**);
+void swtch(struct context**, struct context*);
 
 // spinlock.c
 void acquire(struct spinlock*);
@@ -151,7 +151,6 @@ void uartinit(void);
 void uartintr(void);
 void uartputc(int);
 
 // number of elements in fixed-size array
 #define NELEM(x) (sizeof(x)/sizeof((x)[0]))

proc.c (77 changed lines)

@@ -15,7 +15,7 @@ static struct proc *initproc;
 int nextpid = 1;
 extern void forkret(void);
-extern void forkret1(struct trapframe*);
+extern void trapret(void);
 
 void
 pinit(void)
@@ -30,19 +30,18 @@ static struct proc*
 allocproc(void)
 {
   struct proc *p;
+  char *sp;
 
   acquire(&ptable.lock);
-  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
-    if(p->state == UNUSED){
-      p->state = EMBRYO;
-      p->pid = nextpid++;
+  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
+    if(p->state == UNUSED)
       goto found;
-    }
-  }
   release(&ptable.lock);
   return 0;
 
 found:
+  p->state = EMBRYO;
+  p->pid = nextpid++;
   release(&ptable.lock);
 
   // Allocate kernel stack if necessary.
@@ -50,11 +49,20 @@ found:
     p->state = UNUSED;
     return 0;
   }
-  p->tf = (struct trapframe*)(p->kstack + KSTACKSIZE) - 1;
+  sp = p->kstack + KSTACKSIZE;
 
-  // Set up new context to start executing at forkret (see below).
-  p->context = (struct context *)p->tf - 1;
-  memset(p->context, 0, sizeof(*p->context));
+  // Leave room for trap frame.
+  sp -= sizeof *p->tf;
+  p->tf = (struct trapframe*)sp;
+
+  // Set up new context to start executing at forkret,
+  // which returns to trapret (see below).
+  sp -= 4;
+  *(uint*)sp = (uint)trapret;
+
+  sp -= sizeof *p->context;
+  p->context = (struct context*)sp;
+  memset(p->context, 0, sizeof *p->context);
   p->context->eip = (uint)forkret;
+
   return p;
 }
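The hunk above changes how allocproc lays out a new process's kernel stack. A rough sketch of the result, growing downward from the top of p->kstack (assuming the struct trapframe and struct context definitions in x86.h and proc.h):

  p->kstack + KSTACKSIZE  ->  +---------------------+
                              |     trap frame      |  <- p->tf
                              +---------------------+
                              | address of trapret  |  (fake return PC for forkret)
                              +---------------------+
                              |   struct context    |  <- p->context, eip = forkret
                              +---------------------+  <- %esp when swtch first runs this process

When the scheduler's swtch switches to p->context, it pops the callee-save registers and "returns" into forkret; when forkret itself returns, its ret pops the planted trapret address, and trapret restores the trap frame and irets into user space.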
@@ -79,19 +87,16 @@ growproc(int n)
 }
 
 // Set up CPU's kernel segment descriptors.
+// Run once at boot time on each CPU.
 void
 ksegment(void)
 {
   struct cpu *c1;
 
   c1 = &cpus[cpu()];
-  c1->gdt[0] = SEG_NULL;
   c1->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0);
   c1->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
-  c1->gdt[SEG_KCPU] = SEG(STA_W, (uint)&c1->tls+sizeof(c1->tls), 0xffffffff, 0);
-  c1->gdt[SEG_UCODE] = SEG_NULL;
-  c1->gdt[SEG_UDATA] = SEG_NULL;
-  c1->gdt[SEG_TSS] = SEG_NULL;
+  c1->gdt[SEG_KCPU] = SEG(STA_W, (uint)(&c1->tls+1), 0xffffffff, 0);
   lgdt(c1->gdt, sizeof(c1->gdt));
   loadfsgs(SEG_KCPU << 3);
@@ -106,23 +111,12 @@ void
 usegment(void)
 {
   pushcli();
-  c->ts.ss0 = SEG_KDATA << 3;
-  if(cp)
-    c->ts.esp0 = (uint)(cp->kstack + KSTACKSIZE);
-  else
-    c->ts.esp0 = 0xffffffff;
-
-  if(cp){
-    c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)cp->mem, cp->sz-1, DPL_USER);
-    c->gdt[SEG_UDATA] = SEG(STA_W, (uint)cp->mem, cp->sz-1, DPL_USER);
-  } else {
-    c->gdt[SEG_UCODE] = SEG_NULL;
-    c->gdt[SEG_UDATA] = SEG_NULL;
-  }
+  c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)cp->mem, cp->sz-1, DPL_USER);
+  c->gdt[SEG_UDATA] = SEG(STA_W, (uint)cp->mem, cp->sz-1, DPL_USER);
   c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint)&c->ts, sizeof(c->ts)-1, 0);
   c->gdt[SEG_TSS].s = 0;
-
-  lgdt(c->gdt, sizeof(c->gdt));
+  c->ts.ss0 = SEG_KDATA << 3;
+  c->ts.esp0 = (uint)cp->kstack + KSTACKSIZE;
   ltr(SEG_TSS << 3);
   popcli();
 }
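Presumably the reasoning here: usegment can now assume cp is non-nil, since the scheduler below no longer calls it after clearing cp, so the SEG_NULL fallback branches disappear. The lgdt call can also go, because ksegment already loaded this per-CPU GDT at boot; rewriting descriptors in the in-memory table is enough, ltr re-reads the updated TSS descriptor, and the user code/data selectors take effect on the iret into user mode.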
@@ -171,7 +165,7 @@ void
 userinit(void)
 {
   struct proc *p;
-  extern uchar _binary_initcode_start[], _binary_initcode_size[];
+  extern char _binary_initcode_start[], _binary_initcode_size[];
 
   p = allocproc();
   initproc = p;
@@ -179,6 +173,7 @@ userinit(void)
   // Initialize memory from initcode.S
   p->sz = PAGE;
   p->mem = kalloc(p->sz);
+  memset(p->mem, 0, p->sz);
   memmove(p->mem, _binary_initcode_start, (int)_binary_initcode_size);
 
   memset(p->tf, 0, sizeof(*p->tf));
@@ -210,7 +205,7 @@ scheduler(void)
   struct proc *p;
 
   for(;;){
-    // Enable interrupts on this processor, in lieu of saving intena.
+    // Enable interrupts on this processor.
     sti();
 
     // Loop over process table looking for process to run.
@@ -225,36 +220,35 @@ scheduler(void)
       cp = p;
       usegment();
       p->state = RUNNING;
-      swtch(&c->context, &p->context);
+      swtch(&c->context, p->context);
 
       // Process is done running for now.
       // It should have changed its p->state before coming back.
       cp = 0;
-      usegment();
     }
     release(&ptable.lock);
 
   }
 }
 
-// Enter scheduler. Must already hold ptable.lock
+// Enter scheduler. Must hold only ptable.lock
 // and have changed cp->state.
 void
 sched(void)
 {
   int intena;
 
-  if(readeflags()&FL_IF)
-    panic("sched interruptible");
-  if(cp->state == RUNNING)
-    panic("sched running");
   if(!holding(&ptable.lock))
     panic("sched ptable.lock");
   if(c->ncli != 1)
     panic("sched locks");
+  if(cp->state == RUNNING)
+    panic("sched running");
+  if(readeflags()&FL_IF)
+    panic("sched interruptible");
   intena = c->intena;
-  swtch(&cp->context, &c->context);
+  swtch(&cp->context, c->context);
   c->intena = intena;
 }
@@ -262,7 +256,7 @@ sched(void)
 void
 yield(void)
 {
-  acquire(&ptable.lock);
+  acquire(&ptable.lock);  //DOC: yieldlock
   cp->state = RUNNABLE;
   sched();
   release(&ptable.lock);
@@ -276,8 +270,7 @@ forkret(void)
   // Still holding ptable.lock from scheduler.
   release(&ptable.lock);
 
-  // Jump into assembly, never to return.
-  forkret1(cp->tf);
+  // Return to "caller", actually trapret (see allocproc).
 }
 
 // Atomically release lock and sleep on chan.

swtch.S

@@ -1,4 +1,6 @@
-# void swtch(struct context **old, struct context **new);
+# Context switch
+#
+#   void swtch(struct context **old, struct context *new);
 #
 # Save current register context in old
 # and then load register context from new.
@@ -16,7 +18,7 @@ swtch:
 
   # Switch stacks
   movl %esp, (%eax)
-  movl (%edx), %esp
+  movl %edx, %esp
 
   # Load new callee-save registers
   popl %edi
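The new calling convention passes the target context as a plain pointer rather than a pointer-to-pointer: a struct context is nothing more than the callee-save registers as they sit on a kernel stack, so swtch can load %esp with the pointer itself and pop. The first argument stays struct context** because "movl %esp, (%eax)" stores the freshly pushed register block's address back through *old (updating c->context or cp->context for the next switch). For reference, a sketch of the layout swtch assumes; the real definition lives in proc.h, and the field order here is an assumption read off the push/pop sequence:

struct context {
  uint edi;   // popped first: lowest address on the stack
  uint esi;
  uint ebx;
  uint ebp;
  uint eip;   // pushed implicitly by the call to swtch, consumed by ret
};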

trapasm.S

@@ -35,11 +35,3 @@ trapret:
   popl %ds
   addl $0x8, %esp  # trapno and errcode
   iret
-
-# A forked process switches to user mode by calling
-# forkret1(tf), where tf is the trap frame to use.
-.globl forkret1
-forkret1:
-  movl 4(%esp), %esp
-  jmp trapret
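With trapret's address planted on the new kernel stack by allocproc, the forkret1 trampoline is redundant. The first time a new process is scheduled, the path is roughly:

  scheduler: swtch(&c->context, p->context)
      -> forkret()     # releases ptable.lock, then returns
      -> trapret       # via the return address allocproc pushed
      -> iret          # into user mode at the trap frame's %eip

so no special-case assembly is left in trapasm.S for starting a forked process.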