// xv6-cs450/proc.c

#include "types.h"
#include "mmu.h"
#include "x86.h"
#include "param.h"
2006-06-27 16:35:53 +02:00
#include "fd.h"
2006-06-22 22:47:23 +02:00
#include "proc.h"
2006-06-12 17:22:12 +02:00
#include "defs.h"
#include "spinlock.h"
struct spinlock proc_table_lock;

struct proc proc[NPROC];
struct proc *curproc[NCPU];  // current process on each cpu, or zero
int next_pid = 1;

/*
 * set up a process's task state and segment descriptors
 * correctly, given its current size and address in memory.
 * this should be called whenever the latter change.
 * doesn't change the cpu's current segmentation setup.
 */
void
setupsegs(struct proc *p)
{
  memset(&p->ts, 0, sizeof(struct Taskstate));
  p->ts.ts_ss0 = SEG_KDATA << 3;
  p->ts.ts_esp0 = (unsigned)(p->kstack + KSTACKSIZE);

  // XXX it may be wrong to modify the current segment table!

  p->gdt[0] = SEG_NULL;
  p->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, 0);
  p->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
  p->gdt[SEG_TSS] = SEG16(STS_T32A, (unsigned) &p->ts,
                          sizeof(p->ts), 0);
  p->gdt[SEG_TSS].sd_s = 0;
  p->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (unsigned)p->mem, p->sz, 3);
  p->gdt[SEG_UDATA] = SEG(STA_W, (unsigned)p->mem, p->sz, 3);
  p->gdt_pd.pd__garbage = 0;
  p->gdt_pd.pd_lim = sizeof(p->gdt) - 1;
  p->gdt_pd.pd_base = (unsigned) p->gdt;
}
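
// trapret restores registers from a Trapframe and returns to
// user space; newproc points a new process's saved eip here.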
extern void trapret();

/*
 * internal fork(). does not copy kernel stack; instead,
 * sets up the stack to return as if from system call.
 * caller must set state to RUNNABLE.
 */
struct proc *
newproc()
{
  struct proc *np;
  struct proc *op;
  int fd;

  // claim an unused slot in the proc table
  acquire(&proc_table_lock);
  for(np = &proc[1]; np < &proc[NPROC]; np++){
    if(np->state == UNUSED){
      np->state = EMBRYO;
      break;
    }
  }
  if(np >= &proc[NPROC]){
    release(&proc_table_lock);
    return 0;
  }

  // copy from proc[0] if we're bootstrapping
  op = curproc[cpu()];
  if(op == 0)
    op = &proc[0];

  np->pid = next_pid++;
  np->ppid = op->pid;
  release(&proc_table_lock);

  // copy the parent's address space
  np->sz = op->sz;
  np->mem = kalloc(op->sz);
  if(np->mem == 0){
    np->state = UNUSED;  // give back the slot we claimed
    return 0;
  }
  memcpy(np->mem, op->mem, np->sz);

  np->kstack = kalloc(KSTACKSIZE);
  if(np->kstack == 0){
    kfree(np->mem, op->sz);
    np->state = UNUSED;
    return 0;
  }
  setupsegs(np);

  // set up kernel stack to return to user space
  np->tf = (struct Trapframe *) (np->kstack + KSTACKSIZE - sizeof(struct Trapframe));
  *(np->tf) = *(op->tf);
  np->tf->tf_regs.reg_eax = 0; // so fork() returns 0 in child

  // set up new jmpbuf to start executing at trapret with esp pointing at tf
  memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
  np->jmpbuf.jb_eip = (unsigned) trapret;
  np->jmpbuf.jb_esp = (unsigned) np->tf - 4; // -4 for the %eip that isn't actually there

  // copy file descriptors
  for(fd = 0; fd < NOFILE; fd++){
    np->fds[fd] = op->fds[fd];
    if(np->fds[fd])
      fd_reference(np->fds[fd]);
  }

  return np;
}
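
// per-cpu scheduler loop. each cpu runs scheduler() forever:
// it picks a RUNNABLE process and longjmp()s into it; when a
// process gives up the cpu, swtch() longjmp()s back to the
// setjmp below, on this cpu's own stack.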
void
scheduler(void)
{
  struct proc *op, *np;
  int i;

  cprintf("start scheduler on cpu %d jmpbuf %p\n", cpu(), &cpus[cpu()].jmpbuf);
  cpus[cpu()].lastproc = &proc[0];

  setjmp(&cpus[cpu()].jmpbuf);

  // we arrive here at startup or via longjmp from swtch().
  // a process that gave up the cpu left its new state in
  // newstate and possibly a lock to release in mtx.
  op = curproc[cpu()];
  if(op == 0 || op->mtx != &proc_table_lock)
    acquire1(&proc_table_lock, op);
  if(op){
    if(op->newstate <= 0 || op->newstate > ZOMBIE)
      panic("scheduler");
    op->state = op->newstate;
    op->newstate = -1;
    if(op->mtx){
      struct spinlock *mtx = op->mtx;
      op->mtx = 0;
      if(mtx != &proc_table_lock)
        release1(mtx, op);
    }
  }

  // find a runnable process and switch to it, scanning
  // round-robin from the last process this cpu ran
  curproc[cpu()] = 0;
  np = cpus[cpu()].lastproc + 1;
  while(1){
    for(i = 0; i < NPROC; i++){
      if(np >= &proc[NPROC])
        np = &proc[0];
      if(np->state == RUNNABLE)
        break;
      np++;
    }
    if(i < NPROC){
      np->state = RUNNING;
      release1(&proc_table_lock, op);
      break;
    }
    // nothing runnable: drop the lock so other cpus can make
    // processes runnable, then rescan from the start
    release1(&proc_table_lock, op);
    op = 0;
    acquire(&proc_table_lock);
    np = &proc[0];
  }

  cpus[cpu()].lastproc = np;
  curproc[cpu()] = np;

  // h/w sets busy bit in TSS descriptor sometimes, and faults
  // if it's set in LTR. so clear tss descriptor busy bit.
  np->gdt[SEG_TSS].sd_type = STS_T32A;

  // XXX should probably have an lgdt() function in x86.h
  // to confine all the inline assembly.
  // XXX probably ought to lgdt on trap return too, in case
  // a system call has moved a program or changed its size.
  asm volatile("lgdt %0" : : "g" (np->gdt_pd.pd_lim));
  ltr(SEG_TSS << 3);

  if(0) cprintf("cpu%d: run %d\n", cpu(), np - proc);
  longjmp(&np->jmpbuf);
}

// give up the cpu by switching to the scheduler,
// which runs on the per-cpu stack.
void
swtch(int newstate)
{
  struct proc *p = curproc[cpu()];

  if(p == 0)
    panic("swtch no proc");
  if(p->mtx == 0 && p->locks != 0)
    panic("swtch w/ locks");
  if(p->mtx && p->locks != 1)
    panic("swtch w/ locks 1");
  if(p->mtx && p->mtx->locked == 0)
    panic("swtch w/ lock but not held");
  if(p->locks && (read_eflags() & FL_IF))
    panic("swtch w/ lock but FL_IF");

  p->newstate = newstate; // basically an argument to scheduler()
  if(setjmp(&p->jmpbuf) == 0)
    longjmp(&cpus[cpu()].jmpbuf);
}
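
// give up the cpu and go to sleep on chan. if mtx is given,
// it is held across the switch (the scheduler releases it)
// and reacquired before sleep() returns.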
void
sleep(void *chan, struct spinlock *mtx)
{
  struct proc *p = curproc[cpu()];

  if(p == 0)
    panic("sleep");

  p->chan = chan;
  p->mtx = mtx; // scheduler will release it
  swtch(WAITING);

  if(mtx)
    acquire(mtx);
  p->chan = 0;
}
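
// mark all processes sleeping on chan as runnable.
// caller must hold proc_table_lock.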
void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++)
    if(p->state == WAITING && p->chan == chan)
      p->state = RUNNABLE;
}
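
// wake up all processes sleeping on chan; like wakeup1(),
// but acquires proc_table_lock itself.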
void
wakeup(void *chan)
{
  acquire(&proc_table_lock);
  wakeup1(chan);
  release(&proc_table_lock);
}

// give up the CPU but stay marked as RUNNABLE
void
yield()
{
  if(curproc[cpu()] == 0 || curproc[cpu()]->state != RUNNING)
    panic("yield");
  swtch(RUNNABLE);
}
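
// exit the current process. does not return. an exited
// process remains a zombie until its parent calls
// proc_wait() to learn that it exited.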
void
proc_exit()
{
  struct proc *p;
  struct proc *cp = curproc[cpu()];
  int fd;

  // close open file descriptors
  for(fd = 0; fd < NOFILE; fd++){
    if(cp->fds[fd]){
      fd_close(cp->fds[fd]);
      cp->fds[fd] = 0;
    }
  }

  acquire(&proc_table_lock);

  // wake up parent
  for(p = proc; p < &proc[NPROC]; p++)
    if(p->pid == cp->ppid)
      wakeup1(p);

  // abandon children: reparent them to proc 1 (init)
  for(p = proc; p < &proc[NPROC]; p++)
    if(p->ppid == cp->pid)
      p->ppid = 1;

  // hold proc_table_lock across the switch;
  // the scheduler inherits and eventually releases it
  cp->mtx = &proc_table_lock;
  swtch(ZOMBIE);
  panic("a zombie revived");
}
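
// wait for a child to exit, free its resources, and return
// its pid; return -1 if this process has no children.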
int
proc_wait(void)
{
  struct proc *p;
  struct proc *cp = curproc[cpu()];
  int any, pid;

  acquire(&proc_table_lock);
  while(1){
    any = 0;
    for(p = proc; p < &proc[NPROC]; p++){
      if(p->state == ZOMBIE && p->ppid == cp->pid){
        kfree(p->mem, p->sz);
        kfree(p->kstack, KSTACKSIZE);
        pid = p->pid;
        p->state = UNUSED;
        release(&proc_table_lock);
        return pid;
      }
      if(p->state != UNUSED && p->ppid == cp->pid)
        any = 1;
    }
    if(any == 0){
      release(&proc_table_lock);
      return -1;
    }
    sleep(cp, &proc_table_lock);
  }
}
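
// mark the process with the given pid as killed, waking it
// if it is asleep. returns 0 on success, -1 if no such process.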
int
proc_kill(int pid)
{
  struct proc *p;

  acquire(&proc_table_lock);
  for(p = proc; p < &proc[NPROC]; p++){
    if(p->pid == pid && p->state != UNUSED){
      p->killed = 1;
      if(p->state == WAITING)
        p->state = RUNNABLE;
      release(&proc_table_lock);
      return 0;
    }
  }
  release(&proc_table_lock);
  return -1;
}

// disable interrupts. cli()/sti() pairs nest: a per-cpu count
// tracks outstanding cli()s, and interrupts are only re-enabled
// when the count returns to zero.
void
cli(void)
{
  if(cpus[cpu()].clis == 0)
    __asm __volatile("cli");
  cpus[cpu()].clis += 1;
  if((read_eflags() & FL_IF) != 0)
    panic("cli but enabled");
}

// enable interrupts (see cli() above for the nesting count)
void
sti(void)
{
  if((read_eflags() & FL_IF) != 0)
    panic("sti but enabled");
  if(cpus[cpu()].clis < 1)
    panic("sti");
  cpus[cpu()].clis -= 1;
  if(cpus[cpu()].clis < 1)
    __asm __volatile("sti");
}