// xv6-cs450/proc.c

#include "types.h"
#include "defs.h"
#include "param.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "spinlock.h"

struct {
  struct spinlock lock;
  struct proc proc[NPROC];
} ptable;

static struct proc *initproc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

void
pinit(void)
{
  initlock(&ptable.lock, "ptable");
}

//PAGEBREAK: 36
// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [EMBRYO]    "embryo",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  int i;
  struct proc *p;
  char *state;
  uint pc[10];

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s", p->pid, state, p->name);
    if(p->state == SLEEPING){
      getcallerpcs((uint*)p->context->ebp+2, pc);
      for(i=0; i<10 && pc[i] != 0; i++)
        cprintf(" %p", pc[i]);
    }
    cprintf("\n");
  }
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and return it.
// Otherwise return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;
  char *sp;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == UNUSED)
      goto found;
  release(&ptable.lock);
  return 0;

found:
  p->state = EMBRYO;
  p->pid = nextpid++;
  release(&ptable.lock);

  // Allocate kernel stack if possible.
  if((p->kstack = kalloc()) == 0){
    p->state = UNUSED;
    return 0;
  }
  sp = p->kstack + KSTACKSIZE;

  // Leave room for trap frame.
  sp -= sizeof *p->tf;
  p->tf = (struct trapframe*)sp;

  // Set up new context to start executing at forkret,
  // which returns to trapret (see below).
  sp -= 4;
  *(uint*)sp = (uint)trapret;

  sp -= sizeof *p->context;
  p->context = (struct context*)sp;
  memset(p->context, 0, sizeof *p->context);
  p->context->eip = (uint)forkret;

  return p;
}
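
// Resulting kernel stack layout built by the code above (an illustrative
// sketch; exact sizes come from struct trapframe and struct context):
//
//   p->kstack + KSTACKSIZE  -> top of the stack page
//                              struct trapframe   (p->tf)
//                              uint               (address of trapret)
//                              struct context     (p->context, eip = forkret)
//   p->kstack               -> bottom of the page; unused space in between
//
// When the scheduler swtch()es to p->context, execution resumes at forkret,
// whose return pops the trapret address, and trapret restores the trap frame
// and enters user mode.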

//PAGEBREAK: 32
// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];

  p = allocproc();
  initproc = p;
  if(!(p->pgdir = setupkvm()))
    panic("userinit: out of memory?");
  if(!allocuvm(p->pgdir, 0x0, (int)_binary_initcode_size))
    panic("userinit: out of memory?");
  inituvm(p->pgdir, 0x0, _binary_initcode_start,
          (int)_binary_initcode_size);
  p->sz = PGROUNDUP((int)_binary_initcode_size);
  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = PGSIZE;
  p->tf->eip = 0;  // beginning of initcode.S
  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");
  p->state = RUNNABLE;
}
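
// _binary_initcode_start/_size come from initcode.S, the first user program,
// which the kernel build embeds as a binary blob.  Conceptually (a sketch of
// the assembly's effect, not literal code from this repository) it does:
//
//   char *argv[] = { "/init", 0 };
//   exec("/init", argv);   // replace this process with /init
//   for(;;) exit();        // reached only if exec fails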

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  if(n > 0){
    if(!allocuvm(proc->pgdir, (char*)proc->sz, n))
      return -1;
  } else if(n < 0){
    if(!deallocuvm(proc->pgdir, (char*)(proc->sz + n), 0 - n))
      return -1;
  }
  proc->sz += n;
  switchuvm(proc);
  return 0;
}
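
// growproc() is the kernel half of the sbrk() system call.  In stock xv6,
// sys_sbrk (sysproc.c) records the old proc->sz, grows or shrinks the address
// space, and returns the old size as the start of the new region -- a sketch,
// assuming the usual shape of that caller:
//
//   int
//   sys_sbrk(void)
//   {
//     int addr, n;
//
//     if(argint(0, &n) < 0)
//       return -1;
//     addr = proc->sz;
//     if(growproc(n) < 0)
//       return -1;
//     return addr;
//   }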

// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int
fork(void)
{
  int i, pid;
  struct proc *np;

  // Allocate process.
  if((np = allocproc()) == 0)
    return -1;

  // Copy process state from p.
  if(!(np->pgdir = copyuvm(proc->pgdir, proc->sz))){
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
    return -1;
  }
  np->sz = proc->sz;
  np->parent = proc;
  *np->tf = *proc->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  for(i = 0; i < NOFILE; i++)
    if(proc->ofile[i])
      np->ofile[i] = filedup(proc->ofile[i]);
  np->cwd = idup(proc->cwd);

  pid = np->pid;
  np->state = RUNNABLE;
  safestrcpy(np->name, proc->name, sizeof(proc->name));
  return pid;
}
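
// Typical user-level use of fork(): parent and child resume from identical
// trap frames and are told apart only by the return value (%eax is cleared
// for the child above).  A hypothetical caller, using the user library
// wrappers from user.h:
//
//   int pid = fork();
//   if(pid == 0){
//     // child: run a program
//     exec(path, argv);
//     exit();
//   } else if(pid > 0){
//     // parent: reap the child
//     wait();
//   }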

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;

  for(;;){
    // Enable interrupts on this processor.
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      proc = p;
      switchuvm(p);
      p->state = RUNNING;
      swtch(&cpu->scheduler, proc->context);
      switchkvm();

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      proc = 0;
    }
    release(&ptable.lock);
  }
}
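
// Round trip of one context switch, as implemented above and in sched()
// below (a schematic, not additional code):
//
//   scheduler()                              running process
//   -----------                              ---------------
//   acquire(&ptable.lock)
//   swtch(&cpu->scheduler, p->context) --->  resumes on the process's kstack
//                                            runs; eventually calls yield(),
//                                            sleep(), or exit(), which call
//                                            sched():
//                                              swtch(&proc->context,
//                                                    cpu->scheduler)
//   <--- resumes here, switchkvm(), proc = 0
//   release(&ptable.lock) at end of the pass
//
// ptable.lock is handed across the switch: whichever side calls swtch holds
// it, and the other side releases it (see forkret() and yield()).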

// Enter scheduler.  Must hold only ptable.lock
// and have changed proc->state.
void
sched(void)
{
  int intena;

  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(cpu->ncli != 1)
    panic("sched locks");
  if(proc->state == RUNNING)
    panic("sched running");
  if(readeflags()&FL_IF)
    panic("sched interruptible");
  intena = cpu->intena;
  swtch(&proc->context, cpu->scheduler);
  cpu->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&ptable.lock);  //DOC: yieldlock
  proc->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  // Return to "caller", actually trapret (see allocproc).
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  if(proc == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){  //DOC: sleeplock0
    acquire(&ptable.lock);  //DOC: sleeplock1
    release(lk);
  }

  // Go to sleep.
  proc->chan = chan;
  proc->state = SLEEPING;
  sched();

  // Tidy up.
  proc->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){  //DOC: sleeplock2
    release(&ptable.lock);
    acquire(lk);
  }
}
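
// Canonical pattern built on this primitive (a sketch; cond, lk, and chan
// stand for a caller's own state):
//
//   // consumer                      // producer
//   acquire(&lk);                    acquire(&lk);
//   while(!cond)                     cond = 1;
//     sleep(chan, &lk);              wakeup(chan);
//   ...                              release(&lk);
//   release(&lk);
//
// The while loop matters: wakeup() makes every sleeper on chan RUNNABLE, so
// a woken process must re-check the condition under lk before proceeding.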

//PAGEBREAK!
// Wake up all processes sleeping on chan.
// The ptable lock must be held.
static void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}

// Wake up all processes sleeping on chan.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}

// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
int
kill(int pid)
{
  struct proc *p;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}
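
// kill() only sets the flag; the victim actually exits in trap() the next
// time it crosses the user/kernel boundary.  The check in trap.c is roughly
// (paraphrased from stock xv6):
//
//   if(proc && proc->killed && (tf->cs&3) == DPL_USER)
//     exit();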

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void
exit(void)
{
  struct proc *p;
  int fd;

  if(proc == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(proc->ofile[fd]){
      fileclose(proc->ofile[fd]);
      proc->ofile[fd] = 0;
    }
  }

  iput(proc->cwd);
  proc->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(proc->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == proc){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
  proc->state = ZOMBIE;
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != proc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->kstack);
        p->kstack = 0;
        freevm(p->pgdir);
        p->state = UNUSED;
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || proc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in exit.)
    sleep(proc, &ptable.lock);  //DOC: wait-sleep
  }
}
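
// Handshake with exit(): an exiting child calls wakeup1(proc->parent), and
// the parent sleeps here on its own struct proc pointer (the chan passed to
// sleep() is proc), so both sides agree on the channel without extra state.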