#include "types.h" #include "defs.h" #include "param.h" #include "mmu.h" #include "x86.h" #include "proc.h" #include "spinlock.h" struct { struct spinlock lock; struct proc proc[NPROC]; } ptable; static struct proc *initproc; int nextpid = 1; extern void forkret(void); extern void forkret1(struct trapframe*); void pinit(void) { initlock(&ptable.lock, "ptable"); } // Look in the process table for an UNUSED proc. // If found, change state to EMBRYO and return it. // Otherwise return 0. static struct proc* allocproc(void) { int i; struct proc *p; acquire(&ptable.lock); for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ if(p->state == UNUSED){ p->state = EMBRYO; p->pid = nextpid++; goto found; } } release(&ptable.lock); return 0; found: release(&ptable.lock); // Allocate kernel stack if necessary. if((p->kstack = kalloc(KSTACKSIZE)) == 0){ p->state = UNUSED; return 0; } p->tf = (struct trapframe*)(p->kstack + KSTACKSIZE) - 1; // Set up new context to start executing at forkret (see below). p->context = (struct context *)p->tf - 1; memset(p->context, 0, sizeof(*p->context)); p->context->eip = (uint)forkret; return p; } // Grow current process's memory by n bytes. // Return 0 on success, -1 on failure. int growproc(int n) { char *newmem; newmem = kalloc(cp->sz + n); if(newmem == 0) return -1; memmove(newmem, cp->mem, cp->sz); memset(newmem + cp->sz, 0, n); kfree(cp->mem, cp->sz); cp->mem = newmem; cp->sz += n; usegment(); return 0; } // Set up CPU's kernel segment descriptors. void ksegment(void) { struct cpu *c1; c1 = &cpus[cpu()]; c1->gdt[0] = SEG_NULL; c1->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0); c1->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0); c1->gdt[SEG_KCPU] = SEG(STA_W, (uint)&c1->tls+sizeof(c1->tls), 0xffffffff, 0); c1->gdt[SEG_UCODE] = SEG_NULL; c1->gdt[SEG_UDATA] = SEG_NULL; c1->gdt[SEG_TSS] = SEG_NULL; lgdt(c1->gdt, sizeof(c1->gdt)); loadfsgs(SEG_KCPU << 3); // Initialize cpu-local variables. c = c1; cp = 0; } // Set up CPU's segment descriptors and task state for the current process. // If cp==0, set up for "idle" state for when scheduler() is running. void usegment(void) { pushcli(); c->ts.ss0 = SEG_KDATA << 3; if(cp) c->ts.esp0 = (uint)(cp->kstack + KSTACKSIZE); else c->ts.esp0 = 0xffffffff; if(cp){ c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)cp->mem, cp->sz-1, DPL_USER); c->gdt[SEG_UDATA] = SEG(STA_W, (uint)cp->mem, cp->sz-1, DPL_USER); } else { c->gdt[SEG_UCODE] = SEG_NULL; c->gdt[SEG_UDATA] = SEG_NULL; } c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint)&c->ts, sizeof(c->ts)-1, 0); c->gdt[SEG_TSS].s = 0; lgdt(c->gdt, sizeof(c->gdt)); ltr(SEG_TSS << 3); popcli(); } // Create a new process copying p as the parent. // Sets up stack to return as if from system call. // Caller must set state of returned proc to RUNNABLE. int fork(void) { int i, pid; struct proc *np; // Allocate process. if((np = allocproc()) == 0) return -1; // Copy process state from p. np->sz = cp->sz; if((np->mem = kalloc(np->sz)) == 0){ kfree(np->kstack, KSTACKSIZE); np->kstack = 0; np->state = UNUSED; return -1; } memmove(np->mem, cp->mem, np->sz); np->parent = cp; *np->tf = *cp->tf; // Clear %eax so that fork returns 0 in the child. np->tf->eax = 0; for(i = 0; i < NOFILE; i++) if(cp->ofile[i]) np->ofile[i] = filedup(cp->ofile[i]); np->cwd = idup(cp->cwd); pid = np->pid; np->state = RUNNABLE; return pid; } // Set up first user process. 
void
userinit(void)
{
  struct proc *p;
  extern uchar _binary_initcode_start[], _binary_initcode_size[];

  p = allocproc();
  initproc = p;

  // Initialize memory from initcode.S
  p->sz = PAGE;
  p->mem = kalloc(p->sz);
  memmove(p->mem, _binary_initcode_start, (int)_binary_initcode_size);

  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = p->sz;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->state = RUNNABLE;
}

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  int i;

  for(;;){
    // Enable interrupts on this processor, in lieu of saving intena.
    sti();

    // Loop over process table looking for process to run.
    acquire(&ptable.lock);
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state != RUNNABLE)
        continue;

      // Switch to chosen process.  It is the process's job
      // to release ptable.lock and then reacquire it
      // before jumping back to us.
      cp = p;
      usegment();
      p->state = RUNNING;
      swtch(&c->context, &p->context);

      // Process is done running for now.
      // It should have changed its p->state before coming back.
      cp = 0;
      usegment();
    }
    release(&ptable.lock);
  }
}

// Enter scheduler.  Must already hold ptable.lock
// and have changed cp->state.
void
sched(void)
{
  int intena;

  if(readeflags()&FL_IF)
    panic("sched interruptible");
  if(cp->state == RUNNING)
    panic("sched running");
  if(!holding(&ptable.lock))
    panic("sched ptable.lock");
  if(c->ncli != 1)
    panic("sched locks");

  intena = c->intena;
  swtch(&cp->context, &c->context);
  c->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  acquire(&ptable.lock);
  cp->state = RUNNABLE;
  sched();
  release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here.  "Return" to user space.
void
forkret(void)
{
  // Still holding ptable.lock from scheduler.
  release(&ptable.lock);

  // Jump into assembly, never to return.
  forkret1(cp->tf);
}

// Atomically release lock and sleep on chan.
// Reacquires lock when reawakened.
void
sleep(void *chan, struct spinlock *lk)
{
  if(cp == 0)
    panic("sleep");

  if(lk == 0)
    panic("sleep without lk");

  // Must acquire ptable.lock in order to
  // change p->state and then call sched.
  // Once we hold ptable.lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup runs with ptable.lock locked),
  // so it's okay to release lk.
  if(lk != &ptable.lock){
    acquire(&ptable.lock);
    release(lk);
  }

  // Go to sleep.
  cp->chan = chan;
  cp->state = SLEEPING;
  sched();

  // Tidy up.
  cp->chan = 0;

  // Reacquire original lock.
  if(lk != &ptable.lock){
    release(&ptable.lock);
    acquire(lk);
  }
}

//PAGEBREAK!
// Wake up all processes sleeping on chan.
// The ptable lock must be held.
static void
wakeup1(void *chan)
{
  struct proc *p;

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++)
    if(p->state == SLEEPING && p->chan == chan)
      p->state = RUNNABLE;
}

// Wake up all processes sleeping on chan.
void
wakeup(void *chan)
{
  acquire(&ptable.lock);
  wakeup1(chan);
  release(&ptable.lock);
}

// Kill the process with the given pid.
// Process won't actually exit until it returns
// to user space (see trap in trap.c).
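// Marking p->killed is enough: the trap handler checks the flag on the way
// back to user space and calls exit() on the process's behalf, and a
// SLEEPING process is made RUNNABLE here so that it can reach that check.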
int
kill(int pid)
{
  struct proc *p;

  acquire(&ptable.lock);
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->pid == pid){
      p->killed = 1;
      // Wake process from sleep if necessary.
      if(p->state == SLEEPING)
        p->state = RUNNABLE;
      release(&ptable.lock);
      return 0;
    }
  }
  release(&ptable.lock);
  return -1;
}

// Exit the current process.  Does not return.
// Exited processes remain in the zombie state
// until their parent calls wait() to find out they exited.
void
exit(void)
{
  struct proc *p;
  int fd;

  if(cp == initproc)
    panic("init exiting");

  // Close all open files.
  for(fd = 0; fd < NOFILE; fd++){
    if(cp->ofile[fd]){
      fileclose(cp->ofile[fd]);
      cp->ofile[fd] = 0;
    }
  }

  iput(cp->cwd);
  cp->cwd = 0;

  acquire(&ptable.lock);

  // Parent might be sleeping in wait().
  wakeup1(cp->parent);

  // Pass abandoned children to init.
  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->parent == cp){
      p->parent = initproc;
      if(p->state == ZOMBIE)
        wakeup1(initproc);
    }
  }

  // Jump into the scheduler, never to return.
  cp->killed = 0;
  cp->state = ZOMBIE;
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int i, havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->state == UNUSED)
        continue;
      if(p->parent == cp){
        havekids = 1;
        if(p->state == ZOMBIE){
          // Found one.
          kfree(p->mem, p->sz);
          kfree(p->kstack, KSTACKSIZE);
          pid = p->pid;
          p->state = UNUSED;
          p->pid = 0;
          p->parent = 0;
          p->name[0] = 0;
          release(&ptable.lock);
          return pid;
        }
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || cp->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in exit.)
    sleep(cp, &ptable.lock);
  }
}

// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [EMBRYO]    "embryo",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  int i, j;
  struct proc *p;
  char *state;
  uint pc[10];

  for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    cprintf("%d %s %s", p->pid, state, p->name);
    if(p->state == SLEEPING){
      getcallerpcs((uint*)p->context->ebp+2, pc);
      for(j=0; j<10 && pc[j] != 0; j++)
        cprintf(" %p", pc[j]);
    }
    cprintf("\n");
  }
}