assorted fixes:

* rename c/cp to cpu/proc
* rename cpu.context to cpu.scheduler
* fix some comments
* formatting for printout
parent 0aef891495
commit 48755214c9
23 changed files with 2437 additions and 2379 deletions
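The bulk of the diff is mechanical renaming rather than new behavior: the per-CPU thread-local globals c and cp become cpu and proc, the lapic helper cpu() becomes cpunum(), and struct cpu's context field becomes scheduler. A condensed before/after sketch of the names involved, drawn from the main.c, proc.h, and defs.h hunks below (not compilable on its own, since it assumes xv6's struct cpu, struct proc, and struct context):

    // Before this commit:
    //   __thread struct cpu *c;       // this CPU
    //   __thread struct proc *cp;     // current process on this CPU
    //   int cpu(void);                // current CPU number (lapic.c)
    //   struct context *context;      // in struct cpu: scheduler's saved registers
    // After this commit:
    __thread struct cpu *cpu;          // this CPU
    __thread struct proc *proc;        // current process on this CPU
    int cpunum(void);                  // current CPU number (lapic.c)
    // and inside struct cpu:  struct context *scheduler;
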
Makefile | 2

@@ -37,7 +37,7 @@ AS = $(TOOLPREFIX)gas
 LD = $(TOOLPREFIX)ld
 OBJCOPY = $(TOOLPREFIX)objcopy
 OBJDUMP = $(TOOLPREFIX)objdump
-CFLAGS = -fno-builtin -O2 -Wall -MD -ggdb -m32
+CFLAGS = -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32
 CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
 ASFLAGS = -m32
 # FreeBSD ld wants ``elf_i386_fbsd''

console.c

@@ -59,7 +59,7 @@ cprintf(char *fmt, ...)
   if(locking)
     acquire(&cons.lock);
 
-  argp = (uint*)(void*)&fmt + 1;
+  argp = (uint*)(void*)(&fmt + 1);
   state = 0;
   for(i = 0; (c = fmt[i] & 0xff) != 0; i++){
     if(c != '%'){
@@ -106,7 +106,7 @@ panic(char *s)
 
   cli();
   cons.locking = 0;
-  cprintf("cpu%d: panic: ", cpu());
+  cprintf("cpu%d: panic: ", cpu->id);
   cprintf(s);
   cprintf("\n");
   getcallerpcs(&s, pcs);
@@ -229,7 +229,7 @@ consoleread(struct inode *ip, char *dst, int n)
   acquire(&input.lock);
   while(n > 0){
     while(input.r == input.w){
-      if(cp->killed){
+      if(proc->killed){
         release(&input.lock);
         ilock(ip);
         return -1;

defs.h | 2

@@ -68,7 +68,7 @@ void kinit(void);
 void kbdintr(void);
 
 // lapic.c
-int cpu(void);
+int cpunum(void);
 extern volatile uint* lapic;
 void lapiceoi(void);
 void lapicinit(int);

exec.c | 14

@@ -11,7 +11,7 @@ exec(char *path, char **argv)
 {
   char *mem, *s, *last;
   int i, argc, arglen, len, off;
-  uint sz, sp, argp, x;
+  uint sz, sp, argp;
   struct elfhdr elf;
   struct inode *ip;
   struct proghdr ph;
@@ -103,14 +103,14 @@ exec(char *path, char **argv)
   for(last=s=path; *s; s++)
     if(*s == '/')
       last = s+1;
-  safestrcpy(cp->name, last, sizeof(cp->name));
+  safestrcpy(proc->name, last, sizeof(proc->name));
 
   // Commit to the new image.
-  kfree(cp->mem, cp->sz);
-  cp->mem = mem;
-  cp->sz = sz;
-  cp->tf->eip = elf.entry;  // main
-  cp->tf->esp = sp;
+  kfree(proc->mem, proc->sz);
+  proc->mem = mem;
+  proc->sz = sz;
+  proc->tf->eip = elf.entry;  // main
+  proc->tf->esp = sp;
   usegment();
   return 0;
 

fs.c | 4

@@ -109,7 +109,7 @@ bfree(int dev, uint b)
 // to inodes shared between multiple processes.
 //
 // ip->ref counts the number of pointer references to this cached
-// inode; references are typically kept in struct file and in cp->cwd.
+// inode; references are typically kept in struct file and in proc->cwd.
 // When ip->ref falls to zero, the inode is no longer cached.
 // It is an error to use an inode without holding a reference to it.
 //
@@ -578,7 +578,7 @@ namex(char *path, int nameiparent, char *name)
   if(*path == '/')
     ip = iget(ROOTDEV, ROOTINO);
   else
-    ip = idup(cp->cwd);
+    ip = idup(proc->cwd);
 
   while((path = skipelem(path, name)) != 0){
     ilock(ip);

ide.c | 2

@@ -146,7 +146,7 @@ iderw(struct buf *b)
     idestart(b);
 
   // Wait for request to finish.
-  // Assuming will not sleep too long: ignore cp->killed.
+  // Assuming will not sleep too long: ignore proc->killed.
   while((b->flags & (B_VALID|B_DIRTY)) != B_VALID)
     sleep(b, &idelock);
 

lapic.c | 2

@@ -92,7 +92,7 @@ lapicinit(int c)
 }
 
 int
-cpu(void)
+cpunum(void)
 {
   // Cannot call cpu when interrupts are enabled:
   // result not guaranteed to last long enough to be used!

main.c | 20

@@ -5,8 +5,8 @@
 #include "proc.h"
 #include "x86.h"
 
-__thread struct cpu *c;
-__thread struct proc *cp;
+__thread struct cpu *cpu;
+__thread struct proc *proc;
 
 static void bootothers(void);
 static void mpmain(void) __attribute__((noreturn));
@@ -22,7 +22,7 @@ main(void)
   ioapicinit();    // another interrupt controller
   consoleinit();   // I/O devices & their interrupts
   uartinit();      // serial port
-  cprintf("\ncpu%d: starting xv6\n\n", cpu());
+  cprintf("\ncpu%d: starting xv6\n\n", cpu->id);
 
   kinit();         // physical memory allocator
   pinit();         // process table
@@ -45,14 +45,14 @@ main(void)
 static void
 mpmain(void)
 {
-  if(cpu() != mpbcpu())
-    lapicinit(cpu());
+  if(cpunum() != mpbcpu())
+    lapicinit(cpunum());
   ksegment();
-  cprintf("cpu%d: mpmain\n", cpu());
+  cprintf("cpu%d: mpmain\n", cpu->id);
   idtinit();
-  xchg(&c->booted, 1);
+  xchg(&cpu->booted, 1);
 
-  cprintf("cpu%d: scheduling\n", cpu());
+  cprintf("cpu%d: scheduling\n", cpu->id);
   scheduler();
 }
 
@@ -69,14 +69,14 @@ bootothers(void)
   memmove(code, _binary_bootother_start, (uint)_binary_bootother_size);
 
   for(c = cpus; c < cpus+ncpu; c++){
-    if(c == cpus+cpu())  // We've started already.
+    if(c == cpus+cpunum())  // We've started already.
       continue;
 
     // Fill in %esp, %eip and start code on cpu.
     stack = kalloc(KSTACKSIZE);
     *(void**)(code-4) = stack + KSTACKSIZE;
     *(void**)(code-8) = mpmain;
-    lapicstartap(c->apicid, (uint)code);
+    lapicstartap(c->id, (uint)code);
 
     // Wait for cpu to get through bootstrap.
     while(c->booted == 0)

mmu.h | 12

@@ -43,14 +43,14 @@ struct segdesc {
 
 // Normal segment
 #define SEG(type, base, lim, dpl) (struct segdesc) \
-{ ((lim) >> 12) & 0xffff, (base) & 0xffff, ((base) >> 16) & 0xff, \
-  type, 1, dpl, 1, (uint) (lim) >> 28, 0, 0, 1, 1, \
-  (uint) (base) >> 24 }
+{ ((lim) >> 12) & 0xffff, (uint)(base) & 0xffff, \
+  ((uint)(base) >> 16) & 0xff, type, 1, dpl, 1, \
+  (uint)(lim) >> 28, 0, 0, 1, 1, (uint)(base) >> 24 }
 #define SEG16(type, base, lim, dpl) (struct segdesc) \
-{ (lim) & 0xffff, (base) & 0xffff, ((base) >> 16) & 0xff, \
-  type, 1, dpl, 1, (uint) (lim) >> 16, 0, 0, 1, 0, \
-  (uint) (base) >> 24 }
+{ (lim) & 0xffff, (uint)(base) & 0xffff, \
+  ((uint)(base) >> 16) & 0xff, type, 1, dpl, 1, \
+  (uint)(lim) >> 16, 0, 0, 1, 0, (uint)(base) >> 24 }
 
 #define DPL_USER    0x3     // User DPL
 

mp.c | 11

@@ -103,20 +103,22 @@ mpinit(void)
   struct mpproc *proc;
   struct mpioapic *ioapic;
 
-  bcpu = &cpus[ncpu];
+  bcpu = &cpus[0];
   if((conf = mpconfig(&mp)) == 0)
     return;
 
   ismp = 1;
   lapic = (uint*)conf->lapicaddr;
 
   for(p=(uchar*)(conf+1), e=(uchar*)conf+conf->length; p<e; ){
     switch(*p){
     case MPPROC:
       proc = (struct mpproc*)p;
-      cpus[ncpu].apicid = proc->apicid;
+      if(ncpu != proc->apicid) {
+        cprintf("mpinit: ncpu=%d apicpid=%d", ncpu, proc->apicid);
+        panic("mpinit");
+      }
-      if(proc->flags & MPBOOT)
-        bcpu = &cpus[ncpu];
+      cpus[ncpu].id = ncpu;
       ncpu++;
       p += sizeof(struct mpproc);
       continue;
@@ -135,7 +137,6 @@ mpinit(void)
       panic("mpinit");
     }
   }
-
   if(mp->imcrp){
     // Bochs doesn't support IMCR, so this doesn't run on Bochs.
     // But it would on real hardware.

pipe.c | 4

@@ -82,7 +82,7 @@ pipewrite(struct pipe *p, char *addr, int n)
   acquire(&p->lock);
   for(i = 0; i < n; i++){
     while(p->nwrite == p->nread + PIPESIZE) {  //DOC: pipewrite-full
-      if(p->readopen == 0 || cp->killed){
+      if(p->readopen == 0 || proc->killed){
         release(&p->lock);
         return -1;
       }
@@ -103,7 +103,7 @@ piperead(struct pipe *p, char *addr, int n)
 
   acquire(&p->lock);
   while(p->nread == p->nwrite && p->writeopen){  //DOC: pipe-empty
-    if(cp->killed){
+    if(proc->killed){
       release(&p->lock);
       return -1;
     }

proc.c | 115

@@ -65,32 +65,31 @@ procdump(void)
 void
 ksegment(void)
 {
-  struct cpu *c1;
+  struct cpu *c;
 
-  c1 = &cpus[cpu()];
-  c1->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0);
-  c1->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
-  c1->gdt[SEG_KCPU] = SEG(STA_W, (uint)(&c1->tls+1), 0xffffffff, 0);
-  lgdt(c1->gdt, sizeof(c1->gdt));
+  c = &cpus[cpunum()];
+  c->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0);
+  c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
+  c->gdt[SEG_KCPU] = SEG(STA_W, (uint)(&c->tls+1), 0xffffffff, 0);
+  lgdt(c->gdt, sizeof(c->gdt));
   loadfsgs(SEG_KCPU << 3);
 
   // Initialize cpu-local variables.
-  c = c1;
-  cp = 0;
+  cpu = c;
+  proc = 0;
 }
 
 // Set up CPU's segment descriptors and current process task state.
-// If cp==0, set up for "idle" state for when scheduler() is running.
 void
 usegment(void)
 {
   pushcli();
-  c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)cp->mem, cp->sz-1, DPL_USER);
-  c->gdt[SEG_UDATA] = SEG(STA_W, (uint)cp->mem, cp->sz-1, DPL_USER);
-  c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint)&c->ts, sizeof(c->ts)-1, 0);
-  c->gdt[SEG_TSS].s = 0;
-  c->ts.ss0 = SEG_KDATA << 3;
-  c->ts.esp0 = (uint)cp->kstack + KSTACKSIZE;
+  cpu->gdt[SEG_UCODE] = SEG(STA_X|STA_R, proc->mem, proc->sz-1, DPL_USER);
+  cpu->gdt[SEG_UDATA] = SEG(STA_W, proc->mem, proc->sz-1, DPL_USER);
+  cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0);
+  cpu->gdt[SEG_TSS].s = 0;
+  cpu->ts.ss0 = SEG_KDATA << 3;
+  cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE;
   ltr(SEG_TSS << 3);
   popcli();
 }
@@ -178,14 +177,14 @@ growproc(int n)
 {
   char *newmem;
 
-  newmem = kalloc(cp->sz + n);
+  newmem = kalloc(proc->sz + n);
   if(newmem == 0)
     return -1;
-  memmove(newmem, cp->mem, cp->sz);
-  memset(newmem + cp->sz, 0, n);
-  kfree(cp->mem, cp->sz);
-  cp->mem = newmem;
-  cp->sz += n;
+  memmove(newmem, proc->mem, proc->sz);
+  memset(newmem + proc->sz, 0, n);
+  kfree(proc->mem, proc->sz);
+  proc->mem = newmem;
+  proc->sz += n;
   usegment();
   return 0;
 }
@@ -204,24 +203,24 @@ fork(void)
     return -1;
 
   // Copy process state from p.
-  np->sz = cp->sz;
+  np->sz = proc->sz;
   if((np->mem = kalloc(np->sz)) == 0){
     kfree(np->kstack, KSTACKSIZE);
     np->kstack = 0;
     np->state = UNUSED;
     return -1;
   }
-  memmove(np->mem, cp->mem, np->sz);
-  np->parent = cp;
-  *np->tf = *cp->tf;
+  memmove(np->mem, proc->mem, np->sz);
+  np->parent = proc;
+  *np->tf = *proc->tf;
 
   // Clear %eax so that fork returns 0 in the child.
   np->tf->eax = 0;
 
   for(i = 0; i < NOFILE; i++)
-    if(cp->ofile[i])
-      np->ofile[i] = filedup(cp->ofile[i]);
-  np->cwd = idup(cp->cwd);
+    if(proc->ofile[i])
+      np->ofile[i] = filedup(proc->ofile[i]);
+  np->cwd = idup(proc->cwd);
 
   pid = np->pid;
   np->state = RUNNABLE;
@@ -255,14 +254,14 @@ scheduler(void)
       // Switch to chosen process.  It is the process's job
       // to release ptable.lock and then reacquire it
       // before jumping back to us.
-      cp = p;
+      proc = p;
       usegment();
       p->state = RUNNING;
-      swtch(&c->context, p->context);
+      swtch(&cpu->scheduler, proc->context);
 
       // Process is done running for now.
       // It should have changed its p->state before coming back.
-      cp = 0;
+      proc = 0;
     }
     release(&ptable.lock);
 
@@ -270,7 +269,7 @@ scheduler(void)
 }
 
 // Enter scheduler.  Must hold only ptable.lock
-// and have changed cp->state.
+// and have changed proc->state.
 void
 sched(void)
 {
@@ -278,16 +277,16 @@ sched(void)
 
   if(!holding(&ptable.lock))
     panic("sched ptable.lock");
-  if(c->ncli != 1)
+  if(cpu->ncli != 1)
     panic("sched locks");
-  if(cp->state == RUNNING)
+  if(proc->state == RUNNING)
     panic("sched running");
   if(readeflags()&FL_IF)
     panic("sched interruptible");
 
-  intena = c->intena;
-  swtch(&cp->context, c->context);
-  c->intena = intena;
+  intena = cpu->intena;
+  swtch(&proc->context, cpu->scheduler);
+  cpu->intena = intena;
 }
 
 // Give up the CPU for one scheduling round.
@@ -295,7 +294,7 @@ void
 yield(void)
 {
   acquire(&ptable.lock);  //DOC: yieldlock
-  cp->state = RUNNABLE;
+  proc->state = RUNNABLE;
   sched();
   release(&ptable.lock);
 }
@@ -312,11 +311,11 @@ forkret(void)
 }
 
 // Atomically release lock and sleep on chan.
-// Reacquires lock when reawakened.
+// Reacquires lock when awakened.
 void
 sleep(void *chan, struct spinlock *lk)
 {
-  if(cp == 0)
+  if(proc == 0)
     panic("sleep");
 
   if(lk == 0)
@@ -334,12 +333,12 @@ sleep(void *chan, struct spinlock *lk)
   }
 
   // Go to sleep.
-  cp->chan = chan;
-  cp->state = SLEEPING;
+  proc->chan = chan;
+  proc->state = SLEEPING;
   sched();
 
   // Tidy up.
-  cp->chan = 0;
+  proc->chan = 0;
 
   // Reacquire original lock.
   if(lk != &ptable.lock){  //DOC: sleeplock2
@@ -371,7 +370,7 @@ wakeup(void *chan)
 }
 
 // Kill the process with the given pid.
-// Process won't actually exit until it returns
+// Process won't exit until it returns
 // to user space (see trap in trap.c).
 int
 kill(int pid)
@@ -394,36 +393,36 @@ kill(int pid)
 }
 
 // Exit the current process.  Does not return.
-// Exited processes remain in the zombie state
-// until their parent calls wait() to find out they exited.
+// An exited process remains in the zombie state
+// until its parent calls wait() to find out it exited.
 void
 exit(void)
 {
   struct proc *p;
   int fd;
 
-  if(cp == initproc)
+  if(proc == initproc)
     panic("init exiting");
 
   // Close all open files.
   for(fd = 0; fd < NOFILE; fd++){
-    if(cp->ofile[fd]){
-      fileclose(cp->ofile[fd]);
-      cp->ofile[fd] = 0;
+    if(proc->ofile[fd]){
+      fileclose(proc->ofile[fd]);
+      proc->ofile[fd] = 0;
     }
   }
 
-  iput(cp->cwd);
-  cp->cwd = 0;
+  iput(proc->cwd);
+  proc->cwd = 0;
 
   acquire(&ptable.lock);
 
   // Parent might be sleeping in wait().
-  wakeup1(cp->parent);
+  wakeup1(proc->parent);
 
   // Pass abandoned children to init.
   for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
-    if(p->parent == cp){
+    if(p->parent == proc){
       p->parent = initproc;
       if(p->state == ZOMBIE)
        wakeup1(initproc);
@@ -431,7 +430,7 @@ exit(void)
   }
 
   // Jump into the scheduler, never to return.
-  cp->state = ZOMBIE;
+  proc->state = ZOMBIE;
   sched();
   panic("zombie exit");
 }
@@ -449,7 +448,7 @@ wait(void)
     // Scan through table looking for zombie children.
     havekids = 0;
     for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
-      if(p->parent != cp)
+      if(p->parent != proc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
@@ -468,13 +467,13 @@ wait(void)
     }
 
     // No point waiting if we don't have any children.
-    if(!havekids || cp->killed){
+    if(!havekids || proc->killed){
       release(&ptable.lock);
       return -1;
     }
 
     // Wait for children to exit.  (See wakeup1 call in proc_exit.)
-    sleep(cp, &ptable.lock);  //DOC: wait-sleep
+    sleep(proc, &ptable.lock);  //DOC: wait-sleep
   }
 }

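The cpu.context to cpu.scheduler rename is easiest to see at the two swtch() call sites above: each CPU keeps the scheduler's saved registers in cpu->scheduler, while each process keeps its own in proc->context. A two-line sketch of the handoff as it reads after this commit (restating lines from scheduler() and sched() in the proc.c diff):

    swtch(&cpu->scheduler, proc->context);  // scheduler(): save scheduler state, run the chosen process
    swtch(&proc->context, cpu->scheduler);  // sched(): save the process state, return to the scheduler
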
proc.h | 8

@@ -51,8 +51,8 @@ struct proc {
 
 // Per-CPU state
 struct cpu {
-  uchar apicid;               // Local APIC ID
-  struct context *context;    // Switch here to enter scheduler
+  uchar id;                   // Local APIC ID; index into cpus[] below
+  struct context *scheduler;  // Switch here to enter scheduler
   struct taskstate ts;        // Used by x86 to find stack for interrupt
   struct segdesc gdt[NSEGS];  // x86 global descriptor table
   volatile uint booted;       // Has the CPU started?
@@ -70,5 +70,5 @@ extern int ncpu;
 // pointed at by gs; the name __thread derives from the use
 // of the same mechanism to provide per-thread storage in
 // multithreaded user programs.
-extern __thread struct cpu *c;      // This cpu.
-extern __thread struct proc *cp;    // Current process on this cpu.
+extern __thread struct cpu *cpu;    // This cpu.
+extern __thread struct proc *proc;  // Current process on this cpu.

@@ -45,6 +45,8 @@ file.c
 sysfile.c
 exec.c
+
+
 
 # pipes
 pipe.c
 

spinlock.c | 19

@@ -13,7 +13,7 @@ initlock(struct spinlock *lk, char *name)
 {
   lk->name = name;
   lk->locked = 0;
-  lk->cpu = 0xffffffff;
+  lk->cpu = 0;
 }
 
 // Acquire the lock.
@@ -34,10 +34,7 @@ acquire(struct spinlock *lk)
     ;
 
   // Record info about lock acquisition for debugging.
-  // The +10 is only so that we can tell the difference
-  // between forgetting to initialize lock->cpu
-  // and holding a lock on cpu 0.
-  lk->cpu = cpu() + 10;
+  lk->cpu = cpu;
   getcallerpcs(&lk, lk->pcs);
 }
 
@@ -49,7 +46,7 @@ release(struct spinlock *lk)
     panic("release");
 
   lk->pcs[0] = 0;
-  lk->cpu = 0xffffffff;
+  lk->cpu = 0;
 
   // The xchg serializes, so that reads before release are
   // not reordered after it.  The 1996 PentiumPro manual (Volume 3,
@@ -87,7 +84,7 @@ getcallerpcs(void *v, uint pcs[])
 int
 holding(struct spinlock *lock)
 {
-  return lock->locked && lock->cpu == cpu() + 10;
+  return lock->locked && lock->cpu == cpu;
 }
 
 
@@ -102,8 +99,8 @@ pushcli(void)
 
   eflags = readeflags();
   cli();
-  if(c->ncli++ == 0)
-    c->intena = eflags & FL_IF;
+  if(cpu->ncli++ == 0)
+    cpu->intena = eflags & FL_IF;
 }
 
 void
@@ -111,9 +108,9 @@ popcli(void)
 {
   if(readeflags()&FL_IF)
     panic("popcli - interruptible");
-  if(--c->ncli < 0)
+  if(--cpu->ncli < 0)
     panic("popcli");
-  if(c->ncli == 0 && c->intena)
+  if(cpu->ncli == 0 && cpu->intena)
     sti();
 }
 

spinlock.h

@@ -4,7 +4,7 @@ struct spinlock {
 
   // For debugging:
   char *name;       // Name of lock.
-  int cpu;          // The number of the cpu holding the lock.
+  struct cpu *cpu;  // The cpu holding the lock.
   uint pcs[10];     // The call stack (an array of program counters)
                     // that locked the lock.
 };

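One small semantic change rides along with the rename: the spinlock debug field lk->cpu is now a pointer to the holding struct cpu rather than a CPU number offset by 10 (the old +10 existed only to tell an uninitialized lk->cpu apart from a lock held by cpu 0). With a pointer, 0 unambiguously means "not held", and holding() compares directly against the thread-local cpu. A minimal sketch restating the acquire/release/holding lines from the diff above (assumes xv6's spinlock.h and the per-CPU cpu pointer):

    // acquire():  lk->cpu = cpu;   (was: lk->cpu = cpu() + 10)
    // release():  lk->cpu = 0;     (was: lk->cpu = 0xffffffff)
    int
    holding(struct spinlock *lock)
    {
      return lock->locked && lock->cpu == cpu;  // cpu: this CPU's thread-local struct cpu*
    }
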
syscall.c | 16

@@ -44,7 +44,7 @@ fetchstr(struct proc *p, uint addr, char **pp)
 int
 argint(int n, int *ip)
 {
-  return fetchint(cp, cp->tf->esp + 4 + 4*n, ip);
+  return fetchint(proc, proc->tf->esp + 4 + 4*n, ip);
 }
 
 // Fetch the nth word-sized system call argument as a pointer
@@ -57,9 +57,9 @@ argptr(int n, char **pp, int size)
 
   if(argint(n, &i) < 0)
     return -1;
-  if((uint)i >= cp->sz || (uint)i+size >= cp->sz)
+  if((uint)i >= proc->sz || (uint)i+size >= proc->sz)
     return -1;
-  *pp = cp->mem + i;
+  *pp = proc->mem + i;
   return 0;
 }
 
@@ -73,7 +73,7 @@ argstr(int n, char **pp)
   int addr;
   if(argint(n, &addr) < 0)
     return -1;
-  return fetchstr(cp, addr, pp);
+  return fetchstr(proc, addr, pp);
 }
 
 extern int sys_chdir(void);
@@ -125,12 +125,12 @@ syscall(void)
 {
   int num;
 
-  num = cp->tf->eax;
+  num = proc->tf->eax;
   if(num >= 0 && num < NELEM(syscalls) && syscalls[num])
-    cp->tf->eax = syscalls[num]();
+    proc->tf->eax = syscalls[num]();
   else {
     cprintf("%d %s: unknown sys call %d\n",
-            cp->pid, cp->name, num);
-    cp->tf->eax = -1;
+            proc->pid, proc->name, num);
+    proc->tf->eax = -1;
   }
 }

sysfile.c | 18

@@ -18,7 +18,7 @@ argfd(int n, int *pfd, struct file **pf)
 
   if(argint(n, &fd) < 0)
     return -1;
-  if(fd < 0 || fd >= NOFILE || (f=cp->ofile[fd]) == 0)
+  if(fd < 0 || fd >= NOFILE || (f=proc->ofile[fd]) == 0)
     return -1;
   if(pfd)
     *pfd = fd;
@@ -35,8 +35,8 @@ fdalloc(struct file *f)
   int fd;
 
   for(fd = 0; fd < NOFILE; fd++){
-    if(cp->ofile[fd] == 0){
-      cp->ofile[fd] = f;
+    if(proc->ofile[fd] == 0){
+      proc->ofile[fd] = f;
       return fd;
     }
   }
@@ -89,7 +89,7 @@ sys_close(void)
 
   if(argfd(0, &fd, &f) < 0)
     return -1;
-  cp->ofile[fd] = 0;
+  proc->ofile[fd] = 0;
   fileclose(f);
   return 0;
 }
@@ -338,8 +338,8 @@ sys_chdir(void)
     return -1;
   }
   iunlock(ip);
-  iput(cp->cwd);
-  cp->cwd = ip;
+  iput(proc->cwd);
+  proc->cwd = ip;
   return 0;
 }
 
@@ -356,13 +356,13 @@ sys_exec(void)
   for(i=0;; i++){
     if(i >= NELEM(argv))
       return -1;
-    if(fetchint(cp, uargv+4*i, (int*)&uarg) < 0)
+    if(fetchint(proc, uargv+4*i, (int*)&uarg) < 0)
       return -1;
     if(uarg == 0){
       argv[i] = 0;
       break;
     }
-    if(fetchstr(cp, uarg, &argv[i]) < 0)
+    if(fetchstr(proc, uarg, &argv[i]) < 0)
       return -1;
   }
   return exec(path, argv);
@@ -382,7 +382,7 @@ sys_pipe(void)
   fd0 = -1;
   if((fd0 = fdalloc(rf)) < 0 || (fd1 = fdalloc(wf)) < 0){
     if(fd0 >= 0)
-      cp->ofile[fd0] = 0;
+      proc->ofile[fd0] = 0;
     fileclose(rf);
     fileclose(wf);
     return -1;

sysproc.c

@@ -37,7 +37,7 @@ sys_kill(void)
 int
 sys_getpid(void)
 {
-  return cp->pid;
+  return proc->pid;
 }
 
 int
@@ -48,7 +48,7 @@ sys_sbrk(void)
 
   if(argint(0, &n) < 0)
     return -1;
-  addr = cp->sz;
+  addr = proc->sz;
   if(growproc(n) < 0)
     return -1;
   return addr;
@@ -64,7 +64,7 @@ sys_sleep(void)
   acquire(&tickslock);
   ticks0 = ticks;
   while(ticks - ticks0 < n){
-    if(cp->killed){
+    if(proc->killed){
      release(&tickslock);
      return -1;
    }

toc.ftr | 8

@@ -6,9 +6,9 @@ on the same line as the name, the line number (or, in a few cases, numbers)
 where the name is defined.  Successive lines in an entry list the line
 numbers where the name is used.  For example, this entry:
 
-swtch 2256
-0311 1928 1962 2255
-2256
+swtch 2208
+0318 1928 1967 2207
+2208
 
-indicates that swtch is defined on line 2256 and is mentioned on five lines
+indicates that swtch is defined on line 2208 and is mentioned on five lines
 on sheets 03, 19, and 22.

trap.c | 24

@@ -36,18 +36,18 @@ void
 trap(struct trapframe *tf)
 {
   if(tf->trapno == T_SYSCALL){
-    if(cp->killed)
+    if(proc->killed)
       exit();
-    cp->tf = tf;
+    proc->tf = tf;
     syscall();
-    if(cp->killed)
+    if(proc->killed)
       exit();
     return;
   }
 
   switch(tf->trapno){
   case T_IRQ0 + IRQ_TIMER:
-    if(cpu() == 0){
+    if(cpu->id == 0){
       acquire(&tickslock);
       ticks++;
       wakeup(&ticks);
@@ -70,35 +70,35 @@ trap(struct trapframe *tf)
   case T_IRQ0 + 7:
   case T_IRQ0 + IRQ_SPURIOUS:
     cprintf("cpu%d: spurious interrupt at %x:%x\n",
-            cpu(), tf->cs, tf->eip);
+            cpu->id, tf->cs, tf->eip);
     lapiceoi();
     break;
 
   default:
-    if(cp == 0 || (tf->cs&3) == 0){
+    if(proc == 0 || (tf->cs&3) == 0){
       // In kernel, it must be our mistake.
       cprintf("unexpected trap %d from cpu %d eip %x\n",
-              tf->trapno, cpu(), tf->eip);
+              tf->trapno, cpu->id, tf->eip);
       panic("trap");
     }
     // In user space, assume process misbehaved.
     cprintf("pid %d %s: trap %d err %d on cpu %d eip %x -- kill proc\n",
-            cp->pid, cp->name, tf->trapno, tf->err, cpu(), tf->eip);
-    cp->killed = 1;
+            proc->pid, proc->name, tf->trapno, tf->err, cpu->id, tf->eip);
+    proc->killed = 1;
   }
 
   // Force process exit if it has been killed and is in user space.
   // (If it is still executing in the kernel, let it keep running
   // until it gets to the regular system call return.)
-  if(cp && cp->killed && (tf->cs&3) == DPL_USER)
+  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
     exit();
 
   // Force process to give up CPU on clock tick.
   // If interrupts were on while locks held, would need to check nlock.
-  if(cp && cp->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
+  if(proc && proc->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
     yield();
 
   // Check if the process has been killed since we yielded
-  if(cp && cp->killed && (tf->cs&3) == DPL_USER)
+  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
     exit();
 }

xv6.pdf | BIN
Binary file not shown.