Changes to allow use of native x86 ELF compilers, which on my Linux 2.4 box using gcc 3.4.6 don't seem to follow the same conventions as the i386-jos-elf-gcc compilers. Run "make TOOLPREFIX=" or edit the Makefile to use the native tools.

curproc[cpu()] can now be NULL, indicating that no proc is running. This seemed safer to me than potentially having curproc[0] and curproc[1] both pointing at proc[0].

The old implementation of swtch depended on the stack frame layout used inside swtch being okay to return from on the other stack (exactly the V6 "you are not expected to understand this"). It also could be called in two contexts: at boot time, to schedule the very first process, and later, on behalf of a process, to sleep or to schedule some other process.

I split this into two functions: scheduler and swtch. scheduler is now a separate, never-returning function, invoked by each cpu once it is set up. It looks like:

	scheduler() {
		setjmp(cpu.context);
		pick proc to schedule
		blah blah blah
		longjmp(proc.context)
	}

The new swtch is intended to be called only when curproc[cpu()] is not NULL, that is, only on behalf of a user proc. It does:

	swtch() {
		if(setjmp(proc.context) == 0)
			longjmp(cpu.context)
	}

to save the current proc context and then jump over to the scheduler, which runs on the cpu stack.

Similarly, the system call stubs are now written in assembly in usys.S, to avoid needing to know the details of the stack frame layout used by the compiler.

Also various changes to the debugging prints.
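The split above leans entirely on the setjmp/longjmp contract: the first return from setjmp is 0, and a later longjmp makes that same setjmp appear to return again with a nonzero value. Below is a minimal user-space sketch of that contract using the standard C library's setjmp.h rather than the kernel's swtch.S versions (which additionally move %esp onto another stack, something the standard functions do not support); the names and strings are illustrative only.

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf sched_ctx;          /* stands in for cpus[cpu()].jmpbuf */

    static void
    proc_body(void)
    {
        /* "give up the cpu": jump back into the saved scheduler context */
        longjmp(sched_ctx, 1);
    }

    int
    main(void)
    {
        if (setjmp(sched_ctx) == 0) {
            /* first return: context saved, value 0, keep going */
            printf("context saved, running proc\n");
            proc_body();               /* never returns normally */
        } else {
            /* second "return": we arrived here via longjmp */
            printf("back in the scheduler context\n");
        }
        return 0;
    }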
parent 7ea6c9d197
commit 5ce9751cab
19 changed files with 200 additions and 131 deletions
.cvsignore (new file, 13 lines)
@@ -0,0 +1,13 @@
*.asm
*.d
kernel
user1
userfs
usertests
xv6.img
vectors.S
bochsout.txt
bootblock
bootother
bootother.out
parport.out

Makefile (31 changed lines)
@@ -1,11 +1,18 @@
OBJS = main.o console.o string.o kalloc.o proc.o trapasm.o trap.o vectors.o \
        syscall.o ide.o picirq.o mp.o spinlock.o fd.o pipe.o
        syscall.o ide.o picirq.o mp.o spinlock.o fd.o pipe.o swtch.o

CC = i386-jos-elf-gcc
LD = i386-jos-elf-ld
OBJCOPY = i386-jos-elf-objcopy
OBJDUMP = i386-jos-elf-objdump
# Cross-compiling (e.g., on Mac OS X)
TOOLPREFIX = i386-jos-elf-

# Using native tools (e.g., on X86 Linux)
# TOOLPREFIX =

CC = $(TOOLPREFIX)gcc
LD = $(TOOLPREFIX)ld
OBJCOPY = $(TOOLPREFIX)objcopy
OBJDUMP = $(TOOLPREFIX)objdump
CFLAGS = -nostdinc -I. -O2 -Wall -MD
AS = $(TOOLPREFIX)gas

xv6.img : bootblock kernel
        dd if=/dev/zero of=xv6.img count=10000

@@ -31,19 +38,21 @@ kernel : $(OBJS) bootother.S user1 usertests userfs
vectors.S : vectors.pl
        perl vectors.pl > vectors.S

user1 : user1.c ulib.o
ULIB = ulib.o usys.o

user1 : user1.c $(ULIB)
        $(CC) -nostdinc -I. -c user1.c
        $(LD) -N -e main -Ttext 0 -o user1 user1.o ulib.o
        $(LD) -N -e main -Ttext 0 -o user1 user1.o $(ULIB)
        $(OBJDUMP) -S user1 > user1.asm

usertests : usertests.c ulib.o
usertests : usertests.c $(ULIB)
        $(CC) -nostdinc -I. -c usertests.c
        $(LD) -N -e main -Ttext 0 -o usertests usertests.o ulib.o
        $(LD) -N -e main -Ttext 0 -o usertests usertests.o $(ULIB)
        $(OBJDUMP) -S usertests > usertests.asm

userfs : userfs.c ulib.o
userfs : userfs.c $(ULIB)
        $(CC) -nostdinc -I. -c userfs.c
        $(LD) -N -e main -Ttext 0 -o userfs userfs.o ulib.o
        $(LD) -N -e main -Ttext 0 -o userfs userfs.o $(ULIB)
        $(OBJDUMP) -S userfs > userfs.asm

ulib.o : ulib.c

Notes (2 changed lines)
@@ -1,5 +1,7 @@
bochs 2.2.6:
./configure --enable-smp --enable-disasm --enable-debugger --enable-all-optimizations --enable-4meg-pages --enable-global-pages --enable-pae --disable-reset-on-triple-fault
bochs CVS after 2.2.6:
./configure --enable-smp --enable-disasm --enable-debugger --enable-all-optimizations --enable-4meg-pages --enable-global-pages --enable-pae

bootmain.c doesn't work right if the ELF sections aren't
sector-aligned. so you can't use ld -N. and the sections may also need

(file name not shown)
@@ -106,7 +106,7 @@ cprintf(char *fmt, ...)
    if(c == 'd'){
      printint(*ap, 10, 1);
      ap++;
    } else if(c == 'x'){
    } else if(c == 'x' || c == 'p'){
      printint(*ap, 16, 0);
      ap++;
    } else if(c == '%'){

defs.h (7 changed lines)
@@ -10,11 +10,18 @@ void cons_putc(int);

// proc.c
struct proc;
struct jmpbuf;
void setupsegs(struct proc *);
struct proc * newproc(void);
void swtch(void);
void sleep(void *);
void wakeup(void *);
void scheduler(void);

// swtch.S
struct jmpbuf;
int setjmp(struct jmpbuf*);
void longjmp(struct jmpbuf*);

// trap.c
void tvinit(void);

(file name not shown)
@@ -107,7 +107,7 @@ romimage: file=$BXSHARE/BIOS-bochs-latest, address=0xf0000
# 650Mhz Athlon K-7 with Linux 2.4.4/egcs-2.91.66 2 to 2.5 Mips
# 400Mhz Pentium II with Linux 2.0.36/egcs-1.0.3 1 to 1.8 Mips
#=======================================================================
cpu: count=1, ips=10000000
cpu: count=2, ips=10000000

#=======================================================================
# MEGS

main.c (11 changed lines)
@@ -28,8 +28,7 @@ main()
    acquire_spinlock(&kernel_lock);
    idtinit(); // CPU's idt
    lapic_init(cpu());
    curproc[cpu()] = &proc[0]; // XXX
    swtch();
    scheduler();
  }
  acpu = 1;
  // clear BSS
@@ -45,7 +44,7 @@ main()

  // create fake process zero
  p = &proc[0];
  curproc[cpu()] = p;
  memset(p, 0, sizeof *p);
  p->state = WAITING;
  p->sz = 4 * PAGE;
  p->mem = kalloc(p->sz);
@@ -70,10 +69,10 @@ main()
  write_eflags(read_eflags() | FL_IF);

  p = newproc();
  // load_icode(p, _binary_usertests_start, (unsigned) _binary_usertests_size);
  // load_icode(p, _binary_usertests_start, (unsigned) _binary_usertests_size);
  load_icode(p, _binary_userfs_start, (unsigned) _binary_userfs_size);

  swtch();
  cprintf("loaded userfs\n");
  scheduler();

  return 0;
}

mp.c (4 changed lines)
@@ -141,8 +141,8 @@ lapic_timerinit()
  cprintf("%d: init timer\n", cpu());
  lapic_write(LAPIC_TDCR, LAPIC_X1);
  lapic_write(LAPIC_TIMER, LAPIC_CLKIN | LAPIC_PERIODIC | (IRQ_OFFSET + IRQ_TIMER));
  lapic_write(LAPIC_TCCR, 1000000);
  lapic_write(LAPIC_TICR, 1000000);
  lapic_write(LAPIC_TCCR, 10000000);
  lapic_write(LAPIC_TICR, 10000000);
}

void

proc.c (83 changed lines)
@@ -48,8 +48,7 @@ struct proc *
newproc()
{
  struct proc *np;
  struct proc *op = curproc[cpu()];
  unsigned *sp;
  struct proc *op;
  int fd;

  for(np = &proc[1]; np < &proc[NPROC]; np++)
@@ -58,6 +57,11 @@ newproc()
  if(np >= &proc[NPROC])
    return 0;

  // copy from proc[0] if we're bootstrapping
  op = curproc[cpu()];
  if(op == 0)
    op = &proc[0];

  np->pid = next_pid++;
  np->ppid = op->pid;
  np->sz = op->sz;
@@ -76,11 +80,12 @@ newproc()
  np->tf = (struct Trapframe *) (np->kstack + KSTACKSIZE - sizeof(struct Trapframe));
  *(np->tf) = *(op->tf);
  np->tf->tf_regs.reg_eax = 0; // so fork() returns 0 in child
  sp = (unsigned *) np->tf;
  *(--sp) = (unsigned) &trapret; // for return from swtch()
  *(--sp) = 0; // previous bp for leave in swtch()
  np->esp = (unsigned) sp;
  np->ebp = (unsigned) sp;
  cprintf("newproc pid=%d return to %x:%x tf-%p\n", np->pid, np->tf->tf_cs, np->tf->tf_eip, np->tf);

  // set up new jmpbuf to start executing at trapret with esp pointing at tf
  memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
  np->jmpbuf.jb_eip = (unsigned) trapret;
  np->jmpbuf.jb_esp = (unsigned) np->tf - 4; // -4 for the %eip that isn't actually there

  // copy file descriptors
  for(fd = 0; fd < NOFILE; fd++){
@@ -96,33 +101,20 @@ newproc()
  return np;
}

/*
 * find a runnable process and switch to it.
 */
void
swtch()
scheduler(void)
{
  struct proc *np;
  struct proc *op = curproc[cpu()];
  unsigned sp;
  struct proc *op, *np;
  int i;

  // force push of callee-saved registers
  asm volatile("nop" : : : "%edi", "%esi", "%ebx");

  // save calling process's stack pointers
  op->ebp = read_ebp();
  op->esp = read_esp();

  // don't execute on calling process's stack
  sp = (unsigned) cpus[cpu()].mpstack + MPSTACK - 32;
  asm volatile("movl %0, %%esp" : : "g" (sp));
  asm volatile("movl %0, %%ebp" : : "g" (sp));

  // gcc might store op on the stack
  np = curproc[cpu()];
  np = np + 1;
  cprintf("start scheduler on cpu %d jmpbuf %p\n", cpu(), &cpus[cpu()].jmpbuf);
  cpus[cpu()].lastproc = &proc[0];

  setjmp(&cpus[cpu()].jmpbuf);

  // find a runnable process and switch to it
  curproc[cpu()] = 0;
  np = cpus[cpu()].lastproc + 1;
  while(1){
    for(i = 0; i < NPROC; i++){
      if(np >= &proc[NPROC])
@@ -139,34 +131,47 @@ swtch()
    acquire_spinlock(&kernel_lock);
    np = &proc[0];
  }

  cprintf("cpu %d swtch %x -> %x\n", cpu(), curproc[cpu()], np);

  cpus[cpu()].lastproc = np;
  curproc[cpu()] = np;

  np->state = RUNNING;

  // h/w sets busy bit in TSS descriptor sometimes, and faults
  // if it's set in LTR. so clear tss descriptor busy bit.
  np->gdt[SEG_TSS].sd_type = STS_T32A;

  // XXX should probably have an lgdt() function in x86.h
  // to confine all the inline assembly.
  // XXX probably ought to lgdt on trap return too, in case
  // a system call has moved a program or changed its size.

  asm volatile("lgdt %0" : : "g" (np->gdt_pd.pd_lim));
  ltr(SEG_TSS << 3);

  // this happens to work, but probably isn't safe:
  // it's not clear that np->ebp is guaranteed to evaluate
  // correctly after changing the stack pointer.
  asm volatile("movl %0, %%esp" : : "g" (np->esp));
  asm volatile("movl %0, %%ebp" : : "g" (np->ebp));
  if(0) cprintf("cpu%d: run %d esp=%p callerpc=%p\n", cpu(), np-proc);
  longjmp(&np->jmpbuf);
}

// give up the cpu by switching to the scheduler,
// which runs on the per-cpu stack.
void
swtch(void)
{
  struct proc *p = curproc[cpu()];
  if(p == 0)
    panic("swtch");
  if(setjmp(&p->jmpbuf) == 0)
    longjmp(&cpus[cpu()].jmpbuf);
}

void
sleep(void *chan)
{
  curproc[cpu()]->chan = chan;
  curproc[cpu()]->state = WAITING;
  struct proc *p = curproc[cpu()];
  if(p == 0)
    panic("sleep");
  p->chan = chan;
  p->state = WAITING;
  swtch();
}

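The scheduler's selection loop (partly truncated in the hunk above) starts scanning just past the proc this cpu ran last and wraps around the table. Here is a stand-alone sketch of that round-robin pick, with locking omitted and a RUNNABLE state assumed; the actual state names live in proc.h and are not all visible in this diff, so treat this as an illustration rather than the committed code.

    // Hypothetical helper, not part of the commit: pick the next runnable
    // proc after lastproc, wrapping around proc[0..NPROC-1].
    struct proc *
    pick_next(struct proc *lastproc)
    {
      struct proc *np = lastproc + 1;
      int i;

      for(i = 0; i < NPROC; i++){
        if(np >= &proc[NPROC])
          np = &proc[0];               // wrap around the table
        if(np->state == RUNNABLE)      // assumed state name
          return np;
        np++;
      }
      return 0;                        // nothing runnable right now
    }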
proc.h (24 changed lines)
@@ -16,6 +16,23 @@
#define SEG_TSS 5 // this process's task state
#define NSEGS 6

struct jmpbuf {
  // saved registers for kernel context switches
  // don't need to save all the fs etc. registers because
  // they are constant across kernel contexts
  // save all the regular registers so we don't care which are caller save
  // don't save eax because that's the return register
  // layout known to swtch.S
  int jb_ebx;
  int jb_ecx;
  int jb_edx;
  int jb_esi;
  int jb_edi;
  int jb_esp;
  int jb_ebp;
  int jb_eip;
};

struct proc{
  char *mem; // start of process's physical memory
  unsigned sz; // total size of mem, including kernel stack
@@ -32,17 +49,22 @@ struct proc{
  unsigned esp; // kernel stack pointer
  unsigned ebp; // kernel frame pointer

  struct jmpbuf jmpbuf;

  struct Trapframe *tf; // points into kstack, used to find user regs
};

extern struct proc proc[];
extern struct proc *curproc[NCPU];
extern struct proc *curproc[NCPU]; // can be NULL if no proc running.
                                   // XXX move curproc into cpu structure?

#define MPSTACK 512

struct cpu {
  uint8_t apicid; // Local APIC ID
  struct jmpbuf jmpbuf;
  char mpstack[MPSTACK]; // per-cpu start-up stack, only used to get into main()
  struct proc *lastproc; // last proc scheduled on this cpu (never NULL)
};

extern struct cpu cpus[NCPU];

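The "layout known to swtch.S" comment is load-bearing: swtch.S reads and writes these fields at hard-coded byte offsets 0 through 28. A compile-time sanity check one could add (illustrative only, not part of the commit), using the classic negative-array-size trick since this code predates C11 _Static_assert:

    #include <stddef.h>

    // Each typedef fails to compile if an offset drifts away from
    // what swtch.S expects.
    typedef char jb_ebx_off[offsetof(struct jmpbuf, jb_ebx) ==  0 ? 1 : -1];
    typedef char jb_edi_off[offsetof(struct jmpbuf, jb_edi) == 16 ? 1 : -1];
    typedef char jb_esp_off[offsetof(struct jmpbuf, jb_esp) == 20 ? 1 : -1];
    typedef char jb_ebp_off[offsetof(struct jmpbuf, jb_ebp) == 24 ? 1 : -1];
    typedef char jb_eip_off[offsetof(struct jmpbuf, jb_eip) == 28 ? 1 : -1];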
spinlock.c (23 changed lines)
@@ -4,31 +4,39 @@
#include "mmu.h"

#define LOCK_FREE -1
#define DEBUG 0

uint32_t kernel_lock = LOCK_FREE;

int getcallerpc(void *v) {
  return ((int*)v)[-1];
}

// lock = LOCK_FREE if free, else = cpu_id of owner CPU
void
void
acquire_spinlock(uint32_t* lock)
{
  int cpu_id = cpu();

  if (*lock == cpu_id)
    return;

  // on a real machine there would be a memory barrier here
  if(DEBUG) cprintf("cpu%d: acquiring at %x\n", cpu_id, getcallerpc(&lock));
  write_eflags(read_eflags() & ~FL_IF);
  if (*lock == cpu_id)
    panic("recursive lock");

  while ( cmpxchg(LOCK_FREE, cpu_id, lock) != cpu_id ) { ; }
  // cprintf ("acquired: %d\n", cpu_id);
  if(DEBUG) cprintf("cpu%d: acquired at %x\n", cpu_id, getcallerpc(&lock));
}

void
release_spinlock(uint32_t* lock)
{
  int cpu_id = cpu();
  // cprintf ("release: %d\n", cpu_id);
  if(DEBUG) cprintf ("cpu%d: releasing at %x\n", cpu_id, getcallerpc(&lock));
  if (*lock != cpu_id)
    panic("release_spinlock: releasing a lock that i don't own\n");
  *lock = LOCK_FREE;
  // on a real machine there would be a memory barrier here
  write_eflags(read_eflags() | FL_IF);
}
@@ -36,8 +44,9 @@ void
release_grant_spinlock(uint32_t* lock, int c)
{
  int cpu_id = cpu();
  // cprintf ("release_grant: %d -> %d\n", cpu_id, c);
  if(DEBUG) cprintf ("cpu%d: release_grant to %d at %x\n", cpu_id, c, getcallerpc(&lock));
  if (*lock != cpu_id)
    panic("release_spinlock: releasing a lock that i don't own\n");
  *lock = c;
}

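The acquire path spins on a compare-and-swap until this cpu owns the lock, with interrupts kept off for the duration. A rough, free-standing sketch of the same idea using gcc's __sync builtins in place of the kernel's cmpxchg() wrapper (the builtins are newer than the gcc 3.4.6 mentioned in the commit message; the recursion check and the eflags handling are left out):

    #include <stdint.h>

    #define LOCK_FREE ((uint32_t)-1)

    static void
    spin_acquire(volatile uint32_t *lock, uint32_t cpu_id)
    {
      // Atomically replace LOCK_FREE with cpu_id; keep retrying until
      // the old value we observed really was LOCK_FREE.
      while(__sync_val_compare_and_swap(lock, LOCK_FREE, cpu_id) != LOCK_FREE)
        ;  // spin
    }

    static void
    spin_release(volatile uint32_t *lock)
    {
      __sync_synchronize();   // the "memory barrier here" from the comments
      *lock = LOCK_FREE;
    }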
swtch.S (new file, 37 lines)
@@ -0,0 +1,37 @@
.globl setjmp
setjmp:
  movl 4(%esp), %eax

  movl %ebx, 0(%eax)
  movl %ecx, 4(%eax)
  movl %edx, 8(%eax)
  movl %esi, 12(%eax)
  movl %edi, 16(%eax)
  movl %esp, 20(%eax)
  movl %ebp, 24(%eax)
  pushl 0(%esp) /* %eip */
  popl 28(%eax)

  movl $0, %eax /* return value */
  ret

.globl longjmp
longjmp:
  movl 4(%esp), %eax

  movl 0(%eax), %ebx
  movl 4(%eax), %ecx
  movl 8(%eax), %edx
  movl 12(%eax), %esi
  movl 16(%eax), %edi
  movl 20(%eax), %esp
  movl 24(%eax), %ebp

  addl $4, %esp /* pop %eip into thin air */
  pushl 28(%eax) /* push new %eip */

  movl $1, %eax /* return value (appears to come from setjmp!) */
  ret

(file name not shown)
@@ -39,7 +39,7 @@ fetcharg(int argno, int *ip)
  unsigned esp;

  esp = (unsigned) curproc[cpu()]->tf->tf_esp;
  return fetchint(curproc[cpu()], esp + 8 + 4*argno, ip);
  return fetchint(curproc[cpu()], esp + 4 + 4*argno, ip);
}

int
@@ -178,6 +178,7 @@ sys_exit()
    if(p->ppid == cp->pid)
      p->pid = 1;

  // switch into scheduler
  swtch();

  return 0;

trap.c (8 changed lines)
@@ -36,13 +36,15 @@ trap(struct Trapframe *tf)
  int v = tf->tf_trapno;

  if(tf->tf_cs == 0x8 && kernel_lock == cpu())
    cprintf("cpu %d: trap from %x:%x with lock=%d\n",
            cpu(), tf->tf_cs, tf->tf_eip, kernel_lock);
    cprintf("cpu %d: trap %d from %x:%x with lock=%d\n",
            cpu(), v, tf->tf_cs, tf->tf_eip, kernel_lock);

  acquire_spinlock(&kernel_lock); // released in trapret in trapasm.S

  if(v == T_SYSCALL){
    struct proc *cp = curproc[cpu()];
    if(cp == 0)
      panic("syscall with no proc");
    cp->tf = tf;
    syscall();
    if(cp != curproc[cpu()])
@@ -51,7 +53,7 @@ trap(struct Trapframe *tf)
      panic("trap ret but not RUNNING");
    if(tf != cp->tf)
      panic("trap ret wrong tf");
    if(read_esp() < cp->kstack || read_esp() >= cp->kstack + KSTACKSIZE)
    if(read_esp() < (unsigned)cp->kstack || read_esp() >= (unsigned)cp->kstack + KSTACKSIZE)
      panic("trap ret esp wrong");
    return;
  }

trapasm.S
@@ -22,9 +22,10 @@ alltraps:
 * expects ESP to point to a Trapframe
 */
trapret:
  push $kernel_lock
  pushl $kernel_lock
  call release_spinlock
  addl $4, %esp
  addl $0x4, %esp

  popal
  popl %es
  popl %ds

ulib.c (56 changed lines)
@@ -1,24 +1,3 @@
int
fork()
{
  asm("mov $1, %eax");
  asm("int $48");
}

int
exit()
{
  asm("mov $2, %eax");
  asm("int $48");
}

void
cons_putc(int c)
{
  asm("mov $4, %eax");
  asm("int $48");
}

int
puts(char *s)
{
@@ -29,38 +8,3 @@ puts(char *s)
  return i;
}

int
pipe(int fds[])
{
  asm("mov $5, %eax");
  asm("int $48");
}

int
read(int fd, char *buf, int n)
{
  asm("mov $7, %eax");
  asm("int $48");
}

int
write(int fd, char *buf, int n)
{
  asm("mov $6, %eax");
  asm("int $48");
}

int
close(int fd)
{
  asm("mov $8, %eax");
  asm("int $48");
}

int
block(void)
{
  asm("mov $9, %eax");
  asm("int $48");
}

userfs.c (1 changed line)
@@ -4,5 +4,6 @@ char buf[1024];

main()
{
  puts("userfs running\n");
  block();
}

usys.S (new file, 18 lines)
@@ -0,0 +1,18 @@
#include "syscall.h"
#include "traps.h"

#define STUB(name) \
  .globl name; \
  name: \
  movl $SYS_ ## name, %eax; \
  int $T_SYSCALL; \
  ret

STUB(fork)
STUB(exit)
STUB(cons_putc)
STUB(pipe)
STUB(read)
STUB(write)
STUB(close)
STUB(block)

x86.h (1 changed line)
@@ -349,7 +349,6 @@ struct Trapframe {
  uint16_t tf_padding4;
};


#define MAX_IRQS 16 // Number of IRQs

#define IRQ_OFFSET 32 // IRQ 0 corresponds to int IRQ_OFFSET