Attempt to clean up newproc somewhat.
Also remove all calls to memcpy in favor of memmove, which has defined semantics when the ranges overlap. The fact that memcpy was working in console.c to scroll the screen is not guaranteed by all implementations.
parent 65bd8e139a
commit 856e1fc1ad
7 changed files with 105 additions and 86 deletions
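The overlap point in the commit message is easy to see with a standalone sketch. The program below is illustrative only, not kernel code: the screen dimensions and cell type are invented, but the copy has the same shape as the console scroll, where the source range starts one row past the destination and the two ranges overlap. memmove is defined for that case; memcpy is not.

#include <stdio.h>
#include <string.h>

/* Illustrative only: a 5-row, 4-column "screen" of characters.
   Scrolling up copies rows 1..4 onto rows 0..3, so the source and
   destination ranges overlap by three rows. */
#define ROWS 5
#define COLS 4

int
main(void)
{
  char screen[ROWS * COLS + 1] = "aaaabbbbccccddddeeee";

  /* Shift everything up one row. memmove is required here because
     [COLS, ROWS*COLS) and [0, (ROWS-1)*COLS) overlap. */
  memmove(screen, screen + COLS, (ROWS - 1) * COLS);

  /* Blank the now-stale last row, as console.c does with memset. */
  memset(screen + (ROWS - 1) * COLS, ' ', COLS);

  printf("%s\n", screen);   /* prints "bbbbccccddddeeee    " */
  return 0;
}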
console.c

@@ -58,7 +58,7 @@ real_cons_putc(int c)
   if((ind / 80) >= 24){
     // scroll up
-    memcpy(crt, crt + 80, sizeof(crt[0]) * (23 * 80));
+    memmove(crt, crt + 80, sizeof(crt[0]) * (23 * 80));
     ind -= 80;
     memset(crt + ind, 0, sizeof(crt[0]) * ((24 * 80) - ind));
   }
defs.h (4 changes)

@@ -12,7 +12,7 @@ void cons_putc(int);
 struct proc;
 struct jmpbuf;
 void setupsegs(struct proc *);
-struct proc * newproc(void);
+struct proc * copyproc(struct proc*);
 struct spinlock;
 void sleep(void *, struct spinlock *);
 void wakeup(void *);
@@ -32,7 +32,6 @@ void tvinit(void);
 void idtinit(void);
 
 // string.c
-void * memcpy(void *dst, void *src, unsigned n);
 void * memset(void *dst, int c, unsigned n);
 int memcmp(const void *v1, const void *v2, unsigned n);
 void *memmove(void *dst, const void *src, unsigned n);
@@ -92,3 +91,4 @@ void ide_init(void);
 void ide_intr(void);
 void* ide_start_read(uint32_t secno, void *dst, unsigned nsecs);
 int ide_finish_read(void *);
+
main.c (4 changes)

@@ -81,7 +81,7 @@ main()
   // become interruptable
   sti();
 
-  p = newproc();
+  p = copyproc(&proc[0]);
 
   load_icode(p, _binary_usertests_start, (unsigned) _binary_usertests_size);
   //load_icode(p, _binary_userfs_start, (unsigned) _binary_userfs_size);
@@ -122,7 +122,7 @@ load_icode(struct proc *p, uint8_t *binary, unsigned size)
       panic("load_icode: icode wants to be above UTOP");
 
     // Load/clear the segment
-    memcpy(p->mem + ph->p_va, binary + ph->p_offset, ph->p_filesz);
+    memmove(p->mem + ph->p_va, binary + ph->p_offset, ph->p_filesz);
     memset(p->mem + ph->p_va + ph->p_filesz, 0, ph->p_memsz - ph->p_filesz);
   }
 }
proc.c (126 changes)

@@ -13,6 +13,7 @@ struct proc proc[NPROC];
 struct proc *curproc[NCPU];
 int next_pid = 1;
 extern void forkret(void);
+extern void forkret1(struct Trapframe*);
 
 /*
  * set up a process's task state and segment descriptors
@@ -42,84 +43,87 @@ setupsegs(struct proc *p)
   p->gdt_pd.pd_base = (unsigned) p->gdt;
 }
 
 extern void trapret();
 
-/*
- * internal fork(). does not copy kernel stack; instead,
- * sets up the stack to return as if from system call.
- * caller must set state to RUNNABLE.
- */
-struct proc *
-newproc()
+// Look in the process table for an UNUSED proc.
+// If found, change state to EMBRYO and return it.
+// Otherwise return 0.
+struct proc*
+allocproc(void)
 {
-  struct proc *np;
-  struct proc *op;
-  int fd;
+  int i;
+  struct proc *p;
 
-  acquire(&proc_table_lock);
-
-  for(np = &proc[1]; np < &proc[NPROC]; np++){
-    if(np->state == UNUSED){
-      np->state = EMBRYO;
-      break;
+  for(i = 0; i < NPROC; i++){
+    p = &proc[i];
+    if(p->state == UNUSED){
+      p->state = EMBRYO;
+      return p;
     }
   }
-  if(np >= &proc[NPROC]){
-    return 0;
-  }
+  return 0;
+}
 
-  // copy from proc[0] if we're bootstrapping
-  op = curproc[cpu()];
-  if(op == 0)
-    op = &proc[0];
-
+// Create a new process copying p as the parent.
+// Does not copy the kernel stack.
+// Instead, sets up stack to return as if from system call.
+// Caller must arrange for process to run (set state to RUNNABLE).
+struct proc *
+copyproc(struct proc* p)
+{
+  int i;
+  struct proc *np;
+
+  // Allocate process.
+  acquire(&proc_table_lock);
+  if((np = allocproc()) == 0){
+    release(&proc_table_lock);
+    return 0;
+  }
   np->pid = next_pid++;
-  np->ppid = op->pid;
-
+  np->ppid = p->pid;
  release(&proc_table_lock);
 
-  np->sz = op->sz;
-  np->mem = kalloc(op->sz);
-  if(np->mem == 0)
-    return 0;
-  memcpy(np->mem, op->mem, np->sz);
-  np->kstack = kalloc(KSTACKSIZE);
-  if(np->kstack == 0){
-    kfree(np->mem, op->sz);
+  // Copy process image memory.
+  np->sz = p->sz;
+  np->mem = kalloc(np->sz);
+  if(np->mem == 0){
+    np->state = UNUSED;
+    return 0;
+  }
+  memmove(np->mem, p->mem, np->sz);
+
+  // Allocate kernel stack.
+  np->kstack = kalloc(KSTACKSIZE);
+  if(np->kstack == 0){
+    kfree(np->mem, np->sz);
+    np->state = UNUSED;
     return 0;
   }
+
+  // Initialize segment table.
   setupsegs(np);
 
-  // set up kernel stack to return to user space
-  np->tf = (struct Trapframe *) (np->kstack + KSTACKSIZE - sizeof(struct Trapframe));
-  *(np->tf) = *(op->tf);
-  np->tf->tf_regs.reg_eax = 0; // so fork() returns 0 in child
+  // Copy trapframe registers from parent.
+  np->tf = (struct Trapframe*)(np->kstack + KSTACKSIZE) - 1;
+  *np->tf = *p->tf;
 
-  // Set up new jmpbuf to start executing forkret (see trapasm.S)
-  // with esp pointing at tf. Forkret will call forkret1 (below) to release
-  // the proc_table_lock and then jump into the usual trap return code.
+  // Clear %eax so that fork system call returns 0 in child.
+  np->tf->tf_regs.reg_eax = 0;
+
+  // Set up new jmpbuf to start executing at forkret (see below).
   memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
-  np->jmpbuf.jb_eip = (unsigned) forkret;
-  np->jmpbuf.jb_esp = (unsigned) np->tf - 4;  // -4 for the %eip that isn't actually there
+  np->jmpbuf.jb_eip = (unsigned)forkret;
+  np->jmpbuf.jb_esp = (unsigned)np->tf;
 
   // Copy file descriptors
-  for(fd = 0; fd < NOFILE; fd++){
-    np->fds[fd] = op->fds[fd];
-    if(np->fds[fd])
-      fd_reference(np->fds[fd]);
+  for(i = 0; i < NOFILE; i++){
+    np->fds[i] = p->fds[i];
+    if(np->fds[i])
+      fd_reference(np->fds[i]);
   }
 
   return np;
 }
 
-void
-forkret1(void)
-{
-  release(&proc_table_lock);
-}
-
 // Per-CPU process scheduler.
 // Each CPU calls scheduler() after setting itself up.
 // Scheduler never returns. It loops, doing:
@@ -199,7 +203,7 @@ sched(void)
 
 // Give up the CPU for one scheduling round.
 void
-yield()
+yield(void)
 {
   struct proc *p;
 
@@ -211,6 +215,18 @@ yield()
   release(&proc_table_lock);
 }
 
+// A process's very first scheduling by scheduler()
+// will longjmp here to do the first jump into user space.
+void
+forkret(void)
+{
+  // Still holding proc_table_lock from scheduler.
+  release(&proc_table_lock);
+
+  // Jump into assembly, never to return.
+  forkret1(curproc[cpu()]->tf);
+}
+
 // Atomically release lock and sleep on chan.
 // Reacquires lock when reawakened.
 void
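One small detail in the copyproc hunk above: the trapframe is now placed with pointer arithmetic, (struct Trapframe*)(np->kstack + KSTACKSIZE) - 1, instead of subtracting sizeof(struct Trapframe) from the top of the stack by hand. A standalone check, using stand-in values rather than xv6's real KSTACKSIZE and Trapframe layout, shows the two forms compute the same address.

#include <stdio.h>

/* Stand-in definitions for the sketch; xv6's real KSTACKSIZE and
   struct Trapframe differ, but the pointer arithmetic is identical. */
#define KSTACKSIZE 4096
struct Trapframe { unsigned regs[19]; };

int
main(void)
{
  static char kstack[KSTACKSIZE];

  /* Old style: step back sizeof(struct Trapframe) bytes from the top. */
  char *old_style = kstack + KSTACKSIZE - sizeof(struct Trapframe);

  /* New style: treat the top of the stack as one-past-the-end of an
     array of Trapframes and step back one element. */
  struct Trapframe *new_style = (struct Trapframe*)(kstack + KSTACKSIZE) - 1;

  printf("%d\n", (char*)new_style == old_style);   /* prints 1 */
  return 0;
}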
string.c (30 changes)

@@ -1,18 +1,6 @@
 #include "types.h"
 #include "defs.h"
 
-void *
-memcpy(void *dst, void *src, unsigned n)
-{
-  char *d = (char *) dst;
-  char *s = (char *) src;
-
-  while(n-- > 0)
-    *d++ = *s++;
-
-  return dst;
-}
-
 void *
 memset(void *dst, int c, unsigned n)
 {
@@ -69,3 +57,21 @@ strncmp(const char *p, const char *q, unsigned n)
   else
     return (int) ((unsigned char) *p - (unsigned char) *q);
 }
+
+// Memcpy is deprecated and should NOT be called.
+// Use memmove instead, which has defined semantics
+// when the two memory ranges overlap.
+// Memcpy is here only because gcc compiles some
+// structure assignments into calls to memcpy.
+void *
+memcpy(void *dst, void *src, unsigned n)
+{
+  char *d = (char *) dst;
+  char *s = (char *) src;
+
+  while(n-- > 0)
+    *d++ = *s++;
+
+  return dst;
+}
+
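The comment kept with the deprecated memcpy explains why the definition cannot simply be deleted: gcc can lower certain operations, notably large structure assignments, into calls to memcpy even when nothing calls it by name. A hypothetical example of the kind of code that may trigger this (the struct is invented for illustration):

/* Hypothetical example: a plain structure assignment that gcc may
   compile into a call to memcpy behind the programmer's back. This is
   why string.c keeps a memcpy definition even though kernel code is
   expected to call memmove explicitly. */
struct big {
  char buf[256];
};

void
copy_big(struct big *dst, struct big *src)
{
  *dst = *src;   /* may be emitted as: call memcpy */
}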
syscall.c (13 changes)

@@ -30,7 +30,7 @@ fetchint(struct proc *p, unsigned addr, int *ip)
 
   if(addr > p->sz - 4)
     return -1;
-  memcpy(ip, p->mem + addr, 4);
+  memmove(ip, p->mem + addr, 4);
   return 0;
 }
 
@@ -49,7 +49,7 @@ putint(struct proc *p, unsigned addr, int ip)
 {
   if(addr > p->sz - 4)
     return -1;
-  memcpy(p->mem + addr, &ip, 4);
+  memmove(p->mem + addr, &ip, 4);
   return 0;
 }
 
@@ -150,13 +150,10 @@ sys_fork(void)
 {
   struct proc *np;
 
-  np = newproc();
-  if(np){
-    np->state = RUNNABLE;
-    return np->pid;
-  } else {
+  if((np = copyproc(curproc[cpu()])) == 0)
     return -1;
-  }
+  np->state = RUNNABLE;
+  return np->pid;
 }
 
 int
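fetchint and putint copy the 4-byte word through memmove rather than dereferencing a cast pointer. Whatever the original motivation, one property of the byte-copy pattern is that it works for any alignment of the user-supplied address. A standalone illustration, not kernel code (the buffer and offset are made up):

#include <stdio.h>
#include <string.h>

int
main(void)
{
  /* A made-up byte buffer standing in for p->mem; the offset 1 is
     deliberately not int-aligned. */
  unsigned char mem[8] = { 0x00, 0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00 };
  int value = 0;

  /* Copy the word out byte by byte, as fetchint does, instead of
     casting mem + 1 to int*. */
  memmove(&value, mem + 1, 4);

  printf("0x%x\n", value);   /* 0x12345678 on a little-endian machine */
  return 0;
}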
trapasm.S

@@ -30,9 +30,9 @@ trapret:
   addl $0x8, %esp /* trapno and errcode */
   iret
 
-.globl forkret
-forkret:
-  call forkret1
+.globl forkret1
+forkret1:
+  movl 4(%esp), %esp
   jmp trapret
 
 .globl acpu