xv6: boot loader adjustments

Do the Bochs breakpoint and spin in bootasm.S;
no longer needed in bootmain.
Fix readseg bug (rounding of va).
Zero segments when memsz > filesz,
so there is no need to clear the BSS in kernel main.
Make bootother.S like bootasm.S.
rsc 2009-03-08 21:41:30 +00:00
parent 8220135362
commit b7f653dc49
4 changed files with 41 additions and 31 deletions

bootasm.S

@@ -5,13 +5,13 @@
 # memory at physical address 0x7c00 and starts executing in real mode
 # with %cs=0 %ip=7c00.
 
-.set PROT_MODE_CSEG, 0x8       # kernel code segment selector
-.set PROT_MODE_DSEG, 0x10      # kernel data segment selector
-.set CR0_PE_ON, 0x1            # protected mode enable flag
+.set CSEG32, 0x8               # kernel code segment selector
+.set DSEG32, 0x10              # kernel data segment selector
+.set CR0_PE, 0x1               # protected mode enable flag
 
+.code16                        # Assemble for 16-bit mode
 .globl start
 start:
-  .code16                      # Assemble for 16-bit mode
   cli                          # Disable interrupts
   cld                          # String operations increment

@@ -48,17 +48,17 @@ seta20.2:
   # effective memory map does not change during the switch.
   lgdt    gdtdesc
   movl    %cr0, %eax
-  orl     $CR0_PE_ON, %eax
+  orl     $CR0_PE, %eax
   movl    %eax, %cr0
 
   # Jump to next instruction, but in 32-bit code segment.
   # Switches processor into 32-bit mode.
-  ljmp    $PROT_MODE_CSEG, $protcseg
+  ljmp    $CSEG32, $start32
 
 .code32                        # Assemble for 32-bit mode
-protcseg:
+start32:
   # Set up the protected-mode data segment registers
-  movw    $PROT_MODE_DSEG, %ax # Our data segment selector
+  movw    $DSEG32, %ax         # Our data segment selector
   movw    %ax, %ds             # -> DS: Data Segment
   movw    %ax, %es             # -> ES: Extra Segment
   movw    %ax, %fs             # -> FS

@@ -69,7 +69,13 @@ protcseg:
   movl    $start, %esp
   call    bootmain
 
-  # If bootmain returns (it shouldn't), loop.
+  # If bootmain returns (it shouldn't), trigger a Bochs
+  # breakpoint if running under Bochs, then loop.
+  movw    $0x8a00, %ax         # 0x8a00 -> port 0x8a00
+  movw    %ax, %dx
+  outw    %ax, %dx
+  movw    $0x8e00, %ax         # 0x8e00 -> port 0x8a00
+  outw    %ax, %dx
 
 spin:
   jmp     spin
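
For reference, the new assembly performs the same two port writes that bootmain's old bad: path used (removed further down in this diff). A rough, self-contained C sketch of that sequence; the outw helper below is an assumption modeled on xv6's x86.h port stubs and is not part of this commit:

typedef unsigned short ushort;

// Sketch of the Bochs "magic breakpoint" sequence added above; the
// 0x8a00/0x8e00 values come straight from the diff.
static inline void
outw(ushort port, ushort data)
{
  asm volatile("out %0,%1" : : "a" (data), "d" (port));
}

static void
bochs_break_and_spin(void)
{
  outw(0x8a00, 0x8a00);   // write 0x8a00 to port 0x8a00: enable the debug device
  outw(0x8a00, 0x8e00);   // write 0x8e00: ask Bochs to break into its debugger
  for(;;)
    ;                     // harmless spin when not running under Bochs
}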

bootmain.c

@@ -11,7 +11,7 @@
 #define SECTSIZE  512
 
-void readseg(uint, uint, uint);
+void readseg(uchar*, uint, uint);
 
 void
 bootmain(void)

@@ -19,32 +19,31 @@ bootmain(void)
   struct elfhdr *elf;
   struct proghdr *ph, *eph;
   void (*entry)(void);
+  uchar* va;
 
   elf = (struct elfhdr*)0x10000;  // scratch space
 
   // Read 1st page off disk
-  readseg((uint)elf, SECTSIZE*8, 0);
+  readseg((uchar*)elf, 4096, 0);
 
   // Is this an ELF executable?
   if(elf->magic != ELF_MAGIC)
-    goto bad;
+    return;  // let bootasm.S handle error
 
   // Load each program segment (ignores ph flags).
   ph = (struct proghdr*)((uchar*)elf + elf->phoff);
   eph = ph + elf->phnum;
-  for(; ph < eph; ph++)
-    readseg(ph->va & 0xFFFFFF, ph->memsz, ph->offset);
+  for(; ph < eph; ph++){
+    va = (uchar*)(ph->va & 0xFFFFFF);
+    readseg(va, ph->filesz, ph->offset);
+    if(ph->memsz > ph->filesz)
+      stosb(va + ph->filesz, 0, ph->memsz - ph->filesz);
+  }
 
   // Call the entry point from the ELF header.
   // Does not return!
   entry = (void(*)(void))(elf->entry & 0xFFFFFF);
   entry();
-
- bad:
-  outw(0x8A00, 0x8A00);
-  outw(0x8A00, 0x8E00);
-  for(;;)
-    ;
 }
 
 void
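
The stosb() used in the new loop is not part of this diff; it is xv6's x86.h "rep stosb" helper. As a portable sketch of what that one call accomplishes here, assuming only that stosb(addr, data, cnt) fills cnt bytes at addr with data:

typedef unsigned char uchar;
typedef unsigned int uint;

// Sketch of stosb(va + ph->filesz, 0, ph->memsz - ph->filesz): the ELF image
// only supplies filesz bytes of the segment, so the loader must zero the
// remaining memsz - filesz bytes (the segment's .bss portion) itself.
static void
zero_bss_tail(uchar *va, uint filesz, uint memsz)
{
  uint i;

  for(i = filesz; i < memsz; i++)
    va[i] = 0;
}

This is also why main.c can drop its own memset of edata..end below: the boot loader now hands the kernel a fully zeroed BSS.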
@@ -76,14 +75,14 @@ readsect(void *dst, uint offset)
 // Read 'count' bytes at 'offset' from kernel into virtual address 'va'.
 // Might copy more than asked.
 void
-readseg(uint va, uint count, uint offset)
+readseg(uchar* va, uint count, uint offset)
 {
-  uint eva;
+  uchar* eva;
 
   eva = va + count;
 
   // Round down to sector boundary.
-  va &= ~(SECTSIZE - 1);
+  va -= offset % SECTSIZE;
 
   // Translate from bytes to sectors; kernel starts at sector 1.
   offset = (offset / SECTSIZE) + 1;

@@ -92,5 +91,5 @@ readseg(uint va, uint count, uint offset)
   // We'd write more to memory than asked, but it doesn't matter --
   // we load in increasing order.
   for(; va < eva; va += SECTSIZE, offset++)
-    readsect((uchar*)va, offset);
+    readsect(va, offset);
 }
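
The rounding fix matters whenever va and offset are not aligned the same way within a sector. A standalone illustration with made-up numbers (offset 0x1234, target address 0x100000; neither value comes from the diff):

#include <stdio.h>

#define SECTSIZE 512

int
main(void)
{
  unsigned offset = 0x1234;    /* byte offset of the data in the kernel image */
  unsigned va = 0x100000;      /* address where that byte must end up */
  unsigned in_sector = offset % SECTSIZE;   /* 0x34: position within its sector */

  /* Old code: round va itself down to a sector boundary.  The sector holding
   * file byte 0x1234 is copied starting at old_start, so the byte lands at
   * old_start + 0x34 == 0x100034 -- correct only when va and offset happen to
   * share the same alignment within a sector. */
  unsigned old_start = va & ~(SECTSIZE - 1);

  /* New code: back va up by offset % SECTSIZE.  The same sector is copied
   * starting at new_start == 0xfffcc, so the byte lands exactly at va. */
  unsigned new_start = va - in_sector;

  printf("old: sector copied to %#x, target byte at %#x\n",
         old_start, old_start + in_sector);
  printf("new: sector copied to %#x, target byte at %#x\n",
         new_start, new_start + in_sector);
  return 0;
}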

bootother.S

@@ -61,7 +61,17 @@ protcseg:
   movl    start-4, %esp
   movl    start-8, %eax
-  jmp     *%eax
+  call    *%eax
+
+  # If bootmain returns (it shouldn't), trigger a Bochs
+  # breakpoint if running under Bochs, then loop.
+  movw    $0x8a00, %ax         # 0x8a00 -> port 0x8a00
+  movw    %ax, %dx
+  outw    %ax, %dx
+  movw    $0x8e00, %ax         # 0x8e00 -> port 0x8a00
+  outw    %ax, %dx
+spin:
+  jmp     spin
 
 
 # Bootstrap GDT
 .p2align 2                     # force 4 byte alignment

main.c

@@ -12,11 +12,6 @@ static void mpmain(void) __attribute__((noreturn));
 int
 main(void)
 {
-  extern char edata[], end[];
-
-  // clear BSS
-  memset(edata, 0, end - edata);
-
   mp_init();         // collect info about this machine
   lapic_init(mp_bcpu());
   cprintf("\ncpu%d: starting xv6\n\n", cpu());