bring bootasm and bootother more in sync
commit 2bc72bdd29
parent 5d1f4b8a3a

2 changed files with 48 additions and 58 deletions

bootasm.S | 46

@@ -9,10 +9,11 @@
 .set PROT_MODE_DSEG, 0x10       # kernel data segment selector
 .set CR0_PE_ON,      0x1        # protected mode enable flag

-.globl start                    # Entry point
+.globl start
 start:
-  .code16                       # This runs in real mode
+  .code16                       # Assemble for 16-bit mode
   cli                           # Disable interrupts
+  cld                           # String operations increment

   # Set up the important data segment registers (DS, ES, SS).
   xorw    %ax,%ax               # Segment number zero

@@ -40,28 +41,21 @@ seta20.2:
   movb    $0xdf,%al             # 0xdf -> port 0x60
   outb    %al,$0x60

-  # Switch from real to protected mode
-  # The descriptors in our GDT allow all physical memory to be accessed.
-  # Furthermore, the descriptors have base addresses of 0, so that the
-  # segment translation is a NOP, ie. virtual addresses are identical to
-  # their physical addresses. With this setup, immediately after
-  # enabling protected mode it will still appear to this code
-  # that it is running directly on physical memory with no translation.
-  # This initial NOP-translation setup is required by the processor
-  # to ensure that the transition to protected mode occurs smoothly.
-real_to_prot:
-  cli                           # Mandatory since we dont set up an IDT
-  lgdt    gdtdesc               # load GDT -- mandatory in protected mode
-  movl    %cr0, %eax            # turn on protected mode
-  orl     $CR0_PE_ON, %eax      #
-  movl    %eax, %cr0            #
-  ### CPU magic: jump to relocation, flush prefetch queue, and reload %cs
-  ### Has the effect of just jmp to the next instruction, but simultaneous
-  ### loads CS with $PROT_MODE_CSEG.
+//PAGEBREAK!
+  # Switch from real to protected mode, using a bootstrap GDT
+  # and segment translation that makes virtual addresses
+  # identical to their physical addresses, so that the
+  # effective memory map does not change during the switch.
+  lgdt    gdtdesc
+  movl    %cr0, %eax
+  orl     $CR0_PE_ON, %eax
+  movl    %eax, %cr0
+
+  # Jump to next instruction, but in 32-bit code segment.
+  # Switches processor into 32-bit mode.
   ljmp    $PROT_MODE_CSEG, $protcseg

-  #### we are in 32-bit protected mode (hence the .code32)
-  .code32
+  .code32                       # Assemble for 32-bit mode
 protcseg:
   # Set up the protected-mode data segment registers
   movw    $PROT_MODE_DSEG, %ax  # Our data segment selector

@@ -71,13 +65,13 @@ protcseg:
   movw    %ax, %gs              # -> GS
   movw    %ax, %ss              # -> SS: Stack Segment

-  # Set up the stack pointer, growing downward from 0x7c00.
+  # Set up the stack pointer and call into C.
   movl    $start, %esp
-
-  call    cmain                 # finish the boot load from C.
-                                # cmain() should not return
+  call    cmain
+
+  # If cmain returns (it shouldn't), loop.
 spin:
-  jmp     spin                  # ..but in case it does, spin
+  jmp     spin

   # Bootstrap GDT
   .p2align 2                    # force 4 byte alignment
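
Note: the rewritten comments above describe the switch as relying on a bootstrap GDT whose descriptors have base 0 and a 4 GB limit, so segment translation leaves addresses unchanged. The C sketch below is not part of this commit; it packs two such flat descriptors using the field layout that SEG_ASM-style macros produce (only SEG_NULLASM is visible in this diff), with seg_desc() and the STA_* names chosen here purely for illustration.

/*
 * Host-side sketch (not code from this commit): pack flat segment
 * descriptors the way the bootstrap GDT does -- base 0, 4 GB limit,
 * 4 KB granularity -- so segmentation adds nothing and the effective
 * memory map is unchanged by the switch.
 */
#include <stdint.h>
#include <stdio.h>

#define STA_X 0x8   /* executable segment */
#define STA_R 0x2   /* readable (code) */
#define STA_W 0x2   /* writable (data) */

static uint64_t
seg_desc(uint8_t type, uint32_t base, uint32_t lim)
{
  uint64_t d = 0;
  d |= (uint64_t)((lim >> 12) & 0xffff);              /* limit 15:0, in 4 KB units */
  d |= (uint64_t)(base & 0xffffff) << 16;             /* base 23:0 */
  d |= (uint64_t)(0x90 | type) << 40;                 /* present, code/data, type */
  d |= (uint64_t)(0xc0 | ((lim >> 28) & 0xf)) << 48;  /* 4 KB gran, 32-bit, limit 19:16 */
  d |= (uint64_t)((base >> 24) & 0xff) << 56;         /* base 31:24 */
  return d;
}

int
main(void)
{
  /* Base 0 means linear address == offset, for both code and data. */
  printf("code seg: %016llx\n",
         (unsigned long long)seg_desc(STA_X | STA_R, 0x0, 0xffffffff));
  printf("data seg: %016llx\n",
         (unsigned long long)seg_desc(STA_W, 0x0, 0xffffffff));
  return 0;
}

Compiled on a host this prints the familiar flat pair 00cf9a000000ffff and 00cf92000000ffff, which is the kind of code/data pair that typically follows the null descriptor at gdt:.
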
bootother.S | 50

@@ -12,14 +12,19 @@
 # mp.c causes each non-boot CPU in turn to jump to start.
 # mp.c puts the correct %esp in start-4, and the place to jump
 # to in start-8.
+#
+# This code is identical to bootasm.S except:
+#   - it does not need to enable A20
+#   - it uses 0(start-4) for the %esp
+#   - it jumps to 0(start-8) instead of calling cmain

-.set PROT_MODE_CSEG,0x8         # code segment selector
-.set PROT_MODE_DSEG,0x10        # data segment selector
-.set CR0_PE_ON,0x1              # protected mode enable flag
+.set PROT_MODE_CSEG, 0x8        # kernel code segment selector
+.set PROT_MODE_DSEG, 0x10       # kernel data segment selector
+.set CR0_PE_ON,      0x1        # protected mode enable flag

 .globl start
 start:
-  .code16                       # This runs in real mode
+  .code16                       # Assemble for 16-bit mode
   cli                           # Disable interrupts
   cld                           # String operations increment

@@ -29,31 +34,21 @@ start:
   movw    %ax,%es               # -> Extra Segment
   movw    %ax,%ss               # -> Stack Segment

-  # Set up the stack pointer, growing downward from 0x7000-8.
-  movw    $start-8,%sp          # Stack Pointer
-
-  # Switch from real to protected mode
-  # The descriptors in our GDT allow all physical memory to be accessed.
-  # Furthermore, the descriptors have base addresses of 0, so that the
-  # segment translation is a NOP, ie. virtual addresses are identical to
-  # their physical addresses. With this setup, immediately after
-  # enabling protected mode it will still appear to this code
-  # that it is running directly on physical memory with no translation.
-  # This initial NOP-translation setup is required by the processor
-  # to ensure that the transition to protected mode occurs smoothly.
-
-  lgdt    gdtdesc               # load GDT -- mandatory in protected mode
-  movl    %cr0, %eax            # turn on protected mode
-  orl     $CR0_PE_ON, %eax      #
-  movl    %eax, %cr0            #
-
-  # CPU magic: jump to relocation, flush prefetch queue, and reload %cs
-  # Has the effect of just jmp to the next instruction, but simultaneous
-  # loads CS with $PROT_MODE_CSEG.
+//PAGEBREAK!
+  # Switch from real to protected mode, using a bootstrap GDT
+  # and segment translation that makes virtual addresses
+  # identical to their physical addresses, so that the
+  # effective memory map does not change during the switch.
+  lgdt    gdtdesc
+  movl    %cr0, %eax
+  orl     $CR0_PE_ON, %eax
+  movl    %eax, %cr0
+
+  # Jump to next instruction, but in 32-bit code segment.
+  # Switches processor into 32-bit mode.
   ljmp    $PROT_MODE_CSEG, $protcseg

-  # We are now in 32-bit protected mode (hence the .code32)
-  .code32
+  .code32                       # Assemble for 32-bit mode
 protcseg:
   # Set up the protected-mode data segment registers
   movw    $PROT_MODE_DSEG, %ax  # Our data segment selector

@@ -63,10 +58,11 @@ protcseg:
   movw    %ax, %gs              # -> GS
   movw    %ax, %ss              # -> SS: Stack Segment

-  movl    start-8, %eax
   movl    start-4, %esp
+  movl    start-8, %eax
   jmp     *%eax

+  # Bootstrap GDT
   .p2align 2                    # force 4 byte alignment
 gdt:
   SEG_NULLASM                   # null seg
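
Note: the new header comment spells out the handshake with mp.c: before waking an AP, the boot processor stores that CPU's initial stack pointer at start-4 and the address it should jump to at start-8, and the code above picks both up right after entering protected mode. A rough C sketch of that caller side follows; only the start-4/start-8 convention comes from this file, while mpmain, lapic_startap, KSTACKSIZE, and the rest are assumptions for illustration, not code from this commit.

/*
 * Sketch of the mp.c side of the hand-off described in bootother.S.
 * The symbol names and sizes here are assumed; the only thing taken
 * from this diff is that bootother.S reads %esp from start-4 and the
 * jump target from start-8.
 */
#include <stdint.h>

#define KSTACKSIZE 4096                       /* assumed per-CPU stack size */

extern void mpmain(void);                     /* assumed per-CPU C entry point */
extern void lapic_startap(int apicid, uint32_t addr);   /* assumed STARTUP-IPI helper */

static void
boot_one_ap(int apicid, char *code, char *stack)
{
  /* "code" is the address bootother.S's image was copied to, i.e. its
     start label, somewhere the AP can execute in real mode. */

  /* Fill in the two words the assembly reads after entering protected
     mode: %esp is loaded from start-4, the jump target from start-8. */
  *(void**)(code - 4) = stack + KSTACKSIZE;   /* initial stack top for this AP */
  *(void**)(code - 8) = (void*)mpmain;        /* where "jmp *%eax" at protcseg lands */

  lapic_startap(apicid, (uint32_t)(uintptr_t)code);
}

Keeping the hand-off this small is what lets bootother.S stay nearly identical to bootasm.S, which is the point of this commit.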