minix/kernel/arch/i386/protect.c
Ben Gras 50e2064049 No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.

There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.

No static pre-allocated memory sizes exist any more.

Changes to booting:
        . The pre_init.c leaves the kernel and modules exactly as
          they were left by the bootloader in physical memory
        . The kernel starts running using physical addressing,
          loaded at a fixed location given in its linker script by the
          bootloader.  All code and data in this phase are linked to
          this fixed low location.
        . It makes a bootstrap pagetable to map itself to a
          fixed high location (also in linker script) and jumps to
          the high address. All code and data then use this high addressing.
        . All code/data symbols linked at the low addresses are prefixed by
          an objcopy step with __k_unpaged_*, so that that code cannot
          reference highly-linked symbols (which aren't valid yet) or vice
          versa (symbols that aren't valid any more).
        . The two addressing modes are separated in the linker script by
          collecting the unpaged_*.o objects and linking them with low
          addresses, and linking the rest high. Some objects are linked
          twice, once low and once high.
        . The bootstrap phase passes a lot of information (e.g. free memory
          list, physical location of the modules, etc.) using the kinfo
          struct.
        . After this bootstrap the low-linked part is freed.
        . The kernel maps in VM into the bootstrap page table so that VM can
          begin executing. Its first job is to make page tables for all other
          boot processes. So VM runs before RS, and RS gets a fully dynamic,
          VM-managed address space. VM gets its privilege info from RS as usual
          but that happens after RS starts running.
        . Both the kernel loading VM and VM organizing boot processes happen
	  using the libexec logic. This removes the last reason for VM to
	  still know much about exec() and vm/exec.c is gone.

Further Implementation:
        . All segments are based at 0 and have a 4 GB limit.
        . The kernel is mapped in at the top of the virtual address
          space so as not to constrain the user processes.
        . Processes do not use segments from the LDT at all; there are
          no segments in the LDT any more, so no LLDT is needed.
        . The Minix segments T/D/S are gone and so none of the
          user-space or in-kernel copy functions use them. The copy
          functions use a process endpoint of NONE to realize it's
          a physical address, virtual otherwise.
        . The umap call only makes sense to translate a virtual address
          to a physical address now.
        . Segments-related calls like newmap and alloc_segments are gone.
        . All segments-related translation in VM is gone (vir2map etc).
        . Initialization in VM is simpler as no moving around is necessary.
        . VM and all other boot processes can be linked wherever they wish
          and will be mapped in at the right location by the kernel and VM
          respectively.

Other changes:
        . The multiboot code is less special: it does not use mb_print
          for its diagnostics any more but uses printf() as normal, saving
          the output into the diagnostics buffer, only printing to the
          screen using the direct print functions if a panic() occurs.
        . The multiboot code uses the flexible 'free memory map list'
          style to receive the list of free memory if available.
        . The kernel determines the memory layout of the processes to
          a degree: it tells VM where the kernel starts and ends and
          where the kernel wants the top of the process to be. VM then
          uses this entire range, i.e. the stack is right at the top,
          and mmap()ped bits of memory are placed below that downwards,
          and the break grows upwards.

Other Consequences:
        . Every process gets its own page table as address spaces
          can't be separated any more by segments.
        . As all segments are 0-based, there is no distinction between
          virtual and linear addresses, nor between userspace and
          kernel addresses.
        . Less work is done when context switching, leading to a net
          performance increase. (8% faster on my machine for 'make servers'.)
	. The layout and configuration of the GDT makes sysenter and syscall
	  possible.
2012-07-15 22:30:15 +02:00

384 lines
12 KiB
C

/* This file contains code for initialization of protected mode, to initialize
* code and data segment descriptors, and to initialize global descriptors
* for local descriptors in the process table.
*/
#include <string.h>
#include <assert.h>
#include <machine/multiboot.h>
#include "kernel/kernel.h"
#include "kernel/proc.h"
#include "archconst.h"
#include "arch_proto.h"
#include <libexec.h>
#define INT_GATE_TYPE (INT_286_GATE | DESC_386_BIT)
#define TSS_TYPE (AVL_286_TSS | DESC_386_BIT)
/* This is OK initially, when the 1:1 mapping is still there. */
char *video_mem = (char *) MULTIBOOT_VIDEO_BUFFER;
/* In-memory layout of one IDT gate descriptor (interrupt/trap/task gate),
 * packed so the fields line up exactly with the hardware format.
 */
struct gatedesc_s {
u16_t offset_low; /* handler offset, bits 0..15 */
u16_t selector; /* code segment selector for the handler */
u8_t pad; /* |000|XXXXX| ig & trpg, |XXXXXXXX| task g */
u8_t p_dpl_type; /* |P|DL|0|TYPE| */
u16_t offset_high; /* handler offset, bits 16..31 */
} __attribute__((packed));
/* Storage for gdt, idt and tss. */
static struct segdesc_s gdt[GDT_SIZE] __aligned(DESC_SIZE);
struct gatedesc_s idt[IDT_SIZE] __aligned(DESC_SIZE);
struct tss_s tss[CONFIG_MAX_CPUS];
/*
 * Translate a kernel virtual address to a physical address.  The kernel
 * is linked at _kern_vir_base but loaded at _kern_phys_base (both set in
 * kernel.lds), so the translation is subtracting a constant offset.
 */
phys_bytes vir2phys(void *vir)
{
	extern char _kern_vir_base, _kern_phys_base; /* in kernel.lds */
	const u32_t kern_off =
		(vir_bytes) &_kern_vir_base - (vir_bytes) &_kern_phys_base;

	return (phys_bytes) vir - kern_off;
}
/*===========================================================================*
* enable_iop *
*===========================================================================*/
void enable_iop(struct proc *pp)
{
/* Allow a user process to use I/O instructions. Change the I/O Permission
* Level bits in the psw. These specify least-privileged Current Permission
* Level allowed to execute I/O instructions. Users and servers have CPL 3.
* You can't have less privilege than that. Kernel has CPL 0, tasks CPL 1.
*/
/* 0x3000 sets both IOPL bits (EFLAGS bits 12-13) to 3. */
pp->p_reg.psw |= 0x3000;
}
/*===========================================================================*
* sdesc *
*===========================================================================*/
/*
 * Fill in the size-related fields (base, limit and granularity) of a
 * segment descriptor.  A size of 0 wraps, after the decrement, to a
 * limit of 0xFFFFFFFF, i.e. a full 4 GB segment.
 */
void sdesc(struct segdesc_s *segdp, phys_bytes base, vir_bytes size)
{
	/* The 32-bit base is scattered over three descriptor fields. */
	segdp->base_low = base;
	segdp->base_middle = base >> BASE_MIDDLE_SHIFT;
	segdp->base_high = base >> BASE_HIGH_SHIFT;

	--size; /* convert to a limit, 0 size means 4G */
	if (size <= BYTE_GRAN_MAX) {
		/* Small segment: limit is expressed directly in bytes. */
		segdp->limit_low = size;
		segdp->granularity = size >> GRANULARITY_SHIFT;
	} else {
		/* Large segment: limit is expressed in 4K pages. */
		segdp->limit_low = size >> PAGE_GRAN_SHIFT;
		segdp->granularity = GRANULAR | (size >>
			(PAGE_GRAN_SHIFT + GRANULARITY_SHIFT));
	}
	segdp->granularity |= DEFAULT; /* means BIG for data seg */
}
/*===========================================================================*
* init_dataseg *
*===========================================================================*/
/*
 * Build a descriptor for a data segment with the given base, size and
 * privilege level (DPL).
 */
void init_param_dataseg(register struct segdesc_s *segdp,
	phys_bytes base, vir_bytes size, const int privilege)
{
	sdesc(segdp, base, size); /* base, limit and granularity */
	/* Present, ordinary (non-system), writable, pre-accessed data
	 * segment; EXECUTABLE and EXPAND_DOWN remain 0.
	 */
	segdp->access = PRESENT | SEGMENT | WRITEABLE | ACCESSED
		| (privilege << DPL_SHIFT);
}
void init_dataseg(int index, const int privilege)
{
init_param_dataseg(&gdt[index], 0, 0xFFFFFFFF, privilege);
}
/*===========================================================================*
* init_codeseg *
*===========================================================================*/
static void init_codeseg(int index, int privilege)
{
/* Build descriptor for a code segment. */
sdesc(&gdt[index], 0, 0xFFFFFFFF);
gdt[index].access = (privilege << DPL_SHIFT)
| (PRESENT | SEGMENT | EXECUTABLE | READABLE);
/* CONFORMING = 0, ACCESSED = 0 */
}
/* IDT entries for the 16 legacy PIC hardware interrupt lines (IRQ 0-15),
 * all at kernel privilege.  The NULL gate terminates the table.
 */
static struct gate_table_s gate_table_pic[] = {
{ hwint00, VECTOR( 0), INTR_PRIVILEGE },
{ hwint01, VECTOR( 1), INTR_PRIVILEGE },
{ hwint02, VECTOR( 2), INTR_PRIVILEGE },
{ hwint03, VECTOR( 3), INTR_PRIVILEGE },
{ hwint04, VECTOR( 4), INTR_PRIVILEGE },
{ hwint05, VECTOR( 5), INTR_PRIVILEGE },
{ hwint06, VECTOR( 6), INTR_PRIVILEGE },
{ hwint07, VECTOR( 7), INTR_PRIVILEGE },
{ hwint08, VECTOR( 8), INTR_PRIVILEGE },
{ hwint09, VECTOR( 9), INTR_PRIVILEGE },
{ hwint10, VECTOR(10), INTR_PRIVILEGE },
{ hwint11, VECTOR(11), INTR_PRIVILEGE },
{ hwint12, VECTOR(12), INTR_PRIVILEGE },
{ hwint13, VECTOR(13), INTR_PRIVILEGE },
{ hwint14, VECTOR(14), INTR_PRIVILEGE },
{ hwint15, VECTOR(15), INTR_PRIVILEGE },
{ NULL, 0, 0}
};
/* IDT entries for CPU exceptions plus the software-interrupt entry points.
 * Entries marked USER_PRIVILEGE (breakpoint, overflow, ipc, kernel call)
 * may be raised from ring 3 with an int instruction; the rest would fault
 * with GP if attempted from user mode.  NULL gate terminates the table.
 */
static struct gate_table_s gate_table_exceptions[] = {
{ divide_error, DIVIDE_VECTOR, INTR_PRIVILEGE },
{ single_step_exception, DEBUG_VECTOR, INTR_PRIVILEGE },
{ nmi, NMI_VECTOR, INTR_PRIVILEGE },
{ breakpoint_exception, BREAKPOINT_VECTOR, USER_PRIVILEGE },
{ overflow, OVERFLOW_VECTOR, USER_PRIVILEGE },
{ bounds_check, BOUNDS_VECTOR, INTR_PRIVILEGE },
{ inval_opcode, INVAL_OP_VECTOR, INTR_PRIVILEGE },
{ copr_not_available, COPROC_NOT_VECTOR, INTR_PRIVILEGE },
{ double_fault, DOUBLE_FAULT_VECTOR, INTR_PRIVILEGE },
{ copr_seg_overrun, COPROC_SEG_VECTOR, INTR_PRIVILEGE },
{ inval_tss, INVAL_TSS_VECTOR, INTR_PRIVILEGE },
{ segment_not_present, SEG_NOT_VECTOR, INTR_PRIVILEGE },
{ stack_exception, STACK_FAULT_VECTOR, INTR_PRIVILEGE },
{ general_protection, PROTECTION_VECTOR, INTR_PRIVILEGE },
{ page_fault, PAGE_FAULT_VECTOR, INTR_PRIVILEGE },
{ copr_error, COPROC_ERR_VECTOR, INTR_PRIVILEGE },
{ alignment_check, ALIGNMENT_CHECK_VECTOR, INTR_PRIVILEGE },
{ machine_check, MACHINE_CHECK_VECTOR, INTR_PRIVILEGE },
{ simd_exception, SIMD_EXCEPTION_VECTOR, INTR_PRIVILEGE },
{ ipc_entry, IPC_VECTOR, USER_PRIVILEGE },
{ kernel_call_entry, KERN_CALL_VECTOR, USER_PRIVILEGE },
{ NULL, 0, 0}
};
/*
 * Set up the per-CPU TSS and its GDT descriptor; returns the selector
 * to load into the task register.  The TSS supplies the ring-0 stack
 * pointer (sp0) and kernel segment selectors used on privilege
 * transitions into the kernel.
 */
int tss_init(unsigned cpu, void * kernel_stack)
{
struct tss_s * t = &tss[cpu];
int index = TSS_INDEX(cpu);
struct segdesc_s *tssgdt;
tssgdt = &gdt[index];
/* First describe the TSS like a small data segment, then overwrite the
 * access byte: a TSS descriptor shares the base/limit field layout with
 * a data segment descriptor.
 */
init_param_dataseg(tssgdt, (phys_bytes) t,
sizeof(struct tss_s), INTR_PRIVILEGE);
tssgdt->access = PRESENT | (INTR_PRIVILEGE << DPL_SHIFT) | TSS_TYPE;
/* Build TSS. */
memset(t, 0, sizeof(*t));
t->ds = t->es = t->fs = t->gs = t->ss0 = KERN_DS_SELECTOR;
t->cs = KERN_CS_SELECTOR;
/* An I/O map base at (or past) the TSS limit means "no I/O permission
 * bitmap": port access is then governed by IOPL alone.
 */
t->iobase = sizeof(struct tss_s); /* empty i/o permissions map */
/*
* make space for process pointer and cpu id and point to the first
* usable word
*/
t->sp0 = ((unsigned) kernel_stack) - X86_STACK_TOP_RESERVED;
/*
* set the cpu id at the top of the stack so we know on which cpu is
* this stack in use when we trap to kernel
*/
*((reg_t *)(t->sp0 + 1 * sizeof(reg_t))) = cpu;
return SEG_SELECTOR(index);
}
/*
 * Overlay a (limit, base) descriptor-table pointer, in the format LGDT
 * and LIDT expect, on top of a GDT slot and return its address.
 * NOTE(review): this reuses the GDT entry's storage for a different
 * struct, so that slot cannot also serve as a segment descriptor.
 */
phys_bytes init_segdesc(int gdt_index, void *base, int size)
{
struct desctableptr_s *dtp = (struct desctableptr_s *) &gdt[gdt_index];
dtp->limit = size - 1;
dtp->base = (phys_bytes) base;
return (phys_bytes) dtp;
}
/*
 * Build the gate descriptor for vector vec_nr in table tab: handler at
 * offset in the kernel code segment, with the given P/DPL/type byte.
 */
void int_gate(struct gatedesc_s *tab,
	unsigned vec_nr, vir_bytes offset, unsigned dpl_type)
{
	struct gatedesc_s *gate = &tab[vec_nr];

	/* The 32-bit handler offset is split across two 16-bit fields. */
	gate->offset_low = offset;
	gate->offset_high = offset >> OFFSET_HIGH_SHIFT;
	gate->selector = KERN_CS_SELECTOR;
	gate->p_dpl_type = dpl_type;
}
/* Convenience wrapper: install a gate directly into the kernel IDT. */
void int_gate_idt(unsigned vec_nr, vir_bytes offset, unsigned dpl_type)
{
int_gate(idt, vec_nr, offset, dpl_type);
}
void idt_copy_vectors(struct gate_table_s * first)
{
struct gate_table_s *gtp;
for (gtp = first; gtp->gate; gtp++) {
int_gate(idt, gtp->vec_nr, (vir_bytes) gtp->gate,
PRESENT | INT_GATE_TYPE |
(gtp->privilege << DPL_SHIFT));
}
}
/* Install the legacy PIC hardware interrupt gates into the IDT. */
void idt_copy_vectors_pic(void)
{
idt_copy_vectors(gate_table_pic);
}
/* Populate the IDT: hardware interrupt gates plus exception/IPC gates. */
void idt_init(void)
{
idt_copy_vectors_pic();
idt_copy_vectors(gate_table_exceptions);
}
struct desctableptr_s gdt_desc, idt_desc;
/* Load the IDT register from the prepared idt_desc pointer. */
void idt_reload(void)
{
x86_lidt(&idt_desc);
}
/*
 * Map a boot process number to its multiboot module.  The first
 * NR_TASKS image slots are kernel tasks with no backing module, so the
 * module index is i - NR_TASKS.  Panics if pnr is not in the image.
 */
multiboot_module_t *bootmod(int pnr)
{
int i;
assert(pnr >= 0);
/* Search for desired process in boot process
* list. The first NR_TASKS ones do not correspond
* to a module, however, so we don't search those.
*/
for(i = NR_TASKS; i < NR_BOOT_PROCS; i++) {
int p;
p = i - NR_TASKS;
if(image[i].proc_nr == pnr) {
assert(p < MULTIBOOT_MAX_MODS);
/* NOTE(review): if mods_count is unsigned this is a signed/unsigned
 * comparison; p >= 0 here so it is safe, but confirm the header type.
 */
assert(p < kinfo.mbi.mods_count);
return &kinfo.module_list[p];
}
}
panic("boot module %d not found", pnr);
}
/*===========================================================================*
* prot_init *
*===========================================================================*/
/*
 * Initialize protected mode for the boot CPU: build and load the GDT,
 * IDT and TSS, switch to our own flat segments, and set up the
 * post-relocation bootstrap page table.  The ordering here matters:
 * tables are built before their descriptor registers are loaded, and
 * segment registers are reloaded only after LGDT.
 */
void prot_init()
{
int sel_tss;
extern char k_boot_stktop;
memset(gdt, 0, sizeof(gdt));
memset(idt, 0, sizeof(idt));
/* Build GDT, IDT, IDT descriptors. */
gdt_desc.base = (u32_t) gdt;
gdt_desc.limit = sizeof(gdt)-1;
idt_desc.base = (u32_t) idt;
idt_desc.limit = sizeof(idt)-1;
/* TSS for the boot CPU (cpu 0), using the boot stack. */
sel_tss = tss_init(0, &k_boot_stktop);
/* Build GDT */
/* There are no LDT segments any more; install a placeholder LDT
 * descriptor so LLDT has something valid to load.
 */
init_param_dataseg(&gdt[LDT_INDEX],
(phys_bytes) 0, 0, INTR_PRIVILEGE); /* unusable LDT */
gdt[LDT_INDEX].access = PRESENT | LDT;
init_codeseg(KERN_CS_INDEX, INTR_PRIVILEGE);
init_dataseg(KERN_DS_INDEX, INTR_PRIVILEGE);
init_codeseg(USER_CS_INDEX, USER_PRIVILEGE);
init_dataseg(USER_DS_INDEX, USER_PRIVILEGE);
x86_lgdt(&gdt_desc); /* Load gdt */
idt_init();
idt_reload();
x86_lldt(LDT_SELECTOR); /* Load bogus ldt */
x86_ltr(sel_tss); /* Load global TSS */
/* Currently the multiboot segments are loaded; which is fine, but
* let's replace them with the ones from our own GDT so we test
* right away whether they work as expected.
*/
x86_load_kerncs();
x86_load_ds(KERN_DS_SELECTOR);
x86_load_es(KERN_DS_SELECTOR);
x86_load_fs(KERN_DS_SELECTOR);
x86_load_gs(KERN_DS_SELECTOR);
x86_load_ss(KERN_DS_SELECTOR);
/* Set up a new post-relocate bootstrap pagetable so that
* we can map in VM, and we no longer rely on pre-relocated
* data.
*/
pg_clear();
pg_identity(); /* Still need 1:1 for lapic and video mem and such. */
pg_mapkernel();
pg_load();
bootstrap_pagetable_done = 1; /* secondary CPU's can use it too */
}
/*
 * Record VM as the owner of the currently loaded page table and hand
 * the bootstrap page table's location (cr3 and its virtual address) to
 * VM's process slot.
 */
void arch_post_init(void)
{
/* Let memory mapping code know what's going on at bootstrap time */
struct proc *vm;
vm = proc_addr(VM_PROC_NR);
get_cpulocal_var(ptproc) = vm;
pg_info(&vm->p_seg.p_cr3, &vm->p_seg.p_cr3_v);
}
/*
 * libexec allocation callback: map fresh pages covering
 * [vaddr, vaddr+len) into the current (bootstrap) page table, activate
 * the mapping and zero the range.  execi is unused here.
 * NOTE(review): vaddr is an off_t used directly as a virtual address;
 * assumes it fits the i386 address space — confirm with callers.
 */
int libexec_pg_alloc(struct exec_info *execi, off_t vaddr, size_t len)
{
pg_map(PG_ALLOCATEME, vaddr, vaddr+len, &kinfo);
pg_load();
memset((char *) vaddr, 0, len);
return OK;
}
/*
 * Arch-specific setup of a boot-image process.  Kernel tasks (negative
 * process numbers) have no module and are skipped.  VM is the special
 * case handled here: it is loaded into the bootstrap page table so it
 * can run first and then build page tables for the other boot processes.
 */
void arch_boot_proc(struct boot_image *ip, struct proc *rp)
{
multiboot_module_t *mod;
if(rp->p_nr < 0) return;
mod = bootmod(rp->p_nr);
/* Important special case: we put VM in the bootstrap pagetable
* so it can run.
*/
if(rp->p_nr == VM_PROC_NR) {
struct exec_info execi;
memset(&execi, 0, sizeof(execi));
/* exec parameters */
execi.stack_high = kinfo.user_sp;
execi.stack_size = 16 * 1024; /* not too crazy as it must be preallocated */
execi.proc_e = ip->endpoint;
/* The module is still identity-mapped, so its physical start address
 * can be read directly as the ELF header.
 */
execi.hdr = (char *) mod->mod_start; /* phys mem direct */
execi.hdr_len = mod->mod_end - mod->mod_start;
strcpy(execi.progname, ip->proc_name);
execi.frame_len = 0;
/* callbacks for use in the kernel */
execi.copymem = libexec_copy_memcpy;
execi.clearmem = libexec_clear_memset;
execi.allocmem_prealloc = libexec_pg_alloc;
execi.allocmem_ondemand = libexec_pg_alloc;
execi.clearproc = NULL;
/* parse VM ELF binary and alloc/map it into bootstrap pagetable */
/* NOTE(review): the return value of libexec_load_elf is ignored; a
 * load failure would go unnoticed here — confirm failure panics
 * elsewhere.
 */
libexec_load_elf(&execi);
/* Initialize the server stack pointer. Take it down three words
* to give startup code something to use as "argc", "argv" and "envp".
*/
arch_proc_init(rp, execi.pc, kinfo.user_sp - 3*4, ip->proc_name);
/* Free VM blob that was just copied into existence. */
cut_memmap(&kinfo, mod->mod_start, mod->mod_end);
}
}