minix/kernel/arch/i386/pg_utils.c

#include <minix/cpufeature.h>
#include <assert.h>
#include "kernel/kernel.h"
#include <libexec.h>
#include "arch_proto.h"
#include <string.h>

/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Convert the linker symbols into absolute values we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* Page directory we can use to map things. */
static u32_t pagedir[1024] __aligned(4096);

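/* Print every memory map entry as a "start-end" physical range, for debugging. */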
void print_memmap(kinfo_t *cbi)
{
	int m;
	assert(cbi->mmap_size < MAXMEMMAP);
	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes addr = cbi->memmap[m].addr,
			endit = cbi->memmap[m].addr + cbi->memmap[m].len;
		printf("%08lx-%08lx ", addr, endit);
	}
	printf("\nsize %08lx\n", cbi->mmap_size);
}

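/* Remove the range [start, end), rounded out to page boundaries, from the
 * free memory map. Entries it only partially overlaps are re-added as the
 * remaining head and/or tail pieces.
 */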
void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
	int m;
	phys_bytes o;

	if((o=start % I386_PAGE_SIZE))
		start -= o;
	if((o=end % I386_PAGE_SIZE))
		end += I386_PAGE_SIZE - o;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes substart = start, subend = end;
		phys_bytes memaddr = cbi->memmap[m].addr,
			memend = cbi->memmap[m].addr + cbi->memmap[m].len;

		/* adjust cut range to be a subset of the free memory */
		if(substart < memaddr) substart = memaddr;
		if(subend > memend) subend = memend;
		if(substart >= subend) continue;

		/* if there is any overlap, forget this one and add
		 * 1-2 subranges back
		 */
		cbi->memmap[m].addr = cbi->memmap[m].len = 0;
		if(substart > memaddr)
			add_memmap(cbi, memaddr, substart-memaddr);
		if(subend < memend)
			add_memmap(cbi, subend, memend-subend);
	}
}

phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
{
	/* Allocate the lowest physical page we have. */
	int m;
#define EMPTY 0xffffffff
	phys_bytes lowest = EMPTY;
	assert(len > 0);
	len = roundup(len, I386_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		if(cbi->memmap[m].len < len) continue;
		if(cbi->memmap[m].addr < lowest) lowest = cbi->memmap[m].addr;
	}
	assert(lowest != EMPTY);
	cut_memmap(cbi, lowest, len);
	return lowest;
}

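/* Register a range of available physical memory in the memory map. The
 * range is clipped to below 4GB, shrunk to whole pages, and stored in the
 * first free memmap slot; mem_high_phys is raised if the range tops it.
 */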
void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
	int m;
#define LIMIT 0xFFFFF000
	/* Truncate available memory at 4GB as the rest of minix
	 * currently can't deal with any bigger.
	 */
	if(addr > LIMIT) return;
	if(addr + len > LIMIT) {
		len -= (addr + len - LIMIT);
	}
	assert(cbi->mmap_size < MAXMEMMAP);
	if(len == 0) return;
	addr = roundup(addr, I386_PAGE_SIZE);
	len = rounddown(len, I386_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < MAXMEMMAP; m++) {
		phys_bytes highmark;
		if(cbi->memmap[m].len) continue;
		cbi->memmap[m].addr = addr;
		cbi->memmap[m].len = len;
		cbi->memmap[m].type = MULTIBOOT_MEMORY_AVAILABLE;
		if(m >= cbi->mmap_size)
			cbi->mmap_size = m+1;
		highmark = addr + len;
		if(highmark > cbi->mem_high_phys) {
			cbi->mem_high_phys = highmark;
		}

		return;
	}

	panic("no available memmap slot");
}

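/* Hand out one page table from a small static pool; return its virtual
 * address, and its physical address through *ph.
 */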
u32_t *alloc_pagetable(phys_bytes *ph)
{
	u32_t *ret;
#define PG_PAGETABLES 6
	static u32_t pagetables[PG_PAGETABLES][1024] __aligned(4096);
	static int pt_inuse = 0;
	if(pt_inuse >= PG_PAGETABLES) panic("no more pagetables");
	assert(sizeof(pagetables[pt_inuse]) == I386_PAGE_SIZE);
	ret = pagetables[pt_inuse++];
	*ph = vir2phys(ret);
	return ret;
}

#define PAGE_KB (I386_PAGE_SIZE / 1024)
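/* Allocate one physical page by shrinking the last non-empty memmap entry
 * by a page and returning the page that was cut off its end.
 */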
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
	int m;
	multiboot_memory_map_t *mmap;

	assert(kernel_may_alloc);

	for(m = cbi->mmap_size-1; m >= 0; m--) {
		mmap = &cbi->memmap[m];
		if(!mmap->len) continue;
		assert(mmap->len > 0);
		assert(!(mmap->len % I386_PAGE_SIZE));
		assert(!(mmap->addr % I386_PAGE_SIZE));
		mmap->len -= I386_PAGE_SIZE;

		return mmap->addr + mmap->len;
	}

	panic("can't find free memory");
}

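/* Fill the boot page directory with an identity mapping of the whole 4GB
 * address space using 4MB pages; ranges at or above the highest physical
 * address are marked non-cacheable (PWT|PCD).
 */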
void pg_identity(kinfo_t *cbi)
{
	int i;
	phys_bytes phys;

	/* We map memory that does not correspond to physical memory
	 * as non-cacheable. Make sure we know what it is.
	 */
	assert(cbi->mem_high_phys);

	/* Set up an identity mapping page directory */
	for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
		u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE |
			I386_VM_USER | I386_VM_WRITE;
		phys = i * I386_BIG_PAGE_SIZE;
		if((cbi->mem_high_phys & I386_VM_ADDR_MASK_4MB)
			<= (phys & I386_VM_ADDR_MASK_4MB)) {
			flags |= I386_VM_PWT | I386_VM_PCD;
		}
		pagedir[i] = phys | flags;
	}
}

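/* Map the kernel itself with 4MB pages at its linked virtual address and
 * return the first page directory entry that is still free.
 */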
int pg_mapkernel(void)
{
	int pde;
	u32_t mapped = 0, kern_phys = kern_phys_start;

	assert(!(kern_vir_start % I386_BIG_PAGE_SIZE));
	assert(!(kern_phys % I386_BIG_PAGE_SIZE));
	pde = kern_vir_start / I386_BIG_PAGE_SIZE; /* start pde */
	while(mapped < kern_kernlen) {
		pagedir[pde] = kern_phys | I386_VM_PRESENT |
			I386_VM_BIGPAGE | I386_VM_WRITE;
		mapped += I386_BIG_PAGE_SIZE;
		kern_phys += I386_BIG_PAGE_SIZE;
		pde++;
	}

	return pde;	/* free pde */
}

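/* Turn on paging: enable 4MB pages (PSE), then PG and WP in CR0, and
 * finally global pages (PGE) if the CPU supports them.
 */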
void vm_enable_paging(void)
{
	u32_t cr0, cr4;
	int pgeok;

	pgeok = _cpufeature(_CPUF_I386_PGE);

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* The boot loader should have put us in protected mode. */
	assert(cr0 & I386_CR0_PE);

	/* First clear PG and PGE flag, as PGE must be enabled after PG. */
	write_cr0(cr0 & ~I386_CR0_PG);
	write_cr4(cr4 & ~(I386_CR4_PGE | I386_CR4_PSE));

	cr0 = read_cr0();
	cr4 = read_cr4();

	/* Our page table contains 4MB entries. */
	cr4 |= I386_CR4_PSE;
	write_cr4(cr4);

	/* First enable paging, then enable global page flag. */
	cr0 |= I386_CR0_PG;
	write_cr0(cr0);
	cr0 |= I386_CR0_WP;
	write_cr0(cr0);

	/* May we enable these features? */
	if(pgeok)
		cr4 |= I386_CR4_PGE;
	write_cr4(cr4);
}

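/* Load our boot page directory into CR3 and return its physical address. */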
phys_bytes pg_load(void)
{
	phys_bytes phpagedir = vir2phys(pagedir);
	write_cr3(phpagedir);
	return phpagedir;
}

void pg_clear(void)
{
	memset(pagedir, 0, sizeof(pagedir));
}

phys_bytes pg_rounddown(phys_bytes b)
{
	phys_bytes o;
	if(!(o = b % I386_PAGE_SIZE))
		return b;
	return b - o;
}

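/* Map the virtual range [vaddr, vaddr_end) to the physical range starting
 * at phys, or to freshly allocated pages if phys is PG_ALLOCATEME; page
 * tables are taken from the static pool as new pde slots are needed.
 */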
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
	static int mapped_pde = -1;
	static u32_t *pt = NULL;
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		assert(!(vaddr % I386_PAGE_SIZE));
	} else {
		assert((vaddr % I386_PAGE_SIZE) == (phys % I386_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	while(vaddr < vaddr_end) {
		phys_bytes source = phys;
		assert(!(vaddr % I386_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % I386_PAGE_SIZE));
		}
		assert(!(source % I386_PAGE_SIZE));
		pde = I386_VM_PDE(vaddr);
		pte = I386_VM_PTE(vaddr);

		if(mapped_pde < pde) {
			phys_bytes ph;
			pt = alloc_pagetable(&ph);
			pagedir[pde] = (ph & I386_VM_ADDR_MASK)
				| I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
			mapped_pde = pde;
		}
		assert(pt);

		pt[pte] = (source & I386_VM_ADDR_MASK) |
			I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;
		vaddr += I386_PAGE_SIZE;
		if(phys != PG_ALLOCATEME)
			phys += I386_PAGE_SIZE;
	}
}

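/* Report the physical and virtual address of the boot page directory. */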
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
	*pagedir_ph = vir2phys(pagedir);
	*pagedir_v = pagedir;
}