/* minix/kernel/arch/earm/pg_utils.c */


#include <minix/cpufeature.h>
#include <minix/type.h>
#include <libexec.h>
#include <assert.h>
#include "kernel/kernel.h"
#include "arch_proto.h"
#include <machine/cpu.h>
#include <string.h>

/* These symbols are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Convert the linker symbols to absolute values we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* Page directory we can use to map things: 4096 first-level entries,
 * 16 KiB aligned, covering the full 4 GiB address space in 1 MiB sections.
 */
static u32_t pagedir[4096] __aligned(16384);
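
/* Print the current boot memory map (one start-end pair per entry). */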
void print_memmap(kinfo_t *cbi)
{
	int m;
	assert(cbi->mmap_size < MAXMEMMAP);
	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes addr = cbi->memmap[m].addr,
			endit = cbi->memmap[m].addr + cbi->memmap[m].len;
		printf("%08lx-%08lx ", addr, endit);
	}
	printf("\nsize %d\n", cbi->mmap_size);
}
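
/* Remove the range [start, end) from the free memory map. The range is
 * page-aligned outward first; any entry that overlaps it is cleared and
 * its non-overlapping remainder(s) are re-added.
 */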
void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
	int m;
	phys_bytes o;

	if((o = start % ARM_PAGE_SIZE))
		start -= o;
	if((o = end % ARM_PAGE_SIZE))
		end += ARM_PAGE_SIZE - o;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes substart = start, subend = end;
		phys_bytes memaddr = cbi->memmap[m].addr,
			memend = cbi->memmap[m].addr + cbi->memmap[m].len;

		/* adjust cut range to be a subset of the free memory */
		if(substart < memaddr) substart = memaddr;
		if(subend > memend) subend = memend;
		if(substart >= subend) continue;

		/* if there is any overlap, forget this one and add
		 * 1-2 subranges back
		 */
		cbi->memmap[m].addr = cbi->memmap[m].len = 0;
		if(substart > memaddr)
			add_memmap(cbi, memaddr, substart-memaddr);
		if(subend < memend)
			add_memmap(cbi, subend, memend-subend);
	}
}
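
/* Add a free memory range to the memory map, clipping it to the 32-bit
 * addressable limit and aligning it inward to whole pages.
 */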
void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
	int m;
#define LIMIT 0xFFFFF000
	/* Truncate available memory at 4GB as the rest of minix
	 * currently can't deal with any bigger.
	 */
	if(addr > LIMIT) return;
	if(addr + len > LIMIT) {
		len -= (addr + len - LIMIT);
	}
	assert(cbi->mmap_size < MAXMEMMAP);
	if(len == 0) return;
	addr = roundup(addr, ARM_PAGE_SIZE);
	len = rounddown(len, ARM_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < MAXMEMMAP; m++) {
		phys_bytes highmark;
		if(cbi->memmap[m].len) continue;
		cbi->memmap[m].addr = addr;
		cbi->memmap[m].len = len;
		cbi->memmap[m].type = MULTIBOOT_MEMORY_AVAILABLE;
		if(m >= cbi->mmap_size)
			cbi->mmap_size = m+1;
		highmark = addr + len;
		if(highmark > cbi->mem_high_phys) {
			cbi->mem_high_phys = highmark;
		}
		return;
	}

	panic("no available memmap slot");
}
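
/* Hand out one of a fixed pool of statically allocated second-level
 * page tables (256 entries, 1 KiB aligned); return its virtual address
 * and, through *ph, its physical address.
 */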
u32_t *alloc_pagetable(phys_bytes *ph)
{
	u32_t *ret;
#define PG_PAGETABLES 24
	static u32_t pagetables[PG_PAGETABLES][256] __aligned(1024);
	static int pt_inuse = 0;
	if(pt_inuse >= PG_PAGETABLES) panic("no more pagetables");
	assert(sizeof(pagetables[pt_inuse]) == 1024);
	ret = pagetables[pt_inuse++];
	*ph = vir2phys(ret);
	return ret;
}
#define PAGE_KB (ARM_PAGE_SIZE / 1024)
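
/* Grab one page of physical memory from the first non-empty entry in
 * the boot memory map and account for it as kernel-allocated.
 */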
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
	int m;
	multiboot_memory_map_t *mmap;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		mmap = &cbi->memmap[m];
		if(!mmap->len) continue;
		assert(mmap->len > 0);
		assert(!(mmap->len % ARM_PAGE_SIZE));
		assert(!(mmap->addr % ARM_PAGE_SIZE));

		u32_t addr = mmap->addr;
		mmap->addr += ARM_PAGE_SIZE;
		mmap->len -= ARM_PAGE_SIZE;
		cbi->kernel_allocated_bytes_dynamic += ARM_PAGE_SIZE;

		return addr;
	}

	panic("can't find free memory");
}
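
/* Fill the page directory with a 1:1 mapping of the entire address
 * space, using 1 MiB sections marked as device memory.
 */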
void pg_identity(kinfo_t *cbi)
{
	int i;
	phys_bytes phys;

	/* We map memory that does not correspond to physical memory
	 * as non-cacheable. Make sure we know what it is.
	 */
	assert(cbi->mem_high_phys);

	/* Set up an identity mapping page directory */
	for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
		u32_t flags = ARM_VM_SECTION
			| ARM_VM_SECTION_USER
			| ARM_VM_SECTION_DEVICE
			| ARM_VM_SECTION_DOMAIN;
		phys = i * ARM_SECTION_SIZE;
		pagedir[i] = phys | flags;
	}
}
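
/* Map the kernel image at its virtual base address, one write-through
 * 1 MiB section at a time. Returns the first free pde after the
 * kernel mapping.
 */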
int pg_mapkernel(void)
{
	int pde;
	u32_t mapped = 0, kern_phys = kern_phys_start;

	assert(!(kern_vir_start % ARM_SECTION_SIZE));
	assert(!(kern_phys_start % ARM_SECTION_SIZE));
	pde = kern_vir_start / ARM_SECTION_SIZE; /* start pde */
	while(mapped < kern_kernlen) {
		pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) | ARM_VM_SECTION
			| ARM_VM_SECTION_SUPER
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_WT;
		mapped += ARM_SECTION_SIZE;
		kern_phys += ARM_SECTION_SIZE;
		pde++;
	}

	return pde;	/* free pde */
}
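
/* Turn on the MMU and caches. The bootloader can leave the system
 * control register in a state that does not match our setup, so make
 * no assumptions: configure TRE and AFE explicitly.
 */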
void vm_enable_paging(void)
{
	u32_t sctlr;

	write_ttbcr(0);

	/* Set all Domains to Client */
	write_dacr(0x55555555);

	sctlr = read_sctlr();

	/* Enable MMU */
	sctlr |= SCTLR_M;

	/* TRE set to zero (default reset value): TEX[2:0] are used,
	 * plus the C and B bits.
	 */
	sctlr &= ~SCTLR_TRE;

	/* AFE set to zero (default reset value): not using the
	 * simplified access permissions model.
	 */
	sctlr &= ~SCTLR_AFE;

	/* Enable instruction and data cache */
	sctlr |= SCTLR_C;
	sctlr |= SCTLR_I;
	write_sctlr(sctlr);
}
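
/* Load the page directory's physical address into TTBR0 and return it. */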
phys_bytes pg_load(void)
{
	phys_bytes phpagedir = vir2phys(pagedir);
	write_ttbr0(phpagedir);
	return phpagedir;
}
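
/* Zero the whole page directory. */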
void pg_clear(void)
{
	memset(pagedir, 0, sizeof(pagedir));
}
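
/* Round an address down to the nearest page boundary. */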
phys_bytes pg_rounddown(phys_bytes b)
{
	phys_bytes o;
	if(!(o = b % ARM_PAGE_SIZE))
		return b;
	return b - o;
}
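
/* Map the physical range starting at 'phys' into the virtual range
 * [vaddr, vaddr_end), allocating second-level page tables as needed.
 * With phys == PG_ALLOCATEME, fresh pages are allocated from the boot
 * memory map instead of mapping a fixed physical range. Because
 * mapped_pde and pt are static, callers are expected to map regions at
 * increasing virtual addresses. For example, a caller might do:
 *
 *	pg_map(PG_ALLOCATEME, vstart, vstart + ARM_PAGE_SIZE, cbi);
 *
 * to back a single virtual page with newly allocated memory.
 */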
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
	static int mapped_pde = -1;
	static u32_t *pt = NULL;
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		assert(!(vaddr % ARM_PAGE_SIZE));
	} else {
		assert((vaddr % ARM_PAGE_SIZE) == (phys % ARM_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	while(vaddr < vaddr_end) {
		phys_bytes source = phys;
		assert(!(vaddr % ARM_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % ARM_PAGE_SIZE));
		}
		assert(!(source % ARM_PAGE_SIZE));
		pde = ARM_VM_PDE(vaddr);
		pte = ARM_VM_PTE(vaddr);
		if(mapped_pde < pde) {
			phys_bytes ph;
			pt = alloc_pagetable(&ph);
			pagedir[pde] = (ph & ARM_VM_PDE_MASK)
				| ARM_VM_PAGEDIR
				| ARM_VM_PDE_DOMAIN;
			mapped_pde = pde;
		}
		assert(pt);
		pt[pte] = (source & ARM_VM_PTE_MASK)
			| ARM_VM_PAGETABLE
			| ARM_VM_PTE_WT
			| ARM_VM_PTE_USER;
		vaddr += ARM_PAGE_SIZE;
		if(phys != PG_ALLOCATEME)
			phys += ARM_PAGE_SIZE;
	}
}
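
/* Return both the physical and virtual address of the page directory. */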
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
	*pagedir_ph = vir2phys(pagedir);
	*pagedir_v = pagedir;
}