b1c4ba4ab6
Due to the ABI we are using, we have to use the earm architecture moniker for the build system to behave correctly. This requires moving some headers around. There are also a few related Makefile updates, as well as minor source-code corrections.
297 lines · 7.3 KiB · C
#include <minix/cpufeature.h>

#include <minix/type.h>
#include <libexec.h>
#include <assert.h>
#include "kernel/kernel.h"
#include "arch_proto.h"
#include <machine/cpu.h>

#include <string.h>
/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Retrieve the absolute values to something we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* Page directory we can use to map things. The ARM first-level
 * translation table has 4096 32-bit entries and must be 16 KiB aligned.
 */
static u32_t pagedir[4096] __aligned(16384);
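/* Print each memory map entry as a start-end address pair, for debugging. */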
void print_memmap(kinfo_t *cbi)
{
        int m;
        assert(cbi->mmap_size < MAXMEMMAP);
        for(m = 0; m < cbi->mmap_size; m++) {
                phys_bytes addr = cbi->memmap[m].addr,
                        endit = cbi->memmap[m].addr + cbi->memmap[m].len;
                printf("%08lx-%08lx ", addr, endit);
        }
        printf("\nsize %08lx\n", cbi->mmap_size);
}
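/* Remove the range [start, end) from the free memory map. The range is
 * first expanded outward to page boundaries; any map entry it overlaps
 * is dropped and up to two leftover subranges are added back.
 */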
void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
        int m;
        phys_bytes o;

        if((o=start % ARM_PAGE_SIZE))
                start -= o;
        if((o=end % ARM_PAGE_SIZE))
                end += ARM_PAGE_SIZE - o;

        assert(kernel_may_alloc);

        for(m = 0; m < cbi->mmap_size; m++) {
                phys_bytes substart = start, subend = end;
                phys_bytes memaddr = cbi->memmap[m].addr,
                        memend = cbi->memmap[m].addr + cbi->memmap[m].len;

                /* adjust cut range to be a subset of the free memory */
                if(substart < memaddr) substart = memaddr;
                if(subend > memend) subend = memend;
                if(substart >= subend) continue;

                /* if there is any overlap, forget this one and add
                 * 1-2 subranges back
                 */
                cbi->memmap[m].addr = cbi->memmap[m].len = 0;
                if(substart > memaddr)
                        add_memmap(cbi, memaddr, substart-memaddr);
                if(subend < memend)
                        add_memmap(cbi, subend, memend-subend);
        }
}
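/* Allocate len bytes (rounded up to whole pages) from the lowest-addressed
 * free region large enough to hold them, removing the range from the map.
 */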
phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
{
        /* Allocate the lowest physical page we have. */
        int m;
#define EMPTY 0xffffffff
        phys_bytes lowest = EMPTY;
        assert(len > 0);
        len = roundup(len, ARM_PAGE_SIZE);

        assert(kernel_may_alloc);

        for(m = 0; m < cbi->mmap_size; m++) {
                if(cbi->memmap[m].len < len) continue;
                if(cbi->memmap[m].addr < lowest) lowest = cbi->memmap[m].addr;
        }
        assert(lowest != EMPTY);
        /* cut_memmap() takes an exclusive end address, not a length */
        cut_memmap(cbi, lowest, lowest + len);
        return lowest;
}
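/* Add the range [addr, addr+len) to the free memory map, in the first
 * unused slot. The range is shrunk inward to page boundaries and clipped
 * at the 4 GiB limit before it is recorded.
 */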
void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
        int m;
#define LIMIT 0xFFFFF000
        /* Truncate available memory at 4GB as the rest of minix
         * currently can't deal with any bigger.
         */
        if(addr > LIMIT) return;
        if(addr + len > LIMIT) {
                len -= (addr + len - LIMIT);
        }
        assert(cbi->mmap_size < MAXMEMMAP);
        if(len == 0) return;
        addr = roundup(addr, ARM_PAGE_SIZE);
        len = rounddown(len, ARM_PAGE_SIZE);

        assert(kernel_may_alloc);

        for(m = 0; m < MAXMEMMAP; m++) {
                phys_bytes highmark;
                if(cbi->memmap[m].len) continue;
                cbi->memmap[m].addr = addr;
                cbi->memmap[m].len = len;
                cbi->memmap[m].type = MULTIBOOT_MEMORY_AVAILABLE;
                if(m >= cbi->mmap_size)
                        cbi->mmap_size = m+1;
                highmark = addr + len;
                if(highmark > cbi->mem_high_phys) {
                        cbi->mem_high_phys = highmark;
                }

                return;
        }

        panic("no available memmap slot");
}
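/* Hand out one second-level (coarse) page table from a small static pool.
 * On ARM a coarse table has 256 32-bit entries, so it is exactly 1 KiB and
 * must be 1 KiB aligned. Returns its virtual address and stores the
 * physical address in *ph.
 */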
u32_t *alloc_pagetable(phys_bytes *ph)
{
        u32_t *ret;
#define PG_PAGETABLES 24
        static u32_t pagetables[PG_PAGETABLES][256] __aligned(1024);
        static int pt_inuse = 0;
        if(pt_inuse >= PG_PAGETABLES) panic("no more pagetables");
        assert(sizeof(pagetables[pt_inuse]) == 1024);
        ret = pagetables[pt_inuse++];
        *ph = vir2phys(ret);
        return ret;
}
#define PAGE_KB (ARM_PAGE_SIZE / 1024)
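/* Grab one physical page from the first free memory region that still has
 * room, advancing that region's start past the page just allocated.
 */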
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
        int m;
        multiboot_memory_map_t *mmap;

        assert(kernel_may_alloc);

        for(m = 0; m < cbi->mmap_size; m++) {
                mmap = &cbi->memmap[m];
                if(!mmap->len) continue;
                assert(mmap->len > 0);
                assert(!(mmap->len % ARM_PAGE_SIZE));
                assert(!(mmap->addr % ARM_PAGE_SIZE));

                u32_t addr = mmap->addr;
                mmap->addr += ARM_PAGE_SIZE;
                mmap->len -= ARM_PAGE_SIZE;

                return addr;
        }

        panic("can't find free memory");
}
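/* Fill the page directory with 1 MiB section mappings so that every
 * virtual address maps to the identical physical address.
 */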
void pg_identity(kinfo_t *cbi)
{
        int i;
        phys_bytes phys;

        /* We map memory that does not correspond to physical memory
         * as non-cacheable. Make sure we know what it is.
         */
        assert(cbi->mem_high_phys);

        /* Set up an identity mapping page directory */
        for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
                u32_t flags = ARM_VM_SECTION |
                        ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
                phys = i * ARM_BIG_PAGE_SIZE;
                pagedir[i] = phys | flags;
        }
}
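/* Map the kernel at its linked virtual address using 1 MiB sections:
 * write-back cacheable, shareable, supervisor-only. Returns the first
 * page directory index that is still free after the kernel.
 */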
int pg_mapkernel(void)
{
        int pde;
        u32_t mapped = 0, kern_phys = kern_phys_start;

        assert(!(kern_vir_start % ARM_BIG_PAGE_SIZE));
        assert(!(kern_phys_start % ARM_BIG_PAGE_SIZE));
        pde = kern_vir_start / ARM_BIG_PAGE_SIZE; /* start pde */
        while(mapped < kern_kernlen) {
                pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) |
                        ARM_VM_SECTION |
                        ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
                        ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
                mapped += ARM_BIG_PAGE_SIZE;
                kern_phys += ARM_BIG_PAGE_SIZE;
                pde++;
        }
        return pde; /* free pde */
}
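/* Turn the MMU on. TTBCR = 0 selects TTBR0 for all translations, DACR
 * 0x55555555 puts all 16 domains in client mode (accesses checked against
 * the page-table permission bits), and SCTLR.M/C/I enable the MMU and the
 * data and instruction caches.
 */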
void vm_enable_paging(void)
{
        u32_t sctlr;

        write_ttbcr(0);

        /* Set all Domains to Client */
        write_dacr(0x55555555);

        sctlr = read_sctlr();

        /* Enable MMU */
        sctlr |= (SCTLR_M);

        /* Enable instruction and data cache */
        sctlr |= SCTLR_C;
        sctlr |= SCTLR_I;
        write_sctlr(sctlr);
}
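/* Point the MMU at our page directory by loading its physical address
 * into TTBR0.
 */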
phys_bytes pg_load(void)
{
        phys_bytes phpagedir = vir2phys(pagedir);
        refresh_tlb();
        write_ttbr0(phpagedir);
        return phpagedir;
}
void pg_clear(void)
{
        memset(pagedir, 0, sizeof(pagedir));
}

phys_bytes pg_rounddown(phys_bytes b)
{
        phys_bytes o;
        if(!(o = b % ARM_PAGE_SIZE))
                return b;
        return b - o;
}
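/* Map the physical range starting at phys to the virtual range
 * [vaddr, vaddr_end), allocating second-level page tables on demand.
 * If phys is PG_ALLOCATEME, a fresh physical page is allocated for
 * every virtual page instead.
 */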
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
        kinfo_t *cbi)
{
        static int mapped_pde = -1;
        static u32_t *pt = NULL;
        int pde, pte;

        assert(kernel_may_alloc);

        if(phys == PG_ALLOCATEME) {
                assert(!(vaddr % ARM_PAGE_SIZE));
        } else {
                assert((vaddr % ARM_PAGE_SIZE) == (phys % ARM_PAGE_SIZE));
                vaddr = pg_rounddown(vaddr);
                phys = pg_rounddown(phys);
        }
        assert(vaddr < kern_vir_start);

        while(vaddr < vaddr_end) {
                phys_bytes source = phys;
                assert(!(vaddr % ARM_PAGE_SIZE));
                if(phys == PG_ALLOCATEME) {
                        source = pg_alloc_page(cbi);
                } else {
                        assert(!(phys % ARM_PAGE_SIZE));
                }
                assert(!(source % ARM_PAGE_SIZE));
                pde = ARM_VM_PDE(vaddr);
                pte = ARM_VM_PTE(vaddr);
                if(mapped_pde < pde) {
                        phys_bytes ph;
                        pt = alloc_pagetable(&ph);
                        pagedir[pde] = (ph & ARM_VM_PDE_MASK)
                                | ARM_VM_PAGEDIR | ARM_VM_PDE_DOMAIN;
                        mapped_pde = pde;
                }
                assert(pt);
                pt[pte] = (source & ARM_VM_PTE_MASK)
                        | ARM_VM_PAGETABLE
                        | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
                        | ARM_VM_PTE_USER;
                vaddr += ARM_PAGE_SIZE;
                if(phys != PG_ALLOCATEME)
                        phys += ARM_PAGE_SIZE;
        }
}
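/* Report both the physical and the virtual address of the boot page
 * directory to the caller.
 */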
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
        *pagedir_ph = vir2phys(pagedir);
        *pagedir_v = pagedir;
}