VM: pagetable.c: harmonize x86/ARM findhole() code

ARM needs to be able to find N consecutive free slots; use same
code for x86.

Change-Id: Ic79677961c8adfca2aeb5385962942ae0d76867c
This commit is contained in:
Ben Gras 2013-02-24 22:00:52 +01:00
parent 90d777f053
commit 718a9ef472
2 changed files with 79 additions and 62 deletions

View file

@@ -54,7 +54,7 @@ struct {
#define page_isfree(i) GET_BIT(free_pages_bitmap, i) #define page_isfree(i) GET_BIT(free_pages_bitmap, i)
#define RESERVEDMAGIC 0x6e4c74d5 #define RESERVEDMAGIC 0x6e4c74d5
#define MAXRESERVEDPAGES 100 #define MAXRESERVEDPAGES 300
#define MAXRESERVEDQUEUES 15 #define MAXRESERVEDQUEUES 15
static struct reserved_pages { static struct reserved_pages {

View file

@@ -44,8 +44,6 @@ static struct pdm {
u32_t *page_directories; u32_t *page_directories;
} pagedir_mappings[MAX_PAGEDIR_PDES]; } pagedir_mappings[MAX_PAGEDIR_PDES];
static u32_t global_bit = 0;
static multiboot_module_t *kern_mb_mod = NULL; static multiboot_module_t *kern_mb_mod = NULL;
static size_t kern_size = 0; static size_t kern_size = 0;
static int kern_start_pde = -1; static int kern_start_pde = -1;
@@ -61,13 +59,14 @@ struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
* page table. * page table.
*/ */
#if SANITYCHECKS #if SANITYCHECKS
#define SPAREPAGES 100 #define SPAREPAGES 200
#define STATIC_SPAREPAGES 90 #define STATIC_SPAREPAGES 190
#else #else
#ifdef __arm__ #ifdef __arm__
# define SPAREPAGES 150 # define SPAREPAGES 150
# define STATIC_SPAREPAGES 140 # define STATIC_SPAREPAGES 140
#else #else
static u32_t global_bit = 0;
# define SPAREPAGES 20 # define SPAREPAGES 20
# define STATIC_SPAREPAGES 15 # define STATIC_SPAREPAGES 15
#endif /* __arm__ */ #endif /* __arm__ */
@@ -150,23 +149,20 @@ static u32_t findhole(int pages)
static u32_t lastv = 0; static u32_t lastv = 0;
pt_t *pt = &vmprocess->vm_pt; pt_t *pt = &vmprocess->vm_pt;
vir_bytes vmin, vmax; vir_bytes vmin, vmax;
#if defined(__arm__) u32_t holev = NO_MEM;
u32_t holev; int holesize = -1;
#endif
vmin = (vir_bytes) (&_end); /* marks end of VM BSS */ vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
vmin += 1024*1024*1024; /* reserve 1GB virtual address space for VM heap */ vmin += 1024*1024*1024; /* reserve 1GB virtual address space for VM heap */
vmin &= ARCH_VM_ADDR_MASK; vmin &= ARCH_VM_ADDR_MASK;
vmax = VM_STACKTOP; vmax = vmin + 100 * 1024 * 1024; /* allow 100MB of address space for VM */
/* Input sanity check. */ /* Input sanity check. */
assert(vmin + VM_PAGE_SIZE >= vmin); assert(vmin + VM_PAGE_SIZE >= vmin);
assert(vmax >= vmin + VM_PAGE_SIZE); assert(vmax >= vmin + VM_PAGE_SIZE);
assert((vmin % VM_PAGE_SIZE) == 0); assert((vmin % VM_PAGE_SIZE) == 0);
assert((vmax % VM_PAGE_SIZE) == 0); assert((vmax % VM_PAGE_SIZE) == 0);
#if defined(__arm__)
assert(pages > 0); assert(pages > 0);
#endif
curv = lastv; curv = lastv;
if(curv < vmin || curv >= vmax) if(curv < vmin || curv >= vmax)
@@ -177,9 +173,6 @@ static u32_t findhole(int pages)
/* Start looking for a free page starting at vmin. */ /* Start looking for a free page starting at vmin. */
while(curv < vmax) { while(curv < vmax) {
int pte; int pte;
#if defined(__arm__)
int i, nohole;
#endif
assert(curv >= vmin); assert(curv >= vmin);
assert(curv < vmax); assert(curv < vmax);
@@ -188,53 +181,42 @@ static u32_t findhole(int pages)
pde = I386_VM_PDE(curv); pde = I386_VM_PDE(curv);
pte = I386_VM_PTE(curv); pte = I386_VM_PTE(curv);
#elif defined(__arm__) #elif defined(__arm__)
holev = curv; /* the candidate hole */ pde = ARM_VM_PDE(curv);
nohole = 0; pte = ARM_VM_PTE(curv);
for (i = 0; i < pages && !nohole; ++i) {
if(curv >= vmax) {
break;
}
#endif #endif
#if defined(__i386__) if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) || (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
!(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) { /* there is a page here - so keep looking for holes */
#elif defined(__arm__) holev = NO_MEM;
pde = ARM_VM_PDE(curv); holesize = 0;
pte = ARM_VM_PTE(curv); } else {
/* there is no page here - so we have a hole, a bigger
* one if we already had one
*/
if(holev == NO_MEM) {
holev = curv;
holesize = 1;
} else holesize++;
/* if page present, no hole */ assert(holesize > 0);
if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && assert(holesize <= pages);
(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
nohole = 1;
/* if not contiguous, no hole */ /* if it's big enough, return it */
if (curv != holev + i * VM_PAGE_SIZE) if(holesize == pages) {
nohole = 1; lastv = curv + VM_PAGE_SIZE;
return holev;
curv+=VM_PAGE_SIZE; }
} }
/* there's a large enough hole */
if (!nohole && i == pages) {
#endif
lastv = curv;
#if defined(__i386__)
return curv;
#elif defined(__arm__)
return holev;
#endif
}
#if defined(__i386__)
curv+=VM_PAGE_SIZE; curv+=VM_PAGE_SIZE;
#elif defined(__arm__) /* if we reached the limit, start scanning from the beginning if
/* Reset curv */ * we haven't looked there yet
#endif */
if(curv >= vmax && try_restart) { if(curv >= vmax && try_restart) {
curv = vmin;
try_restart = 0; try_restart = 0;
curv = vmin;
} }
} }
@@ -785,6 +767,31 @@ void pt_clearmapcache(void)
panic("VMCTL_CLEARMAPCACHE failed"); panic("VMCTL_CLEARMAPCACHE failed");
} }
int pt_writable(struct vmproc *vmp, vir_bytes v)
{
u32_t entry;
pt_t *pt = &vmp->vm_pt;
assert(!(v % VM_PAGE_SIZE));
#if defined(__i386__)
int pde = I386_VM_PDE(v);
int pte = I386_VM_PTE(v);
#elif defined(__arm__)
int pde = ARM_VM_PDE(v);
int pte = ARM_VM_PTE(v);
#endif
assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
assert(pt->pt_pt[pde]);
entry = pt->pt_pt[pde][pte];
#if defined(__i386__)
return((entry & PTF_WRITE) ? 1 : 0);
#elif defined(__arm__)
return((entry & ARCH_VM_PTE_RO) ? 0 : 1);
#endif
}
/*===========================================================================* /*===========================================================================*
* pt_writemap * * pt_writemap *
*===========================================================================*/ *===========================================================================*/
@@ -1443,6 +1450,27 @@ int pt_mapkernel(pt_t *pt)
/* Kernel also wants various mappings of its own. */ /* Kernel also wants various mappings of its own. */
for(i = 0; i < kernmappings; i++) { for(i = 0; i < kernmappings; i++) {
int r; int r;
#if defined(__arm__)
if(kern_mappings[i].phys_addr == 0x48000000) {
addr = kern_mappings[i].phys_addr;
assert(!(kern_mappings[i].len % ARCH_BIG_PAGE_SIZE));
for(mapped = 0; mapped < kern_mappings[i].len;
mapped += ARCH_BIG_PAGE_SIZE) {
int map_pde = addr / ARCH_BIG_PAGE_SIZE;
assert(!(addr % ARCH_BIG_PAGE_SIZE));
assert(addr == (addr & ARCH_VM_PDE_MASK));
assert(!pt->pt_dir[map_pde]);
pt->pt_dir[map_pde] = addr |
ARM_VM_SECTION | ARM_VM_SECTION_DOMAIN |
ARM_VM_SECTION_WB |
ARM_VM_SECTION_SHAREABLE |
ARM_VM_SECTION_SUPER;
addr += ARCH_BIG_PAGE_SIZE;
}
continue;
}
#endif
if((r=pt_writemap(NULL, pt, if((r=pt_writemap(NULL, pt,
kern_mappings[i].vir_addr, kern_mappings[i].vir_addr,
kern_mappings[i].phys_addr, kern_mappings[i].phys_addr,
@@ -1451,17 +1479,6 @@ int pt_mapkernel(pt_t *pt)
return r; return r;
} }
#if defined(__arm__)
if(kern_mappings[i].phys_addr == 0x48000000) {
if((r=pt_writemap(NULL, pt,
kern_mappings[i].phys_addr,
kern_mappings[i].phys_addr,
kern_mappings[i].len,
kern_mappings[i].flags, 0)) != OK) {
return r;
}
}
#endif
} }
return OK; return OK;