vm: replace phys avl by array
. make vm be able to use malloc() by overriding brk() and minix_mmap() functions
. phys regions can then be malloc()ed and free()d instead of being in an avl tree, which is slightly faster
. 'offset' field in phys_region can go too (offset is implied by position in array) but leads to bigger code changes
parent cee2d9a296
commit 29edcad310
15 changed files with 262 additions and 246 deletions
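The gist of the change, before the per-file diffs: each vir_region used to keep its phys_regions in a physr_* AVL tree keyed by page offset; it now keeps a plain array of phys_region pointers, one slot per page, allocated with the malloc() that the overridden _brk()/minix_mmap() below make usable inside VM. Below is a minimal, self-contained sketch of that lookup, with types cut down for illustration; the committed physblock_get()/physblock_set() in region.c carry extra sanity checks.

#include <assert.h>
#include <stddef.h>

#define VM_PAGE_SIZE 4096	/* illustration only; VM uses the arch page size */

struct phys_region { unsigned long offset; };

struct vir_region {
	unsigned long length;			/* region length in bytes */
	struct phys_region **physblocks;	/* one slot per page; NULL = no page mapped */
};

/* The old physr_search(region->phys, offset, AVL_EQUAL) becomes an O(1) array index. */
static struct phys_region *physblock_get(struct vir_region *region, unsigned long offset)
{
	assert(!(offset % VM_PAGE_SIZE));
	assert(offset < region->length);
	return region->physblocks[offset / VM_PAGE_SIZE];
}

/* The old physr_insert()/physr_remove() become a plain store into the slot. */
static void physblock_set(struct vir_region *region, unsigned long offset,
	struct phys_region *newphysr)
{
	assert(!(offset % VM_PAGE_SIZE));
	assert(offset < region->length);
	region->physblocks[offset / VM_PAGE_SIZE] = newphysr;
}

In the diff itself, region_new() sizes this array with calloc(slots, sizeof(struct phys_region *)) and map_region_extend_upto_v() grows it with realloc().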
@@ -95,6 +95,13 @@ void utrace(struct ut *, int);
# define _MALLOC_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */

/* #undef these things so that malloc uses the non-internal symbols.
 * This is necessary for VM to be able to define its own versions, and
 * use this malloc.
 */
#undef minix_mmap
#undef minix_munmap

#include <assert.h>

#include <sys/types.h>
@@ -4,7 +4,7 @@
PROG= vm
SRCS= main.c alloc.c utility.c exit.c fork.c break.c \
	mmap.c slaballoc.c region.c pagefaults.c \
	physravl.c rs.c queryexit.c yieldedavl.c regionavl.c pb.c \
	rs.c queryexit.c yieldedavl.c regionavl.c pb.c \
	mem_anon.c mem_directphys.c mem_anon_contig.c mem_shared.c

DPADD+= ${LIBSYS}
@@ -54,25 +54,22 @@ struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
 * circular dependency on allocating memory and writing it into VM's
 * page table.
 */
#if defined(__i386__)
#if SANITYCHECKS
#define SPAREPAGES 100
#define STATIC_SPAREPAGES 90
#else
#define SPAREPAGES 15
#define STATIC_SPAREPAGES 10
#define SPAREPAGES 20
#define STATIC_SPAREPAGES 15
#endif
#elif defined(__arm__)

#define SPAREPAGEDIRS 11
#define STATIC_SPAREPAGEDIRS 10
#define SPAREPAGES 250
#define STATIC_SPAREPAGES 100

int missing_sparedirs = SPAREPAGEDIRS;
static struct {
	void *pagedir;
	phys_bytes phys;
} sparepagedirs[SPAREPAGEDIRS];
#endif

int missing_spares = SPAREPAGES;
static struct {
@@ -143,11 +140,7 @@ void pt_sanitycheck(pt_t *pt, char *file, int line)
/*===========================================================================*
 * findhole *
 *===========================================================================*/
#if defined(__i386__)
static u32_t findhole(void)
#elif defined(__arm__)
static u32_t findhole(int pages)
#endif
{
/* Find a space in the virtual address space of VM. */
	u32_t curv;
@@ -159,7 +152,9 @@ static u32_t findhole(int pages)
	u32_t holev;
#endif

	vmin = (vir_bytes) (&_end) & ARCH_VM_ADDR_MASK; /* marks end of VM BSS */
	vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
	vmin += 1024*1024*1024; /* reserve 1GB virtual address space for VM heap */
	vmin &= ARCH_VM_ADDR_MASK;
	vmax = VM_STACKTOP;

	/* Input sanity check. */
@@ -298,10 +293,10 @@ static void *vm_getsparepage(phys_bytes *phys)
			return sp;
		}
	}
	printf("no spare found, %d missing\n", missing_spares);
	return NULL;
}

#if defined(__arm__)
/*===========================================================================*
 * vm_getsparepagedir *
 *===========================================================================*/
@@ -322,7 +317,6 @@ static void *vm_getsparepagedir(phys_bytes *phys)
	}
	return NULL;
}
#endif

/*===========================================================================*
 * vm_checkspares *
@@ -332,7 +326,7 @@ static void *vm_checkspares(void)
	int s, n = 0;
	static int total = 0, worst = 0;
	assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
	for(s = 0; s < SPAREPAGES && missing_spares > 0; s++)
	for(s = 0; s < SPAREPAGES && missing_spares > 0; s++) {
	    if(!sparepages[s].page) {
		n++;
		if((sparepages[s].page = vm_allocpage(&sparepages[s].phys,
@@ -344,6 +338,7 @@ static void *vm_checkspares(void)
			printf("VM: warning: couldn't get new spare page\n");
		}
	    }
	}
	if(worst < n) worst = n;
	total += n;

@@ -376,14 +371,14 @@ static void *vm_checksparedirs(void)

	return NULL;
}

#endif

static int pt_init_done;

/*===========================================================================*
 * vm_allocpage *
 *===========================================================================*/
void *vm_allocpage(phys_bytes *phys, int reason)
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
@@ -392,13 +387,13 @@ void *vm_allocpage(phys_bytes *phys, int reason)
	int r;
	static int level = 0;
	void *ret;
#if defined(__arm__)
	u32_t mem_bytes, mem_clicks, mem_flags;
#endif
	u32_t mem_flags = 0;

	pt = &vmprocess->vm_pt;
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	assert(pages > 0);

	level++;

	assert(level >= 1);
@@ -406,16 +401,13 @@ void *vm_allocpage(phys_bytes *phys, int reason)

	if((level > 1) || !pt_init_done) {
		void *s;
#if defined(__i386__)
		s=vm_getsparepage(phys);
#elif defined(__arm__)

		if (reason == VMP_PAGEDIR)
			s=vm_getsparepagedir(phys);
		else
			s=vm_getsparepage(phys);

#endif
		if(pages == 1) s=vm_getsparepage(phys);
		else if(pages == 4) s=vm_getsparepagedir(phys);
		else panic("%d pages", pages);

		level--;
		if(!s) {
			util_stacktrace();
@@ -427,23 +419,14 @@ void *vm_allocpage(phys_bytes *phys, int reason)

#if defined(__arm__)
	if (reason == VMP_PAGEDIR) {
		mem_bytes = ARCH_PAGEDIR_SIZE;
		mem_flags = PAF_ALIGN16K;
	} else {
		mem_bytes = VM_PAGE_SIZE;
		mem_flags = 0;
		mem_flags |= PAF_ALIGN16K;
	}
	mem_clicks = mem_bytes / VM_PAGE_SIZE * CLICKSPERPAGE;

#endif

	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
#if defined(__i386__)
	loc = findhole();
#elif defined(__arm__)
	loc = findhole(mem_bytes / VM_PAGE_SIZE);
#endif
	loc = findhole(pages);
	if(loc == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: findhole failed\n");
@@ -453,11 +436,7 @@ void *vm_allocpage(phys_bytes *phys, int reason)
	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
#if defined(__i386__)
	if((newpage = alloc_mem(CLICKSPERPAGE, 0)) == NO_MEM) {
#elif defined(__arm__)
	if((newpage = alloc_mem(mem_clicks, mem_flags)) == NO_MEM) {
#endif
	if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
@@ -466,16 +445,13 @@ void *vm_allocpage(phys_bytes *phys, int reason)
	*phys = CLICK2ABS(newpage);

	/* Map this page into our address space. */
#if defined(__i386__)
	if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW, 0)) != OK) {
		free_mem(newpage, CLICKSPERPAGE);
#elif defined(__arm__)
	if((r=pt_writemap(vmprocess, pt, loc, *phys, mem_bytes,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE, 0)) != OK) {
		free_mem(newpage, mem_clicks);
	if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
		| ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
#endif
		, 0)) != OK) {
		free_mem(newpage, pages);
		printf("vm_allocpage writemap failed\n");
		level--;
		return NULL;
@@ -494,6 +470,11 @@ void *vm_allocpage(phys_bytes *phys, int reason)
	return ret;
}

void *vm_allocpage(phys_bytes *phys, int reason)
{
	return vm_allocpages(phys, reason, 1);
}

/*===========================================================================*
 * vm_pagelock *
 *===========================================================================*/
@@ -1089,12 +1070,12 @@ int pt_new(pt_t *pt)
	 * the page directories (the page_directories data).
	 */
	if(!pt->pt_dir &&
		!(pt->pt_dir = vm_allocpage((phys_bytes *)&pt->pt_dir_phys, VMP_PAGEDIR))) {
		!(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys,
			VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) {
		return ENOMEM;
	}
#if defined(__arm__)

	assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));
#endif

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
		pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */
@@ -1360,6 +1341,8 @@ void pt_init(void)

	pt_init_done = 1;

	vm_checkspares();

	/* All OK. */
	return;
}
@@ -39,7 +39,6 @@ static int anon_contig_new(struct vir_region *region)
	u32_t allocflags;
	phys_bytes new_pages, new_page_cl, cur_ph;
	int p, pages;
	physr_iter iter;

	allocflags = vrallocflags(region->flags);

@@ -68,20 +67,15 @@ static int anon_contig_new(struct vir_region *region)

	cur_ph = new_pages = CLICK2ABS(new_page_cl);

	physr_start_iter_least(region->phys, &iter);

	for(p = 0; p < pages; p++) {
		struct phys_region *pr = physr_get_iter(&iter);
		struct phys_region *pr = physblock_get(region, p * VM_PAGE_SIZE);
		assert(pr);
		assert(pr->ph);
		assert(pr->ph->phys == MAP_NONE);
		assert(pr->offset == p * VM_PAGE_SIZE);
		pr->ph->phys = cur_ph + pr->offset;
		physr_incr_iter(&iter);
	}

	assert(!physr_get_iter(&iter));

	return OK;
}

@@ -129,12 +129,11 @@ static int shared_pagefault(struct vmproc *vmp, struct vir_region *region,
	assert(ph->ph->phys == MAP_NONE);
	pb_free(ph->ph);

	if(!(pr = physr_search(src_region->phys, ph->offset, AVL_EQUAL))) {
	if(!(pr = physblock_get(src_region, ph->offset))) {
		int r;
		if((r=map_pf(src_vmp, src_region, ph->offset, write)) != OK)
			return r;
		if(!(pr = physr_search(src_region->phys, ph->offset,
			AVL_EQUAL))) {
		if(!(pr = physblock_get(src_region, ph->offset))) {
			panic("missing region after pagefault handling");
		}
	}
@@ -27,7 +27,6 @@
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "physravl.h"
#include "memlist.h"

struct phys_block *pb_new(phys_bytes phys)
@@ -84,7 +83,7 @@ struct phys_region *pb_reference(struct phys_block *newpb,
	/* New physical region. */
	pb_link(newphysr, newpb, offset, region);

	physr_insert(region->phys, newphysr);
	physblock_set(region, offset, newphysr);

	return newphysr;
}
@@ -129,5 +128,5 @@ void pb_unreferenced(struct vir_region *region, struct phys_region *pr, int rm)

	pr->ph = NULL;

	if(rm) physr_remove(region->phys, pr->offset);
	if(rm) physblock_set(region, pr->offset, NULL);
}
@@ -17,10 +17,6 @@ typedef struct phys_region {

	/* list of phys_regions that reference the same phys_block */
	struct phys_region *next_ph_list;

	/* AVL fields */
	struct phys_region *less, *greater;
	int factor;
} phys_region_t;

#endif
@@ -1,7 +0,0 @@

#include "proto.h"
#include "sanitycheck.h"
#include "phys_region.h"
#include "physravl_defs.h"
#include "cavl_impl.h"
@@ -1,10 +0,0 @@

#ifndef _PHYSRAVL_H
#define _PHYSRAVL_H

#include "phys_region.h"
#include "physravl_defs.h"
#include "cavl_if.h"
#include "unavl.h"

#endif
@@ -1,20 +0,0 @@

#include <minix/u64.h>

#define AVL_UNIQUE(id) physr_ ## id
#define AVL_HANDLE phys_region_t *
#define AVL_KEY vir_bytes
#define AVL_MAX_DEPTH 30 /* good for 2 million nodes */
#define AVL_NULL NULL
#define AVL_GET_LESS(h, a) (h)->less
#define AVL_GET_GREATER(h, a) (h)->greater
#define AVL_SET_LESS(h1, h2) USE((h1), (h1)->less = h2;);
#define AVL_SET_GREATER(h1, h2) USE((h1), (h1)->greater = h2;);
#define AVL_GET_BALANCE_FACTOR(h) (h)->factor
#define AVL_SET_BALANCE_FACTOR(h, f) USE((h), (h)->factor = f;);
#define AVL_SET_ROOT(h, v) USE((h), (h)->root = v;);
#define AVL_COMPARE_KEY_KEY(k1, k2) ((k1) > (k2) ? 1 : ((k1) < (k2) ? -1 : 0))
#define AVL_COMPARE_KEY_NODE(k, h) AVL_COMPARE_KEY_KEY((k), (h)->offset)
#define AVL_COMPARE_NODE_NODE(h1, h2) AVL_COMPARE_KEY_KEY((h1)->offset, (h2)->offset)
#define AVL_INSIDE_STRUCT char pad[4];
@@ -92,6 +92,7 @@ int pt_writemap(struct vmproc * vmp, pt_t *pt, vir_bytes v, phys_bytes
int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes, int write);
int pt_bind(pt_t *pt, struct vmproc *who);
void *vm_allocpage(phys_bytes *p, int cat);
void *vm_allocpages(phys_bytes *p, int cat, int pages);
void *vm_allocpagedir(phys_bytes *p);
void pt_cycle(void);
int pt_mapkernel(pt_t *pt);
@@ -145,9 +146,12 @@ void printregionstats(struct vmproc *vmp);
void map_setparent(struct vmproc *vmp);
int yielded_block_cmp(struct block_id *, struct block_id *);
struct phys_region *map_clone_ph_block(struct vmproc *vmp,
	struct vir_region *region, struct phys_region *ph, physr_iter *iter);
	struct vir_region *region, struct phys_region *ph);
u32_t vrallocflags(u32_t flags);
int map_free(struct vir_region *region);
struct phys_region *physblock_get(struct vir_region *region, vir_bytes offset);
void physblock_set(struct vir_region *region, vir_bytes offset,
	struct phys_region *newphysr);

struct vir_region * map_region_lookup_tag(struct vmproc *vmp, u32_t
	tag);
@@ -14,6 +14,7 @@
#include <sys/mman.h>

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
@@ -25,7 +26,7 @@
#include "glo.h"
#include "region.h"
#include "sanitycheck.h"
#include "physravl.h"
#include "yieldedavl.h"
#include "memlist.h"
#include "memtype.h"

@@ -74,23 +75,50 @@ static yielded_avl *get_yielded_avl(block_id_t id)
	return &vm_yielded_blocks[h];
}

void map_printregion(struct vmproc *vmp, struct vir_region *vr)
void map_printregion(struct vir_region *vr)
{
	physr_iter iter;
	int i;
	struct phys_region *ph;
	printf("map_printmap: map_name: %s\n", vr->memtype->name);
	printf("\t%lx (len 0x%lx, %lukB), %p\n",
		vr->vaddr, vr->length, vr->length/1024, vr->memtype->name);
	printf("\t\tphysblocks:\n");
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
	for(i = 0; i < vr->length/VM_PAGE_SIZE; i++) {
		if(!(ph=vr->physblocks[i])) continue;
		printf("\t\t@ %lx (refs %d): phys 0x%lx\n",
			(vr->vaddr + ph->offset),
			ph->ph->refcount, ph->ph->phys);
		physr_incr_iter(&iter);
	}
}

struct phys_region *physblock_get(struct vir_region *region, vir_bytes offset)
{
	int i;
	struct phys_region *foundregion;
	assert(!(offset % VM_PAGE_SIZE));
	assert(offset >= 0 && offset < region->length);
	i = offset/VM_PAGE_SIZE;
	if((foundregion = region->physblocks[i]))
		assert(foundregion->offset == offset);
	return foundregion;
}

void physblock_set(struct vir_region *region, vir_bytes offset,
	struct phys_region *newphysr)
{
	int i;
	assert(!(offset % VM_PAGE_SIZE));
	assert(offset >= 0 && offset < region->length);
	i = offset/VM_PAGE_SIZE;
	if(newphysr) {
		assert(!region->physblocks[i]);
		assert(newphysr->offset == offset);
	} else {
		assert(region->physblocks[i]);
	}
	region->physblocks[i] = newphysr;
}

/*===========================================================================*
 * map_printmap *
 *===========================================================================*/
@@ -104,7 +132,7 @@ struct vmproc *vmp;

	region_start_iter_least(&vmp->vm_regions_avl, &iter);
	while((vr = region_get_iter(&iter))) {
		map_printregion(vmp, vr);
		map_printregion(vr);
		region_incr_iter(&iter);
	}
}
@@ -156,7 +184,7 @@ static int map_sanitycheck_pt(struct vmproc *vmp,
	if(r != OK) {
		printf("proc %d phys_region 0x%lx sanity check failed\n",
			vmp->vm_endpoint, pr->offset);
		map_printregion(vmp, vr);
		map_printregion(vr);
	}

	return r;
@@ -176,19 +204,20 @@ void map_sanitycheck(char *file, int line)
 */
#define ALLREGIONS(regioncode, physcode) \
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) { \
		vir_bytes voffset; \
		region_iter v_iter; \
		struct vir_region *vr; \
		if(!(vmp->vm_flags & VMF_INUSE)) \
			continue; \
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter); \
		while((vr = region_get_iter(&v_iter))) { \
			physr_iter iter; \
			struct phys_region *pr; \
			regioncode; \
			physr_start_iter_least(vr->phys, &iter); \
			while((pr = physr_get_iter(&iter))) { \
			for(voffset = 0; voffset < vr->length; \
				voffset += VM_PAGE_SIZE) { \
				if(!(pr = physblock_get(vr, voffset))) \
					continue; \
				physcode; \
				physr_incr_iter(&iter); \
			} \
			region_incr_iter(&v_iter); \
		} \
@@ -201,6 +230,7 @@ void map_sanitycheck(char *file, int line)

	/* Do counting for consistency check. */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,MYASSERT(pr->offset == voffset););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(pr->parent->memtype->ev_sanitycheck)
@@ -466,12 +496,19 @@ static vir_bytes region_find_slot(struct vmproc *vmp,
	return region_find_slot_range(vmp, minv, maxv, length);
}

static int phys_slot(vir_bytes len)
{
	assert(!(len % VM_PAGE_SIZE));
	return len / VM_PAGE_SIZE;
}

struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
	int flags, mem_type_t *memtype)
{
	physr_avl *phavl;
	struct vir_region *newregion;
	struct phys_region **physregions;
	static u32_t id;
	int slots = phys_slot(length);

	if(!(SLABALLOC(newregion))) {
		printf("vm: region_new: could not allocate\n");
@@ -490,14 +527,13 @@ USE(newregion,
	newregion->lower = newregion->higher = NULL;
	newregion->parent = vmp;);

	SLABALLOC(phavl);
	if(!phavl) {
		printf("VM: region_new: allocating phys avl failed\n");
	if(!(physregions = calloc(slots, sizeof(struct phys_region *)))) {
		printf("VM: region_new: allocating phys blocks failed\n");
		SLABFREE(newregion);
		return NULL;
	}
	USE(newregion, newregion->phys = phavl;);
	physr_init(newregion->phys);

	USE(newregion, newregion->physblocks = physregions;);

	return newregion;
}
@@ -543,8 +579,9 @@ mem_type_t *memtype;
	if(mapflags & MF_PREALLOC) {
		if(map_handle_memory(vmp, newregion, 0, length, 1) != OK) {
			printf("VM: map_page_region: prealloc failed\n");
			free(newregion->physblocks);
			USE(newregion,
				SLABFREE(newregion->phys););
				newregion->physblocks = NULL;);
			SLABFREE(newregion);
			return NULL;
		}
@@ -580,51 +617,37 @@ static int map_subfree(struct vir_region *region,
	vir_bytes start, vir_bytes len)
{
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes end = start+len;

	int full = 0;
	vir_bytes voffset;

#if SANITYCHECKS
	{
	SLABSANE(region);
	SLABSANE(region->phys);
	physr_start_iter_least(region->phys, &iter);
	while((pr = physr_get_iter(&iter))) {
	for(voffset = 0; voffset < phys_slot(region->length);
		voffset += VM_PAGE_SIZE) {
		struct phys_region *others;
		struct phys_block *pb;

		if(!(pr = physblock_get(region, voffset)))
			continue;

		pb = pr->ph;

		for(others = pb->firstregion; others;
			others = others->next_ph_list) {
			assert(others->ph == pb);
		}
		physr_incr_iter(&iter);
	}
	}
#endif

	if(start == 0 && len == region->length)
		full = 1;

	physr_init_iter(&iter);
	physr_start_iter(region->phys, &iter, start, AVL_GREATER_EQUAL);
	while((pr = physr_get_iter(&iter))) {
		physr_incr_iter(&iter);
		if(pr->offset >= end)
			break;
		pb_unreferenced(region, pr, !full);
		if(!full) {
			physr_start_iter(region->phys, &iter,
				pr->offset, AVL_GREATER_EQUAL);
		}
	for(voffset = start; voffset < end; voffset+=VM_PAGE_SIZE) {
		if(!(pr = physblock_get(region, voffset)))
			continue;
		assert(pr->offset >= start);
		assert(pr->offset < end);
		pb_unreferenced(region, pr, 1);
		SLABFREE(pr);
	}

	if(full)
		physr_init(region->phys);

	return OK;
}

@@ -642,9 +665,8 @@ int map_free(struct vir_region *region)

	if(region->memtype->ev_delete)
		region->memtype->ev_delete(region);

	USE(region,
		SLABFREE(region->phys););
	free(region->physblocks);
	region->physblocks = NULL;
	SLABFREE(region);

	return OK;
@@ -829,7 +851,7 @@ struct phys_region **physr;
	if(offset >= r->vaddr && offset < r->vaddr + r->length) {
		ph = offset - r->vaddr;
		if(physr) {
			*physr = physr_search(r->phys, ph, AVL_EQUAL);
			*physr = physblock_get(r, ph);
			if(*physr) assert((*physr)->offset == ph);
		}
		return r;
@@ -862,11 +884,10 @@ u32_t vrallocflags(u32_t flags)
/*===========================================================================*
 * map_clone_ph_block *
 *===========================================================================*/
struct phys_region *map_clone_ph_block(vmp, region, ph, iter)
struct phys_region *map_clone_ph_block(vmp, region, ph)
struct vmproc *vmp;
struct vir_region *region;
struct phys_region *ph;
physr_iter *iter;
{
	vir_bytes offset;
	u32_t allocflags;
@@ -927,15 +948,10 @@ physr_iter *iter;
	if(copy_abs2region(physaddr, region, offset, VM_PAGE_SIZE) != OK)
		panic("copy_abs2region failed, no good reason for that");

	newpr = physr_search(region->phys, offset, AVL_EQUAL);
	newpr = physblock_get(region, offset);
	assert(newpr);
	assert(newpr->offset == offset);

	if(iter) {
		physr_start_iter(region->phys, iter, offset, AVL_EQUAL);
		assert(physr_get_iter(iter) == newpr);
	}

	SANITYCHECK(SCL_FUNCTIONS);

	return newpr;
@@ -964,7 +980,7 @@ int write;

	SANITYCHECK(SCL_FUNCTIONS);

	if(!(ph = physr_search(region->phys, offset, AVL_EQUAL))) {
	if(!(ph = physblock_get(region, offset))) {
		struct phys_block *pb;

		/* New block. */
@@ -993,6 +1009,7 @@ int write;
	if(!write || !region->memtype->writable(ph)) {
		assert(region->memtype->ev_pagefault);
		assert(ph->ph);

		if((r = region->memtype->ev_pagefault(vmp,
			region, ph, write)) == SUSPEND) {
			panic("map_pf: memtype->ev_pagefault returned SUSPEND\n");
@@ -1071,21 +1088,6 @@ int map_pin_memory(struct vmproc *vmp)
	return OK;
}

#if SANITYCHECKS
static int count_phys_regions(struct vir_region *vr)
{
	int n = 0;
	struct phys_region *ph;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
		n++;
		physr_incr_iter(&iter);
	}
	return n;
}
#endif

/*===========================================================================*
 * map_copy_region *
 *===========================================================================*/
@@ -1102,11 +1104,11 @@ static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region
	struct vir_region *newvr;
	struct phys_region *ph;
	int r;
	physr_iter iter;
#if SANITYCHECKS
	int cr;
	cr = count_phys_regions(vr);
	cr = physregions(vr);
#endif
	vir_bytes p;

	if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags, vr->memtype)))
		return NULL;
@@ -1117,21 +1119,20 @@ static struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region
		return NULL;
	}

	physr_start_iter_least(vr->phys, &iter);
	while((ph = physr_get_iter(&iter))) {
	for(p = 0; p < phys_slot(vr->length); p++) {
		if(!(ph = physblock_get(vr, p*VM_PAGE_SIZE))) continue;
		struct phys_region *newph = pb_reference(ph->ph, ph->offset, newvr);

		if(!newph) { map_free(newvr); return NULL; }

#if SANITYCHECKS
		USE(newph, newph->written = 0;);
		assert(count_phys_regions(vr) == cr);
		assert(physregions(vr) == cr);
#endif
		physr_incr_iter(&iter);
	}

#if SANITYCHECKS
	assert(count_phys_regions(vr) == count_phys_regions(newvr));
	assert(physregions(vr) == physregions(newvr));
#endif

	return newvr;
@@ -1145,13 +1146,13 @@ int copy_abs2region(phys_bytes abs, struct vir_region *destregion,

{
	assert(destregion);
	assert(destregion->phys);
	assert(destregion->physblocks);
	while(len > 0) {
		phys_bytes sublen, suboffset;
		struct phys_region *ph;
		assert(destregion);
		assert(destregion->phys);
		if(!(ph = physr_search(destregion->phys, offset, AVL_LESS_EQUAL))) {
		assert(destregion->physblocks);
		if(!(ph = physblock_get(destregion, offset))) {
			printf("VM: copy_abs2region: no phys region found (1).\n");
			return EFAULT;
		}
@@ -1195,11 +1196,9 @@ int map_writept(struct vmproc *vmp)
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter ph_iter;
		physr_start_iter_least(vr->phys, &ph_iter);

		while((ph = physr_get_iter(&ph_iter))) {
			physr_incr_iter(&ph_iter);
		vir_bytes p;
		for(p = 0; p < vr->length; p += VM_PAGE_SIZE) {
			if(!(ph = physblock_get(vr, p))) continue;

			if((r=map_ph_writept(vmp, vr, ph)) != OK) {
				printf("VM: map_writept: failed\n");
@@ -1250,34 +1249,30 @@ struct vir_region *start_src_vr;
	SANITYCHECK(SCL_FUNCTIONS);

	while((vr = region_get_iter(&v_iter))) {
		physr_iter iter_orig, iter_new;
		struct vir_region *newvr;
		struct phys_region *orig_ph, *new_ph;
		if(!(newvr = map_copy_region(dst, vr))) {
			map_free_proc(dst);
			return ENOMEM;
		}
		USE(newvr, newvr->parent = dst;);
		region_insert(&dst->vm_regions_avl, newvr);
		physr_start_iter_least(vr->phys, &iter_orig);
		physr_start_iter_least(newvr->phys, &iter_new);
		while((orig_ph = physr_get_iter(&iter_orig))) {
			struct phys_block *pb;
			new_ph = physr_get_iter(&iter_new);
			/* Check two physregions both are nonnull,
			 * are different, and match physblocks.
			 */
			assert(new_ph);
			assert(orig_ph);
			assert(orig_ph != new_ph);
			pb = orig_ph->ph;
			assert(orig_ph->ph == new_ph->ph);
		assert(vr->length == newvr->length);

			/* Get next new physregion */
			physr_incr_iter(&iter_orig);
			physr_incr_iter(&iter_new);
#if SANITYCHECKS
		{
			vir_bytes vaddr;
			struct phys_region *orig_ph, *new_ph;
			assert(vr->physblocks != newvr->physblocks);
			for(vaddr = 0; vaddr < vr->length; vaddr += VM_PAGE_SIZE) {
				orig_ph = physblock_get(vr, vaddr);
				new_ph = physblock_get(newvr, vaddr);
				if(!orig_ph) { assert(!new_ph); continue;}
				assert(new_ph);
				assert(orig_ph != new_ph);
				assert(orig_ph->ph == new_ph->ph);
			}
		assert(!physr_get_iter(&iter_new));
		}
#endif
		region_incr_iter(&v_iter);
	}

@@ -1292,6 +1287,8 @@ int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
{
	vir_bytes offset = v;
	struct vir_region *vr, *nextvr;
	struct phys_region **newpr;
	int newslots, prevslots, addedslots;

	offset = roundup(offset, VM_PAGE_SIZE);

@@ -1300,7 +1297,24 @@ int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
		return ENOMEM;
	}

	if(vr->vaddr + vr->length >= v) return OK;

	assert(vr->vaddr <= offset);
	newslots = phys_slot(offset - vr->vaddr);
	prevslots = phys_slot(vr->length);
	assert(newslots >= prevslots);
	addedslots = newslots - prevslots;

	if(!(newpr = realloc(vr->physblocks,
		newslots * sizeof(struct phys_region *)))) {
		printf("VM: map_region_extend_upto_v: realloc failed\n");
		return ENOMEM;
	}

	vr->physblocks = newpr;
	memset(vr->physblocks + prevslots, 0,
		addedslots * sizeof(struct phys_region *));

	if((nextvr = getnextvr(vr))) {
		assert(offset <= nextvr->vaddr);
	}
@@ -1328,6 +1342,7 @@ int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
 * memory it used to reference if any.
 */
	vir_bytes regionstart;
	int freeslots = phys_slot(len);

	SANITYCHECK(SCL_FUNCTIONS);

@@ -1344,7 +1359,8 @@ int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
	/* if unmap was at start/end of this region, it actually shrinks */
	if(offset == 0) {
		struct phys_region *pr;
		physr_iter iter;
		vir_bytes voffset;
		int remslots;

		region_remove(&vmp->vm_regions_avl, r->vaddr);

@@ -1352,20 +1368,23 @@ int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
			r->vaddr += len;
			r->length -= len;);

		remslots = phys_slot(r->length);

		region_insert(&vmp->vm_regions_avl, r);

		/* vaddr has increased; to make all the phys_regions
		 * point to the same addresses, make them shrink by the
		 * same amount.
		 */
		physr_init_iter(&iter);
		physr_start_iter(r->phys, &iter, offset, AVL_GREATER_EQUAL);

		while((pr = physr_get_iter(&iter))) {
		for(voffset = offset; voffset < r->length;
			voffset += VM_PAGE_SIZE) {
			if(!(pr = physblock_get(r, voffset))) continue;
			assert(pr->offset >= offset);
			USE(pr, pr->offset -= len;);
			physr_incr_iter(&iter);
		}
		if(remslots)
			memmove(r->physblocks, r->physblocks + freeslots,
				remslots * sizeof(struct phys_region *));
	} else if(offset + len == r->length) {
		assert(len <= r->length);
		r->length -= len;
@@ -1459,10 +1478,10 @@ static void get_usage_info_vm(struct vm_usage_info *vui)
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
{
	struct vir_region *vr;
	physr_iter iter;
	struct phys_region *ph;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
	vir_bytes voffset;

	memset(vui, 0, sizeof(*vui));

@@ -1477,8 +1496,8 @@ void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
	}

	while((vr = region_get_iter(&v_iter))) {
		physr_start_iter_least(vr->phys, &iter);
		while((ph = physr_get_iter(&iter))) {
		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
			if(!(ph = physblock_get(vr, voffset))) continue;
			/* All present pages are counted towards the total. */
			vui->vui_total += VM_PAGE_SIZE;

@@ -1490,7 +1509,6 @@ void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
			if (vr->flags & VR_SHARED)
				vui->vui_shared += VM_PAGE_SIZE;
			}
			physr_incr_iter(&iter);
		}
		region_incr_iter(&v_iter);
	}
@@ -1515,13 +1533,18 @@ int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max; count++, vri++) {
		struct phys_region *ph1, *ph2;
		struct phys_region *ph1 = NULL, *ph2 = NULL;
		vir_bytes voffset;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		ph1 = physr_search_least(vr->phys);
		ph2 = physr_search_greatest(vr->phys);
		for(voffset = 0; voffset > vr->length; voffset += VM_PAGE_SIZE) {
			struct phys_region *ph;
			if(!(ph = physblock_get(vr, voffset))) continue;
			if(!ph1) ph1 = ph;
			ph2 = ph;
		}
		if(!ph1 || !ph2) { assert(!ph1 && !ph2); continue; }

		/* Report start+length of region starting from lowest use. */
@@ -1548,18 +1571,17 @@ void printregionstats(struct vmproc *vmp)
{
	struct vir_region *vr;
	struct phys_region *pr;
	physr_iter iter;
	vir_bytes used = 0, weighted = 0;
	region_iter v_iter;
	region_start_iter_least(&vmp->vm_regions_avl, &v_iter);

	while((vr = region_get_iter(&v_iter))) {
		vir_bytes voffset;
		region_incr_iter(&v_iter);
		if(vr->flags & VR_DIRECT)
			continue;
		physr_start_iter_least(vr->phys, &iter);
		while((pr = physr_get_iter(&iter))) {
			physr_incr_iter(&iter);
		for(voffset = 0; voffset < vr->length; voffset+=VM_PAGE_SIZE) {
			if(!(pr = physblock_get(vr, voffset))) continue;
			used += VM_PAGE_SIZE;
			weighted += VM_PAGE_SIZE / pr->ph->refcount;
		}
@@ -1594,7 +1616,7 @@ get_clean_phys_region(struct vmproc *vmp, vir_bytes vaddr, struct vir_region **r
	assert(ph->ph->refcount > 0);
	if(ph->ph->refcount > 1) {
		if(!(ph = map_clone_ph_block(vmp, region,
			ph, NULL))) {
			ph))) {
			printf("VM: get_clean_phys_region: ph copy failed\n");
			return NULL;
		}
@@ -1893,11 +1915,10 @@ void map_setparent(struct vmproc *vmp)
int physregions(struct vir_region *vr)
{
	int n = 0;
	physr_iter iter;
	physr_start_iter_least(vr->phys, &iter);
	while(physr_get_iter(&iter)) {
	vir_bytes voffset;
	for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
		if(physblock_get(vr, voffset))
			n++;
		physr_incr_iter(&iter);
	}
	return n;
}
@@ -17,7 +17,6 @@
#include <minix/const.h>

#include "phys_region.h"
#include "physravl.h"
#include "memtype.h"
#include "vm.h"

@@ -38,7 +37,7 @@ struct phys_block {
typedef struct vir_region {
	vir_bytes vaddr; /* virtual address, offset from pagetable */
	vir_bytes length; /* length in bytes */
	physr_avl *phys; /* avl tree of physical memory blocks */
	struct phys_region **physblocks;
	u16_t flags;
	struct vmproc *parent; /* Process that owns this vir_region. */
	mem_type_t *memtype; /* Default instantiated memory type. */
@@ -24,6 +24,7 @@
#include <unistd.h>
#include <assert.h>
#include <sys/param.h>
#include <sys/mman.h>

#include "proto.h"
#include "glo.h"
@@ -264,3 +265,55 @@ int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
	return OK;
}

void *minix_mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
{
	void *ret;
	phys_bytes p;

	assert(!addr);
	assert(!(len % VM_PAGE_SIZE));

	ret = vm_allocpages(&p, VMP_SLAB, len/VM_PAGE_SIZE);

	if(!ret) return MAP_FAILED;
	memset(ret, 0, len);
	return ret;
}

int minix_munmap(void * addr, size_t len)
{
	vm_freepages((vir_bytes) addr, roundup(len, VM_PAGE_SIZE)/VM_PAGE_SIZE);
	return 0;
}

int _brk(void *addr)
{
	vir_bytes target = roundup((vir_bytes)addr, VM_PAGE_SIZE), v;
	extern char _end;
	extern char *_brksize;
	static vir_bytes prevbrk = (vir_bytes) &_end;
	struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

	for(v = roundup(prevbrk, VM_PAGE_SIZE); v < target;
		v += VM_PAGE_SIZE) {
		phys_bytes mem, newpage = alloc_mem(1, 0);
		if(newpage == NO_MEM) return -1;
		mem = CLICK2ABS(newpage);
		if(pt_writemap(vmprocess, &vmprocess->vm_pt,
			v, mem, VM_PAGE_SIZE,
			ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW, 0) != OK) {
			free_mem(newpage, 1);
			return -1;
		}
		prevbrk = v + VM_PAGE_SIZE;
	}

	_brksize = (char *) addr;

	if(sys_vmctl(SELF, VMCTL_FLUSHTLB, 0) != OK)
		panic("flushtlb failed");

	return 0;
}

@@ -7,8 +7,6 @@

#include "pt.h"
#include "vm.h"
#include "physravl.h"
#include "yieldedavl.h"
#include "regionavl.h"

struct vmproc;