2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
#include <minix/com.h>
|
|
|
|
#include <minix/callnr.h>
|
|
|
|
#include <minix/type.h>
|
|
|
|
#include <minix/config.h>
|
|
|
|
#include <minix/const.h>
|
|
|
|
#include <minix/sysutil.h>
|
|
|
|
#include <minix/syslib.h>
|
2009-09-21 16:49:49 +02:00
|
|
|
#include <minix/debug.h>
|
|
|
|
#include <minix/bitmap.h>
|
2010-10-15 11:10:14 +02:00
|
|
|
#include <minix/hash.h>
|
2012-09-18 22:19:22 +02:00
|
|
|
#include <machine/multiboot.h>
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
#include <sys/mman.h>
|
|
|
|
|
|
|
|
#include <limits.h>
|
2012-12-17 19:26:52 +01:00
|
|
|
#include <stdlib.h>
|
2008-11-19 13:26:10 +01:00
|
|
|
#include <string.h>
|
|
|
|
#include <assert.h>
|
|
|
|
#include <stdint.h>
|
2010-08-21 15:10:41 +02:00
|
|
|
#include <sys/param.h>
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
#include "vm.h"
|
|
|
|
#include "proto.h"
|
|
|
|
#include "util.h"
|
|
|
|
#include "glo.h"
|
|
|
|
#include "region.h"
|
|
|
|
#include "sanitycheck.h"
|
2010-04-12 13:25:24 +02:00
|
|
|
#include "memlist.h"
|
2012-10-11 15:15:49 +02:00
|
|
|
#include "memtype.h"
|
2012-11-08 13:45:06 +01:00
|
|
|
#include "regionavl.h"
|
2010-05-05 13:35:04 +02:00
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
static struct vir_region *map_copy_region(struct vmproc *vmp, struct
|
2012-03-24 16:16:34 +01:00
|
|
|
vir_region *vr);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
/* One-time initialization of the region module.  Currently a no-op kept
 * as a hook so callers have a stable init entry point.
 */
void map_region_init(void)
{
}
|
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
static void map_printregion(struct vir_region *vr)
|
2009-09-27 14:44:36 +02:00
|
|
|
{
|
2013-08-20 14:02:33 +02:00
|
|
|
unsigned int i;
|
2009-09-27 14:44:36 +02:00
|
|
|
struct phys_region *ph;
|
2013-03-20 20:09:01 +01:00
|
|
|
printf("map_printmap: map_name: %s\n", vr->def_memtype->name);
|
2013-03-20 20:18:52 +01:00
|
|
|
printf("\t%lx (len 0x%lx, %lukB), %p, %s\n",
|
|
|
|
vr->vaddr, vr->length, vr->length/1024,
|
|
|
|
vr->def_memtype->name,
|
|
|
|
(vr->flags & VR_WRITABLE) ? "writable" : "readonly");
|
2009-09-27 14:44:36 +02:00
|
|
|
printf("\t\tphysblocks:\n");
|
2012-12-17 19:26:52 +01:00
|
|
|
for(i = 0; i < vr->length/VM_PAGE_SIZE; i++) {
|
|
|
|
if(!(ph=vr->physblocks[i])) continue;
|
2013-03-20 20:18:52 +01:00
|
|
|
printf("\t\t@ %lx (refs %d): phys 0x%lx, %s\n",
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
(vr->vaddr + ph->offset),
|
2013-03-20 20:18:52 +01:00
|
|
|
ph->ph->refcount, ph->ph->phys,
|
|
|
|
pt_writable(vr->parent, vr->vaddr + ph->offset) ? "W" : "R");
|
|
|
|
|
2009-09-27 14:44:36 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-17 19:26:52 +01:00
|
|
|
/* Look up the phys_region backing the page at page-aligned 'offset'
 * within 'region'.  Returns NULL when that page has no physical block.
 */
struct phys_region *physblock_get(struct vir_region *region, vir_bytes offset)
{
	struct phys_region *pr;
	int slot;

	/* Offsets are page-granular and must fall inside the region. */
	assert(!(offset % VM_PAGE_SIZE));
	assert( /* offset >= 0 && */ offset < region->length);

	slot = offset/VM_PAGE_SIZE;
	pr = region->physblocks[slot];
	/* A stored block must agree with the slot it occupies. */
	if(pr != NULL)
		assert(pr->offset == offset);

	return pr;
}
|
|
|
|
|
|
|
|
/* Install (or clear, when 'newphysr' is NULL) the phys_region backing the
 * page at page-aligned 'offset' in 'region', keeping the owning process'
 * total and peak memory accounting in sync.
 */
void physblock_set(struct vir_region *region, vir_bytes offset,
	struct phys_region *newphysr)
{
	struct vmproc *owner;
	int slot;

	assert(!(offset % VM_PAGE_SIZE));
	assert( /* offset >= 0 && */ offset < region->length);

	slot = offset/VM_PAGE_SIZE;

	owner = region->parent;
	assert(owner);

	if(newphysr == NULL) {
		/* Clearing: the slot must currently be occupied. */
		assert(region->physblocks[slot]);
		owner->vm_total -= VM_PAGE_SIZE;
	} else {
		/* Installing: the slot must be free and offsets must agree. */
		assert(!region->physblocks[slot]);
		assert(newphysr->offset == offset);
		owner->vm_total += VM_PAGE_SIZE;
		/* Track the high-water mark of the process' usage. */
		if (owner->vm_total > owner->vm_total_max)
			owner->vm_total_max = owner->vm_total;
	}

	region->physblocks[slot] = newphysr;
}
|
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* map_printmap *
|
|
|
|
*===========================================================================*/
|
2013-08-20 14:02:33 +02:00
|
|
|
void map_printmap(struct vmproc *vmp)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
|
|
|
struct vir_region *vr;
|
2010-10-04 13:41:10 +02:00
|
|
|
region_iter iter;
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2008-12-08 17:43:20 +01:00
|
|
|
printf("memory regions in process %d:\n", vmp->vm_endpoint);
|
2010-10-04 13:41:10 +02:00
|
|
|
|
|
|
|
region_start_iter_least(&vmp->vm_regions_avl, &iter);
|
|
|
|
while((vr = region_get_iter(&iter))) {
|
2012-12-17 19:26:52 +01:00
|
|
|
map_printregion(vr);
|
2010-10-04 13:41:10 +02:00
|
|
|
region_incr_iter(&iter);
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
static struct vir_region *getnextvr(struct vir_region *vr)
|
2010-10-04 13:41:10 +02:00
|
|
|
{
|
|
|
|
struct vir_region *nextvr;
|
|
|
|
region_iter v_iter;
|
|
|
|
SLABSANE(vr);
|
|
|
|
region_start_iter(&vr->parent->vm_regions_avl, &v_iter, vr->vaddr, AVL_EQUAL);
|
|
|
|
assert(region_get_iter(&v_iter));
|
|
|
|
assert(region_get_iter(&v_iter) == vr);
|
|
|
|
region_incr_iter(&v_iter);
|
|
|
|
nextvr = region_get_iter(&v_iter);
|
|
|
|
if(!nextvr) return NULL;
|
|
|
|
SLABSANE(nextvr);
|
|
|
|
assert(vr->parent == nextvr->parent);
|
|
|
|
assert(vr->vaddr < nextvr->vaddr);
|
|
|
|
assert(vr->vaddr + vr->length <= nextvr->vaddr);
|
|
|
|
return nextvr;
|
|
|
|
}
|
2008-12-08 17:43:20 +01:00
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
static int pr_writable(struct vir_region *vr, struct phys_region *pr)
|
2012-10-11 15:15:49 +02:00
|
|
|
{
|
2013-03-20 20:18:52 +01:00
|
|
|
assert(pr->memtype->writable);
|
|
|
|
return ((vr->flags & VR_WRITABLE) && pr->memtype->writable(pr));
|
2012-10-11 15:15:49 +02:00
|
|
|
}
|
|
|
|
|
2008-12-08 17:43:20 +01:00
|
|
|
#if SANITYCHECKS
|
2010-10-04 13:41:10 +02:00
|
|
|
|
2009-09-23 15:33:01 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* map_sanitycheck_pt *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static int map_sanitycheck_pt(struct vmproc *vmp,
|
2009-09-23 15:33:01 +02:00
|
|
|
struct vir_region *vr, struct phys_region *pr)
|
|
|
|
{
|
|
|
|
struct phys_block *pb = pr->ph;
|
|
|
|
int rw;
|
2010-04-12 13:25:24 +02:00
|
|
|
int r;
|
2009-09-23 15:33:01 +02:00
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
if(pr_writable(vr, pr))
|
2009-09-23 15:33:01 +02:00
|
|
|
rw = PTF_WRITE;
|
|
|
|
else
|
2012-08-16 23:34:15 +02:00
|
|
|
rw = PTF_READ;
|
2009-09-23 15:33:01 +02:00
|
|
|
|
2010-09-15 16:11:12 +02:00
|
|
|
r = pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
|
2012-09-18 13:17:49 +02:00
|
|
|
pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw, WMF_VERIFY);
|
2010-04-12 13:25:24 +02:00
|
|
|
|
|
|
|
if(r != OK) {
|
|
|
|
printf("proc %d phys_region 0x%lx sanity check failed\n",
|
|
|
|
vmp->vm_endpoint, pr->offset);
|
2012-12-17 19:26:52 +01:00
|
|
|
map_printregion(vr);
|
2010-04-12 13:25:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return r;
|
2009-09-23 15:33:01 +02:00
|
|
|
}
|
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
/*===========================================================================*
 *				map_sanitycheck				     *
 *===========================================================================*/
/* Exhaustive consistency check over all regions and physical blocks of
 * every in-use process: slab sanity of all objects, parent pointers,
 * refcount-vs-actual-reference agreement, region ordering/overlap, page
 * alignment, and page-table agreement.  'file'/'line' identify the call
 * site for diagnostics.  Compiled only under SANITYCHECKS.
 */
void map_sanitycheck(char *file, int line)
{
	struct vmproc *vmp;

	/* Macro for looping over all physical blocks of all regions of
	 * all processes.
	 */
#define ALLREGIONS(regioncode, physcode)			\
	for(vmp = vmproc; vmp < &vmproc[VMP_NR]; vmp++) {	\
		vir_bytes voffset;				\
		region_iter v_iter;				\
		struct vir_region *vr;				\
		if(!(vmp->vm_flags & VMF_INUSE))		\
			continue;				\
		region_start_iter_least(&vmp->vm_regions_avl, &v_iter);	\
		while((vr = region_get_iter(&v_iter))) {	\
			struct phys_region *pr;			\
			regioncode;				\
			for(voffset = 0; voffset < vr->length;	\
				voffset += VM_PAGE_SIZE) {	\
				if(!(pr = physblock_get(vr, voffset)))	\
					continue;		\
				physcode;			\
			}					\
			region_incr_iter(&v_iter);		\
		}						\
	}

#define MYSLABSANE(s) MYASSERT(slabsane_f(__FILE__, __LINE__, s, sizeof(*(s))))
	/* Basic pointers check. */
	ALLREGIONS(MYSLABSANE(vr),MYSLABSANE(pr); MYSLABSANE(pr->ph);MYSLABSANE(pr->parent));
	ALLREGIONS(/* MYASSERT(vr->parent == vmp) */,MYASSERT(pr->parent == vr););

	/* Do counting for consistency check. */
	/* Pass 1: reset each block's seencount; pass 2: offsets must match
	 * the slot they occupy; pass 3: count references, and give each
	 * memory type one chance (on first sight of a block) to run its
	 * own sanity check.
	 */
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount = 0;););
	ALLREGIONS(;,MYASSERT(pr->offset == voffset););
	ALLREGIONS(;,USE(pr->ph, pr->ph->seencount++;);
		if(pr->ph->seencount == 1) {
			if(pr->memtype->ev_sanitycheck)
				pr->memtype->ev_sanitycheck(pr, file, line);
		}
	);

	/* Do consistency check. */
	/* Per region: successor regions are ordered and non-overlapping,
	 * and region start is page-aligned.  Per block: a block held by
	 * the cache carries one extra reference (PBF_INCACHE); the
	 * refcount must equal the number of phys_regions on its list
	 * (plus the cache reference), which must equal the seencount
	 * from the counting passes above.
	 */
	ALLREGIONS({ struct vir_region *nextvr = getnextvr(vr);
		if(nextvr) {
			MYASSERT(vr->vaddr < nextvr->vaddr);
			MYASSERT(vr->vaddr + vr->length <= nextvr->vaddr);
		}
		}
		MYASSERT(!(vr->vaddr % VM_PAGE_SIZE));,
		if(pr->ph->flags & PBF_INCACHE) pr->ph->seencount++;
		if(pr->ph->refcount != pr->ph->seencount) {
			map_printmap(vmp);
			printf("ph in vr %p: 0x%lx refcount %u "
				"but seencount %u\n",
				vr, pr->offset,
				pr->ph->refcount, pr->ph->seencount);
		}
		{
			int n_others = 0;
			struct phys_region *others;
			if(pr->ph->refcount > 0) {
				MYASSERT(pr->ph->firstregion);
				if(pr->ph->refcount == 1) {
					MYASSERT(pr->ph->firstregion == pr);
				}
			} else {
				MYASSERT(!pr->ph->firstregion);
			}
			for(others = pr->ph->firstregion; others;
				others = others->next_ph_list) {
				MYSLABSANE(others);
				MYASSERT(others->ph == pr->ph);
				n_others++;
			}
			if(pr->ph->flags & PBF_INCACHE) n_others++;
			MYASSERT(pr->ph->refcount == n_others);
		}
		MYASSERT(pr->ph->refcount == pr->ph->seencount);
		MYASSERT(!(pr->offset % VM_PAGE_SIZE)););
	/* Finally, every mapping must agree with the live page tables. */
	ALLREGIONS(,MYASSERT(map_sanitycheck_pt(vmp, vr, pr) == OK));
}
|
2010-05-05 13:35:04 +02:00
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/*=========================================================================*
 *				map_ph_writept				   *
 *=========================================================================*/
/* Write the page-table entry for one physical region 'pr' of region 'vr'
 * into process 'vmp's page table, with access bits derived from the
 * region flags and the block's memory type.  Returns OK or ENOMEM.
 */
int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
	struct phys_region *pr)
{
	int flags = PTF_PRESENT | PTF_USER;
	struct phys_block *pb = pr->ph;

	assert(vr);
	assert(pr);
	assert(pb);

	/* Both region start and in-region offset are page-granular, and
	 * the block must still be referenced by someone.
	 */
	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(pr->offset % VM_PAGE_SIZE));
	assert(pb->refcount > 0);

	if(pr_writable(vr, pr))
		flags |= PTF_WRITE;
	else
		flags |= PTF_READ;

	/* Let the region's memory type add architecture/type-specific
	 * page-table flags (e.g. caching attributes).
	 */
	if(vr->def_memtype->pt_flags)
		flags |= vr->def_memtype->pt_flags(vr);

	/* Under SANITYCHECKS, a page that was never written before must
	 * not already be mapped, so only pass WMF_OVERWRITE for pages we
	 * know we mapped earlier.
	 */
	if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
			pb->phys, VM_PAGE_SIZE, flags,
#if SANITYCHECKS
		!pr->written ? 0 :
#endif
		WMF_OVERWRITE) != OK) {
		printf("VM: map_writept: pt_writemap failed\n");
		return ENOMEM;
	}

#if SANITYCHECKS
	USE(pr, pr->written = 1;);
#endif

	return OK;
}
|
|
|
|
|
2010-10-07 12:04:05 +02:00
|
|
|
#define SLOT_FAIL ((vir_bytes) -1)
|
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
/*===========================================================================*
|
2010-10-07 12:04:05 +02:00
|
|
|
* region_find_slot_range *
|
2008-11-19 13:26:10 +01:00
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static vir_bytes region_find_slot_range(struct vmproc *vmp,
|
2010-10-07 12:04:05 +02:00
|
|
|
vir_bytes minv, vir_bytes maxv, vir_bytes length)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
struct vir_region *lastregion;
|
2011-06-01 11:30:58 +02:00
|
|
|
vir_bytes startv = 0;
|
2008-11-19 13:26:10 +01:00
|
|
|
int foundflag = 0;
|
2010-10-07 12:04:05 +02:00
|
|
|
region_iter iter;
|
2010-10-04 13:41:10 +02:00
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
|
|
|
/* Length must be reasonable. */
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(length > 0);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
/* Special case: allow caller to set maxv to 0 meaning 'I want
|
|
|
|
* it to be mapped in right here.'
|
|
|
|
*/
|
|
|
|
if(maxv == 0) {
|
|
|
|
maxv = minv + length;
|
|
|
|
|
|
|
|
/* Sanity check. */
|
|
|
|
if(maxv <= minv) {
|
2009-09-21 16:49:49 +02:00
|
|
|
printf("region_find_slot: minv 0x%lx and bytes 0x%lx\n",
|
2008-11-19 13:26:10 +01:00
|
|
|
minv, length);
|
2010-10-07 12:04:05 +02:00
|
|
|
return SLOT_FAIL;
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Basic input sanity checks. */
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(!(length % VM_PAGE_SIZE));
|
2008-11-19 13:26:10 +01:00
|
|
|
if(minv >= maxv) {
|
|
|
|
printf("VM: 1 minv: 0x%lx maxv: 0x%lx length: 0x%lx\n",
|
|
|
|
minv, maxv, length);
|
|
|
|
}
|
2012-09-18 13:17:52 +02:00
|
|
|
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(minv < maxv);
|
2012-09-18 13:17:52 +02:00
|
|
|
|
|
|
|
if(minv + length > maxv)
|
|
|
|
return SLOT_FAIL;
|
2008-11-19 13:26:10 +01:00
|
|
|
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
#define FREEVRANGE_TRY(rangestart, rangeend) { \
|
2008-11-19 13:26:10 +01:00
|
|
|
vir_bytes frstart = (rangestart), frend = (rangeend); \
|
|
|
|
frstart = MAX(frstart, minv); \
|
|
|
|
frend = MIN(frend, maxv); \
|
|
|
|
if(frend > frstart && (frend - frstart) >= length) { \
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
startv = frend-length; \
|
2008-11-19 13:26:10 +01:00
|
|
|
foundflag = 1; \
|
|
|
|
} }
|
|
|
|
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses are prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
#define FREEVRANGE(start, end) { \
|
|
|
|
assert(!foundflag); \
|
|
|
|
FREEVRANGE_TRY(((start)+VM_PAGE_SIZE), ((end)-VM_PAGE_SIZE)); \
|
|
|
|
if(!foundflag) { \
|
|
|
|
FREEVRANGE_TRY((start), (end)); \
|
|
|
|
} \
|
|
|
|
}
|
2010-10-07 12:04:05 +02:00
|
|
|
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
/* find region after maxv. */
|
|
|
|
region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_GREATER_EQUAL);
|
|
|
|
lastregion = region_get_iter(&iter);
|
|
|
|
|
|
|
|
if(!lastregion) {
|
|
|
|
/* This is the free virtual address space after the last region. */
|
|
|
|
region_start_iter(&vmp->vm_regions_avl, &iter, maxv, AVL_LESS);
|
|
|
|
lastregion = region_get_iter(&iter);
|
|
|
|
FREEVRANGE(lastregion ?
|
|
|
|
lastregion->vaddr+lastregion->length : 0, VM_DATATOP);
|
2010-10-07 12:04:05 +02:00
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
if(!foundflag) {
|
2009-09-21 16:49:49 +02:00
|
|
|
struct vir_region *vr;
|
2010-10-04 13:41:10 +02:00
|
|
|
while((vr = region_get_iter(&iter)) && !foundflag) {
|
|
|
|
struct vir_region *nextvr;
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
region_decr_iter(&iter);
|
2010-10-04 13:41:10 +02:00
|
|
|
nextvr = region_get_iter(&iter);
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
FREEVRANGE(nextvr ? nextvr->vaddr+nextvr->length : 0,
|
|
|
|
vr->vaddr);
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if(!foundflag) {
|
2010-10-07 12:04:05 +02:00
|
|
|
return SLOT_FAIL;
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* However we got it, startv must be in the requested range. */
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(startv >= minv);
|
|
|
|
assert(startv < maxv);
|
|
|
|
assert(startv + length <= maxv);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2010-10-07 12:04:05 +02:00
|
|
|
/* remember this position as a hint for next time. */
|
|
|
|
vmp->vm_region_top = startv + length;
|
|
|
|
|
2009-09-21 16:49:49 +02:00
|
|
|
return startv;
|
|
|
|
}
|
|
|
|
|
2010-10-07 12:04:05 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* region_find_slot *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
/*===========================================================================*
 *				region_find_slot			     *
 *===========================================================================*/
/*
 * Find a free virtual address range of 'length' bytes in [minv, maxv)
 * for process 'vmp'. The top of the most recently inserted region
 * (vm_region_top) is tried first as a lower bound, which keeps the
 * common allocation pattern fast; if that fails, the full requested
 * range is searched. Returns the start address, or SLOT_FAIL.
 */
static vir_bytes region_find_slot(struct vmproc *vmp,
	vir_bytes minv, vir_bytes maxv, vir_bytes length)
{
	vir_bytes where;
	vir_bytes hint = vmp->vm_region_top;

	/* The hint is only usable when it falls inside the requested
	 * window. Remember that a zero maxv is a special case and must
	 * bypass the hinted search.
	 */
	if(maxv != 0 && hint >= minv && hint < maxv) {
		where = region_find_slot_range(vmp, minv, hint, length);

		if(where != SLOT_FAIL)
			return where;
	}

	/* Hint absent or exhausted: search the entire range. */
	return region_find_slot_range(vmp, minv, maxv, length);
}
|
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
/* Convert a page-aligned byte length into its page-slot count. */
static unsigned int phys_slot(vir_bytes len)
{
	/* Lengths must be an exact multiple of the page size. */
	assert(len % VM_PAGE_SIZE == 0);
	return (unsigned int)(len / VM_PAGE_SIZE);
}
|
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
/*
 * Allocate and initialize a fresh vir_region node for process 'vmp',
 * covering [startv, startv+length) with the given flags and default
 * memory type. Also allocates the per-page phys_region pointer array.
 * Returns the region, or NULL if either allocation fails.
 */
static struct vir_region *region_new(struct vmproc *vmp, vir_bytes startv, vir_bytes length,
	int flags, mem_type_t *memtype)
{
	struct vir_region *region;
	struct phys_region **prs;
	static u32_t id;	/* monotonically increasing region identifier */
	int slots = phys_slot(length);

	if(!(SLABALLOC(region))) {
		printf("vm: region_new: could not allocate\n");
		return NULL;
	}

	/* Fill in node details. */
	USE(region,
	memset(region, 0, sizeof(*region));
	region->vaddr = startv;
	region->length = length;
	region->flags = flags;
	region->def_memtype = memtype;
	region->remaps = 0;
	region->id = id++;
	region->lower = region->higher = NULL;
	region->parent = vmp;);

	/* One phys_region slot per page in this region, zero-initialized. */
	if(!(prs = calloc(slots, sizeof(struct phys_region *)))) {
		printf("VM: region_new: allocating phys blocks failed\n");
		SLABFREE(region);
		return NULL;
	}

	USE(region, region->physblocks = prs;);

	return region;
}
|
|
|
|
|
2009-09-21 16:49:49 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* map_page_region *
|
|
|
|
*===========================================================================*/
|
2013-08-20 14:02:33 +02:00
|
|
|
struct vir_region *map_page_region(struct vmproc *vmp, vir_bytes minv,
|
|
|
|
vir_bytes maxv, vir_bytes length, u32_t flags, int mapflags,
|
|
|
|
mem_type_t *memtype)
|
2009-09-21 16:49:49 +02:00
|
|
|
{
|
2010-10-07 12:04:05 +02:00
|
|
|
struct vir_region *newregion;
|
2009-09-21 16:49:49 +02:00
|
|
|
vir_bytes startv;
|
|
|
|
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(!(length % VM_PAGE_SIZE));
|
2010-04-12 13:25:24 +02:00
|
|
|
|
2009-09-21 16:49:49 +02:00
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
2010-10-07 12:04:05 +02:00
|
|
|
startv = region_find_slot(vmp, minv, maxv, length);
|
|
|
|
if (startv == SLOT_FAIL)
|
2009-09-21 16:49:49 +02:00
|
|
|
return NULL;
|
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
/* Now we want a new region. */
|
2012-10-11 15:15:49 +02:00
|
|
|
if(!(newregion = region_new(vmp, startv, length, flags, memtype))) {
|
2008-11-19 13:26:10 +01:00
|
|
|
printf("VM: map_page_region: allocating region failed\n");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
/* If a new event is specified, invoke it. */
|
2013-03-20 20:09:01 +01:00
|
|
|
if(newregion->def_memtype->ev_new) {
|
|
|
|
if(newregion->def_memtype->ev_new(newregion) != OK) {
|
2012-11-09 16:50:31 +01:00
|
|
|
/* ev_new will have freed and removed the region */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
if(mapflags & MF_PREALLOC) {
|
2013-03-20 20:18:52 +01:00
|
|
|
if(map_handle_memory(vmp, newregion, 0, length, 1,
|
|
|
|
NULL, 0, 0) != OK) {
|
2009-09-21 16:49:49 +02:00
|
|
|
printf("VM: map_page_region: prealloc failed\n");
|
2012-12-17 19:26:52 +01:00
|
|
|
free(newregion->physblocks);
|
2010-04-12 13:25:24 +02:00
|
|
|
USE(newregion,
|
2012-12-17 19:26:52 +01:00
|
|
|
newregion->physblocks = NULL;);
|
2008-11-19 13:26:10 +01:00
|
|
|
SLABFREE(newregion);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-06 19:05:28 +02:00
|
|
|
/* Pre-allocations should be uninitialized, but after that it's a
|
|
|
|
* different story.
|
|
|
|
*/
|
2012-09-18 13:17:49 +02:00
|
|
|
USE(newregion, newregion->flags &= ~VR_UNINITIALIZED;);
|
2012-06-06 19:05:28 +02:00
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
/* Link it. */
|
2010-10-04 13:41:10 +02:00
|
|
|
region_insert(&vmp->vm_regions_avl, newregion);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
#if SANITYCHECKS
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(startv == newregion->vaddr);
|
2010-10-04 13:41:10 +02:00
|
|
|
{
|
|
|
|
struct vir_region *nextvr;
|
|
|
|
if((nextvr = getnextvr(newregion))) {
|
|
|
|
assert(newregion->vaddr < nextvr->vaddr);
|
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
|
|
|
return newregion;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
|
2009-09-21 16:49:49 +02:00
|
|
|
* map_subfree *
|
2008-11-19 13:26:10 +01:00
|
|
|
*===========================================================================*/
|
2012-09-18 13:17:52 +02:00
|
|
|
static int map_subfree(struct vir_region *region,
|
|
|
|
vir_bytes start, vir_bytes len)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
2011-06-01 11:30:58 +02:00
|
|
|
struct phys_region *pr;
|
2012-09-18 13:17:52 +02:00
|
|
|
vir_bytes end = start+len;
|
2012-12-17 19:26:52 +01:00
|
|
|
vir_bytes voffset;
|
2010-04-12 13:25:24 +02:00
|
|
|
|
2009-04-22 14:39:29 +02:00
|
|
|
#if SANITYCHECKS
|
2010-10-04 13:41:10 +02:00
|
|
|
SLABSANE(region);
|
2012-12-17 19:26:52 +01:00
|
|
|
for(voffset = 0; voffset < phys_slot(region->length);
|
|
|
|
voffset += VM_PAGE_SIZE) {
|
2009-04-22 14:39:29 +02:00
|
|
|
struct phys_region *others;
|
|
|
|
struct phys_block *pb;
|
|
|
|
|
2012-12-17 19:26:52 +01:00
|
|
|
if(!(pr = physblock_get(region, voffset)))
|
|
|
|
continue;
|
|
|
|
|
2009-04-22 14:39:29 +02:00
|
|
|
pb = pr->ph;
|
|
|
|
|
|
|
|
for(others = pb->firstregion; others;
|
|
|
|
others = others->next_ph_list) {
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(others->ph == pb);
|
2009-04-22 14:39:29 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-12-17 19:26:52 +01:00
|
|
|
for(voffset = start; voffset < end; voffset+=VM_PAGE_SIZE) {
|
|
|
|
if(!(pr = physblock_get(region, voffset)))
|
|
|
|
continue;
|
|
|
|
assert(pr->offset >= start);
|
|
|
|
assert(pr->offset < end);
|
|
|
|
pb_unreferenced(region, pr, 1);
|
2012-09-18 13:17:52 +02:00
|
|
|
SLABFREE(pr);
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
|
2009-09-21 16:49:49 +02:00
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* map_free *
|
|
|
|
*===========================================================================*/
|
2012-10-11 15:15:49 +02:00
|
|
|
int map_free(struct vir_region *region)
|
2009-09-21 16:49:49 +02:00
|
|
|
{
|
|
|
|
int r;
|
|
|
|
|
2012-09-18 13:17:52 +02:00
|
|
|
if((r=map_subfree(region, 0, region->length)) != OK) {
|
2010-04-12 13:25:24 +02:00
|
|
|
printf("%d\n", __LINE__);
|
2009-09-21 16:49:49 +02:00
|
|
|
return r;
|
2010-04-12 13:25:24 +02:00
|
|
|
}
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2013-03-20 20:09:01 +01:00
|
|
|
if(region->def_memtype->ev_delete)
|
|
|
|
region->def_memtype->ev_delete(region);
|
2012-12-17 19:26:52 +01:00
|
|
|
free(region->physblocks);
|
|
|
|
region->physblocks = NULL;
|
2008-11-19 13:26:10 +01:00
|
|
|
SLABFREE(region);
|
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*========================================================================*
|
|
|
|
* map_free_proc *
|
|
|
|
*========================================================================*/
|
2013-08-20 14:02:33 +02:00
|
|
|
int map_free_proc(struct vmproc *vmp)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
2010-10-04 13:41:10 +02:00
|
|
|
struct vir_region *r;
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
while((r = region_search_root(&vmp->vm_regions_avl))) {
|
2009-04-22 14:39:29 +02:00
|
|
|
SANITYCHECK(SCL_DETAIL);
|
|
|
|
#if SANITYCHECKS
|
|
|
|
nocheck++;
|
|
|
|
#endif
|
2010-10-04 13:41:10 +02:00
|
|
|
region_remove(&vmp->vm_regions_avl, r->vaddr); /* For sanity checks. */
|
2012-09-18 13:17:51 +02:00
|
|
|
map_free(r);
|
2009-04-22 14:39:29 +02:00
|
|
|
#if SANITYCHECKS
|
|
|
|
nocheck--;
|
|
|
|
#endif
|
|
|
|
SANITYCHECK(SCL_DETAIL);
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
2010-10-04 13:41:10 +02:00
|
|
|
|
|
|
|
region_init(&vmp->vm_regions_avl);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* map_lookup *
|
|
|
|
*===========================================================================*/
|
2013-08-20 14:02:33 +02:00
|
|
|
struct vir_region *map_lookup(struct vmproc *vmp,
|
|
|
|
vir_bytes offset, struct phys_region **physr)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
|
|
|
struct vir_region *r;
|
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
#if SANITYCHECKS
|
|
|
|
if(!region_search_root(&vmp->vm_regions_avl))
|
2010-03-05 16:05:11 +01:00
|
|
|
panic("process has no regions: %d", vmp->vm_endpoint);
|
2010-10-04 13:41:10 +02:00
|
|
|
#endif
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
if((r = region_search(&vmp->vm_regions_avl, offset, AVL_LESS_EQUAL))) {
|
2012-09-18 13:17:52 +02:00
|
|
|
vir_bytes ph;
|
|
|
|
if(offset >= r->vaddr && offset < r->vaddr + r->length) {
|
|
|
|
ph = offset - r->vaddr;
|
|
|
|
if(physr) {
|
2012-12-17 19:26:52 +01:00
|
|
|
*physr = physblock_get(r, ph);
|
2012-10-24 19:47:47 +02:00
|
|
|
if(*physr) assert((*physr)->offset == ph);
|
2012-09-18 13:17:52 +02:00
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
return r;
|
2012-09-18 13:17:52 +02:00
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
u32_t vrallocflags(u32_t flags)
{
	/* Translate region (VR_*) flags into the corresponding physical
	 * allocation (PAF_*) flags. Memory is cleared on allocation unless
	 * the region is explicitly uninitialized.
	 */
	u32_t allocflags = 0;

	if(flags & VR_PHYS64K)     allocflags |= PAF_ALIGN64K;
	if(flags & VR_LOWER16MB)   allocflags |= PAF_LOWER16MB;
	if(flags & VR_LOWER1MB)    allocflags |= PAF_LOWER1MB;
	if(!(flags & VR_UNINITIALIZED))
		allocflags |= PAF_CLEAR;

	return allocflags;
}
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
/*===========================================================================*
|
2009-04-22 14:39:29 +02:00
|
|
|
* map_pf *
|
2008-11-19 13:26:10 +01:00
|
|
|
*===========================================================================*/
|
2013-08-20 14:02:33 +02:00
|
|
|
int map_pf(struct vmproc *vmp,
|
|
|
|
struct vir_region *region,
|
|
|
|
vir_bytes offset,
|
|
|
|
int write,
|
|
|
|
vfs_callback_t pf_callback,
|
|
|
|
void *state,
|
|
|
|
int len,
|
|
|
|
int *io)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
|
|
|
struct phys_region *ph;
|
2009-09-21 16:49:49 +02:00
|
|
|
int r = OK;
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
offset -= offset % VM_PAGE_SIZE;
|
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
/* assert(offset >= 0); */ /* always true */
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(offset < region->length);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(!(region->vaddr % VM_PAGE_SIZE));
|
2012-10-11 15:15:49 +02:00
|
|
|
assert(!(write && !(region->flags & VR_WRITABLE)));
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
2012-12-17 19:26:52 +01:00
|
|
|
if(!(ph = physblock_get(region, offset))) {
|
2012-10-11 15:15:49 +02:00
|
|
|
struct phys_block *pb;
|
|
|
|
|
|
|
|
/* New block. */
|
|
|
|
|
|
|
|
if(!(pb = pb_new(MAP_NONE))) {
|
|
|
|
printf("map_pf: pb_new failed\n");
|
|
|
|
return ENOMEM;
|
2009-09-21 16:49:49 +02:00
|
|
|
}
|
2012-10-11 15:15:49 +02:00
|
|
|
|
2013-03-20 20:18:52 +01:00
|
|
|
if(!(ph = pb_reference(pb, offset, region,
|
|
|
|
region->def_memtype))) {
|
2012-10-11 15:15:49 +02:00
|
|
|
printf("map_pf: pb_reference failed\n");
|
|
|
|
pb_free(pb);
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(ph);
|
|
|
|
assert(ph->ph);
|
|
|
|
|
|
|
|
/* If we're writing and the block is already
|
|
|
|
* writable, nothing to do.
|
|
|
|
*/
|
|
|
|
|
2013-03-20 20:18:52 +01:00
|
|
|
assert(ph->memtype->writable);
|
2012-10-11 15:15:49 +02:00
|
|
|
|
2013-03-20 20:18:52 +01:00
|
|
|
if(!write || !ph->memtype->writable(ph)) {
|
|
|
|
assert(ph->memtype->ev_pagefault);
|
2012-10-11 15:15:49 +02:00
|
|
|
assert(ph->ph);
|
2012-12-17 19:26:52 +01:00
|
|
|
|
2013-03-20 20:18:52 +01:00
|
|
|
if((r = ph->memtype->ev_pagefault(vmp,
|
2013-06-25 14:41:01 +02:00
|
|
|
region, ph, write, pf_callback, state, len, io)) == SUSPEND) {
|
2012-10-11 15:15:49 +02:00
|
|
|
return SUSPEND;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(r != OK) {
|
2013-05-29 13:48:54 +02:00
|
|
|
printf("map_pf: pagefault in %s failed\n", ph->memtype->name);
|
2012-10-11 15:15:49 +02:00
|
|
|
if(ph)
|
|
|
|
pb_unreferenced(region, ph, 1);
|
|
|
|
return r;
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
2012-10-11 15:15:49 +02:00
|
|
|
|
|
|
|
assert(ph);
|
|
|
|
assert(ph->ph);
|
2013-03-05 17:17:21 +01:00
|
|
|
assert(ph->ph->phys != MAP_NONE);
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
assert(ph->ph);
|
2013-03-05 17:17:21 +01:00
|
|
|
assert(ph->ph->phys != MAP_NONE);
|
2009-09-27 14:44:36 +02:00
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
if((r = map_ph_writept(vmp, region, ph)) != OK) {
|
|
|
|
printf("map_pf: writept failed\n");
|
2009-09-27 14:44:36 +02:00
|
|
|
return r;
|
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
2009-09-27 14:44:36 +02:00
|
|
|
#if SANITYCHECKS
|
2012-10-11 15:15:49 +02:00
|
|
|
if(OK != pt_checkrange(&vmp->vm_pt, region->vaddr+offset,
|
2010-04-12 13:25:24 +02:00
|
|
|
VM_PAGE_SIZE, write)) {
|
2010-03-05 16:05:11 +01:00
|
|
|
panic("map_pf: pt_checkrange failed: %d", r);
|
2009-09-27 14:44:36 +02:00
|
|
|
}
|
|
|
|
#endif
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
int map_handle_memory(struct vmproc *vmp,
	struct vir_region *region, vir_bytes start_offset, vir_bytes length,
	int write, vfs_callback_t cb, void *state, int statelen)
{
	/* Fault in every page of 'region' in the range
	 * [start_offset, start_offset+length), as if a (write?) fault
	 * occurred on each. Returns OK, or the first non-OK result from
	 * map_pf (which may be SUSPEND when 'cb' is used).
	 */
	vir_bytes cur, end;
	int r;
	int io = 0;

	assert(length > 0);
	end = start_offset + length;
	assert(end > start_offset);	/* no overflow */

	for(cur = start_offset; cur < end; cur += VM_PAGE_SIZE) {
		r = map_pf(vmp, region, cur, write, cb, state, statelen, &io);
		if(r != OK)
			return r;
	}

	return OK;
}
|
|
|
|
|
2010-06-28 23:53:37 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* map_pin_memory *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int map_pin_memory(struct vmproc *vmp)
|
2010-06-28 23:53:37 +02:00
|
|
|
{
|
|
|
|
struct vir_region *vr;
|
2010-07-01 10:54:25 +02:00
|
|
|
int r;
|
2010-10-04 13:41:10 +02:00
|
|
|
region_iter iter;
|
|
|
|
region_start_iter_least(&vmp->vm_regions_avl, &iter);
|
2010-06-28 23:53:37 +02:00
|
|
|
/* Scan all memory regions. */
|
2010-10-04 13:41:10 +02:00
|
|
|
while((vr = region_get_iter(&iter))) {
|
2010-07-01 10:54:25 +02:00
|
|
|
/* Make sure region is mapped to physical memory and writable.*/
|
2013-03-20 20:18:52 +01:00
|
|
|
r = map_handle_memory(vmp, vr, 0, vr->length, 1, NULL, 0, 0);
|
2010-07-01 10:54:25 +02:00
|
|
|
if(r != OK) {
|
|
|
|
panic("map_pin_memory: map_handle_memory failed: %d", r);
|
2010-06-28 23:53:37 +02:00
|
|
|
}
|
2010-10-04 13:41:10 +02:00
|
|
|
region_incr_iter(&iter);
|
2010-06-28 23:53:37 +02:00
|
|
|
}
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* map_copy_region *
|
|
|
|
*===========================================================================*/
|
2013-03-20 20:18:52 +01:00
|
|
|
struct vir_region *map_copy_region(struct vmproc *vmp, struct vir_region *vr)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
2009-04-22 14:39:29 +02:00
|
|
|
/* map_copy_region creates a complete copy of the vir_region
|
|
|
|
* data structure, linking in the same phys_blocks directly,
|
|
|
|
* but all in limbo, i.e., the caller has to link the vir_region
|
|
|
|
* to a process. Therefore it doesn't increase the refcount in
|
|
|
|
* the phys_block; the caller has to do this once it's linked.
|
|
|
|
* The reason for this is to keep the sanity checks working
|
|
|
|
* within this function.
|
|
|
|
*/
|
2008-11-19 13:26:10 +01:00
|
|
|
struct vir_region *newvr;
|
2009-09-21 16:49:49 +02:00
|
|
|
struct phys_region *ph;
|
2012-10-11 15:15:49 +02:00
|
|
|
int r;
|
2008-11-19 13:26:10 +01:00
|
|
|
#if SANITYCHECKS
|
|
|
|
int cr;
|
2012-12-17 19:26:52 +01:00
|
|
|
cr = physregions(vr);
|
2008-11-19 13:26:10 +01:00
|
|
|
#endif
|
2012-12-17 19:26:52 +01:00
|
|
|
vir_bytes p;
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2013-03-20 20:09:01 +01:00
|
|
|
if(!(newvr = region_new(vr->parent, vr->vaddr, vr->length, vr->flags, vr->def_memtype)))
|
2009-09-21 16:49:49 +02:00
|
|
|
return NULL;
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2013-05-07 14:36:09 +02:00
|
|
|
USE(newvr, newvr->parent = vmp;);
|
|
|
|
|
2013-03-20 20:09:01 +01:00
|
|
|
if(vr->def_memtype->ev_copy && (r=vr->def_memtype->ev_copy(vr, newvr)) != OK) {
|
2012-10-11 15:15:49 +02:00
|
|
|
map_free(newvr);
|
|
|
|
printf("VM: memtype-specific copy failed (%d)\n", r);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-12-17 19:26:52 +01:00
|
|
|
for(p = 0; p < phys_slot(vr->length); p++) {
|
2013-03-20 20:18:52 +01:00
|
|
|
struct phys_region *newph;
|
|
|
|
|
2012-12-17 19:26:52 +01:00
|
|
|
if(!(ph = physblock_get(vr, p*VM_PAGE_SIZE))) continue;
|
2013-03-20 20:18:52 +01:00
|
|
|
newph = pb_reference(ph->ph, ph->offset, newvr,
|
|
|
|
vr->def_memtype);
|
2012-09-18 13:17:51 +02:00
|
|
|
|
|
|
|
if(!newph) { map_free(newvr); return NULL; }
|
|
|
|
|
2013-03-20 20:18:52 +01:00
|
|
|
if(ph->memtype->ev_reference)
|
|
|
|
ph->memtype->ev_reference(ph, newph);
|
|
|
|
|
2009-09-27 14:44:36 +02:00
|
|
|
#if SANITYCHECKS
|
|
|
|
USE(newph, newph->written = 0;);
|
2012-12-17 19:26:52 +01:00
|
|
|
assert(physregions(vr) == cr);
|
2010-04-12 13:25:24 +02:00
|
|
|
#endif
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
|
2010-04-12 13:25:24 +02:00
|
|
|
#if SANITYCHECKS
|
2012-12-17 19:26:52 +01:00
|
|
|
assert(physregions(vr) == physregions(newvr));
|
2010-04-12 13:25:24 +02:00
|
|
|
#endif
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
return newvr;
|
|
|
|
}
|
|
|
|
|
2010-04-12 13:25:24 +02:00
|
|
|
/*===========================================================================*
|
|
|
|
* copy_abs2region *
|
|
|
|
*===========================================================================*/
|
2013-08-20 14:02:33 +02:00
|
|
|
int copy_abs2region(phys_bytes absaddr, struct vir_region *destregion,
|
2010-04-12 13:25:24 +02:00
|
|
|
phys_bytes offset, phys_bytes len)
|
|
|
|
|
|
|
|
{
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(destregion);
|
2012-12-17 19:26:52 +01:00
|
|
|
assert(destregion->physblocks);
|
2010-04-12 13:25:24 +02:00
|
|
|
while(len > 0) {
|
|
|
|
phys_bytes sublen, suboffset;
|
|
|
|
struct phys_region *ph;
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(destregion);
|
2012-12-17 19:26:52 +01:00
|
|
|
assert(destregion->physblocks);
|
|
|
|
if(!(ph = physblock_get(destregion, offset))) {
|
2010-04-12 13:25:24 +02:00
|
|
|
printf("VM: copy_abs2region: no phys region found (1).\n");
|
|
|
|
return EFAULT;
|
|
|
|
}
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(ph->offset <= offset);
|
2012-09-18 13:17:49 +02:00
|
|
|
if(ph->offset+VM_PAGE_SIZE <= offset) {
|
2010-04-12 13:25:24 +02:00
|
|
|
printf("VM: copy_abs2region: no phys region found (2).\n");
|
|
|
|
return EFAULT;
|
|
|
|
}
|
|
|
|
suboffset = offset - ph->offset;
|
2012-09-18 13:17:49 +02:00
|
|
|
assert(suboffset < VM_PAGE_SIZE);
|
2010-04-12 13:25:24 +02:00
|
|
|
sublen = len;
|
2012-09-18 13:17:49 +02:00
|
|
|
if(sublen > VM_PAGE_SIZE - suboffset)
|
|
|
|
sublen = VM_PAGE_SIZE - suboffset;
|
|
|
|
assert(suboffset + sublen <= VM_PAGE_SIZE);
|
2010-04-12 13:25:24 +02:00
|
|
|
if(ph->ph->refcount != 1) {
|
2010-05-05 13:35:04 +02:00
|
|
|
printf("VM: copy_abs2region: refcount not 1.\n");
|
2010-04-12 13:25:24 +02:00
|
|
|
return EFAULT;
|
|
|
|
}
|
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
if(sys_abscopy(absaddr, ph->ph->phys + suboffset, sublen) != OK) {
|
2010-04-12 13:25:24 +02:00
|
|
|
printf("VM: copy_abs2region: abscopy failed.\n");
|
|
|
|
return EFAULT;
|
|
|
|
}
|
2013-08-20 14:02:33 +02:00
|
|
|
absaddr += sublen;
|
2010-04-12 13:25:24 +02:00
|
|
|
offset += sublen;
|
|
|
|
len -= sublen;
|
|
|
|
}
|
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
/*=========================================================================*
|
|
|
|
* map_writept *
|
|
|
|
*=========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int map_writept(struct vmproc *vmp)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
|
|
|
struct vir_region *vr;
|
|
|
|
struct phys_region *ph;
|
2009-09-21 16:49:49 +02:00
|
|
|
int r;
|
2010-10-04 13:41:10 +02:00
|
|
|
region_iter v_iter;
|
|
|
|
region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
while((vr = region_get_iter(&v_iter))) {
|
2012-12-17 19:26:52 +01:00
|
|
|
vir_bytes p;
|
|
|
|
for(p = 0; p < vr->length; p += VM_PAGE_SIZE) {
|
|
|
|
if(!(ph = physblock_get(vr, p))) continue;
|
2010-01-14 16:24:16 +01:00
|
|
|
|
2009-09-21 16:49:49 +02:00
|
|
|
if((r=map_ph_writept(vmp, vr, ph)) != OK) {
|
|
|
|
printf("VM: map_writept: failed\n");
|
|
|
|
return r;
|
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
2010-10-04 13:41:10 +02:00
|
|
|
region_incr_iter(&v_iter);
|
2009-09-21 16:49:49 +02:00
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*========================================================================*
|
2010-07-20 04:08:28 +02:00
|
|
|
* map_proc_copy *
|
2008-11-19 13:26:10 +01:00
|
|
|
*========================================================================*/
|
2013-08-20 14:02:33 +02:00
|
|
|
int map_proc_copy(struct vmproc *dst, struct vmproc *src)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
2010-07-20 04:08:28 +02:00
|
|
|
/* Copy all the memory regions from the src process to the dst process. */
|
2010-10-04 13:41:10 +02:00
|
|
|
region_init(&dst->vm_regions_avl);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
return map_proc_copy_from(dst, src, NULL);
|
2010-07-20 04:08:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*========================================================================*
|
|
|
|
* map_proc_copy_from *
|
|
|
|
*========================================================================*/
|
2013-08-20 14:02:33 +02:00
|
|
|
int map_proc_copy_from(struct vmproc *dst, struct vmproc *src,
|
|
|
|
struct vir_region *start_src_vr)
|
2010-07-20 04:08:28 +02:00
|
|
|
{
|
2010-10-04 13:41:10 +02:00
|
|
|
struct vir_region *vr;
|
|
|
|
region_iter v_iter;
|
|
|
|
|
|
|
|
if(!start_src_vr)
|
|
|
|
start_src_vr = region_search_least(&src->vm_regions_avl);
|
2010-07-20 04:08:28 +02:00
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
assert(start_src_vr);
|
2010-07-20 04:08:28 +02:00
|
|
|
assert(start_src_vr->parent == src);
|
2010-10-04 13:41:10 +02:00
|
|
|
region_start_iter(&src->vm_regions_avl, &v_iter,
|
|
|
|
start_src_vr->vaddr, AVL_EQUAL);
|
|
|
|
assert(region_get_iter(&v_iter) == start_src_vr);
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2010-07-20 04:08:28 +02:00
|
|
|
/* Copy source regions after the destination's last region (if any). */
|
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
while((vr = region_get_iter(&v_iter))) {
|
2008-11-19 13:26:10 +01:00
|
|
|
struct vir_region *newvr;
|
2009-09-21 16:49:49 +02:00
|
|
|
if(!(newvr = map_copy_region(dst, vr))) {
|
2008-11-19 13:26:10 +01:00
|
|
|
map_free_proc(dst);
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
2010-10-04 13:41:10 +02:00
|
|
|
region_insert(&dst->vm_regions_avl, newvr);
|
2012-12-17 19:26:52 +01:00
|
|
|
assert(vr->length == newvr->length);
|
|
|
|
|
|
|
|
#if SANITYCHECKS
|
|
|
|
{
|
|
|
|
vir_bytes vaddr;
|
|
|
|
struct phys_region *orig_ph, *new_ph;
|
|
|
|
assert(vr->physblocks != newvr->physblocks);
|
|
|
|
for(vaddr = 0; vaddr < vr->length; vaddr += VM_PAGE_SIZE) {
|
|
|
|
orig_ph = physblock_get(vr, vaddr);
|
|
|
|
new_ph = physblock_get(newvr, vaddr);
|
|
|
|
if(!orig_ph) { assert(!new_ph); continue;}
|
2010-04-12 14:37:28 +02:00
|
|
|
assert(new_ph);
|
|
|
|
assert(orig_ph != new_ph);
|
2012-09-18 13:17:51 +02:00
|
|
|
assert(orig_ph->ph == new_ph->ph);
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
2012-12-17 19:26:52 +01:00
|
|
|
}
|
|
|
|
#endif
|
2010-10-04 13:41:10 +02:00
|
|
|
region_incr_iter(&v_iter);
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
map_writept(src);
|
|
|
|
map_writept(dst);
|
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
2012-04-07 01:19:28 +02:00
|
|
|
int map_region_extend_upto_v(struct vmproc *vmp, vir_bytes v)
|
|
|
|
{
|
2013-03-20 20:18:52 +01:00
|
|
|
vir_bytes offset = v, limit, extralen;
|
2012-04-07 01:19:28 +02:00
|
|
|
struct vir_region *vr, *nextvr;
|
2012-12-17 19:26:52 +01:00
|
|
|
struct phys_region **newpr;
|
2013-03-20 20:18:52 +01:00
|
|
|
int newslots, prevslots, addedslots, r;
|
2012-10-11 15:15:49 +02:00
|
|
|
|
|
|
|
offset = roundup(offset, VM_PAGE_SIZE);
|
2012-04-07 01:19:28 +02:00
|
|
|
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
if(!(vr = region_search(&vmp->vm_regions_avl, offset, AVL_LESS))) {
|
2012-04-07 01:19:28 +02:00
|
|
|
printf("VM: nothing to extend\n");
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
|
2012-12-17 19:26:52 +01:00
|
|
|
if(vr->vaddr + vr->length >= v) return OK;
|
|
|
|
|
2013-03-20 20:18:52 +01:00
|
|
|
limit = vr->vaddr + vr->length;
|
|
|
|
|
2012-04-07 01:19:28 +02:00
|
|
|
assert(vr->vaddr <= offset);
|
2012-12-17 19:26:52 +01:00
|
|
|
newslots = phys_slot(offset - vr->vaddr);
|
|
|
|
prevslots = phys_slot(vr->length);
|
|
|
|
assert(newslots >= prevslots);
|
|
|
|
addedslots = newslots - prevslots;
|
2013-03-20 20:18:52 +01:00
|
|
|
extralen = offset - limit;
|
|
|
|
assert(extralen > 0);
|
2012-12-17 19:26:52 +01:00
|
|
|
|
2012-04-07 01:19:28 +02:00
|
|
|
if((nextvr = getnextvr(vr))) {
|
No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and significance of Intel segments
(hardware segments like CS, DS that add offsets to all addressing before
page table translation). This ultimately simplifies the memory layout
and addressing and makes the same layout possible on non-Intel
architectures.
There are only two types of addresses in the world now: virtual
and physical; even the kernel and processes have the same virtual
address space. Kernel and user processes can be distinguished at a
glance as processes won't use 0xF0000000 and above.
No static pre-allocated memory sizes exist any more.
Changes to booting:
. The pre_init.c leaves the kernel and modules exactly as
they were left by the bootloader in physical memory
. The kernel starts running using physical addressing,
loaded at a fixed location given in its linker script by the
bootloader. All code and data in this phase are linked to
this fixed low location.
. It makes a bootstrap pagetable to map itself to a
fixed high location (also in linker script) and jumps to
the high address. All code and data then use this high addressing.
. All code/data symbols linked at the low addresses is prefixed by
an objcopy step with __k_unpaged_*, so that that code cannot
reference highly-linked symbols (which aren't valid yet) or vice
versa (symbols that aren't valid any more).
. The two addressing modes are separated in the linker script by
collecting the unpaged_*.o objects and linking them with low
addresses, and linking the rest high. Some objects are linked
twice, once low and once high.
. The bootstrap phase passes a lot of information (e.g. free memory
list, physical location of the modules, etc.) using the kinfo
struct.
. After this bootstrap the low-linked part is freed.
. The kernel maps in VM into the bootstrap page table so that VM can
begin executing. Its first job is to make page tables for all other
boot processes. So VM runs before RS, and RS gets a fully dynamic,
VM-managed address space. VM gets its privilege info from RS as usual
but that happens after RS starts running.
. Both the kernel loading VM and VM organizing boot processes happen
using the libexec logic. This removes the last reason for VM to
still know much about exec() and vm/exec.c is gone.
Further Implementation:
. All segments are based at 0 and have a 4 GB limit.
. The kernel is mapped in at the top of the virtual address
space so as not to constrain the user processes.
. Processes do not use segments from the LDT at all; there are
no segments in the LDT any more, so no LLDT is needed.
. The Minix segments T/D/S are gone and so none of the
user-space or in-kernel copy functions use them. The copy
functions use a process endpoint of NONE to realize it's
a physical address, virtual otherwise.
. The umap call only makes sense to translate a virtual address
to a physical address now.
. Segments-related calls like newmap and alloc_segments are gone.
. All segments-related translation in VM is gone (vir2map etc).
. Initialization in VM is simpler as no moving around is necessary.
. VM and all other boot processes can be linked wherever they wish
and will be mapped in at the right location by the kernel and VM
respectively.
Other changes:
. The multiboot code is less special: it does not use mb_print
for its diagnostics any more but uses printf() as normal, saving
the output into the diagnostics buffer, only printing to the
screen using the direct print functions if a panic() occurs.
. The multiboot code uses the flexible 'free memory map list'
style to receive the list of free memory if available.
. The kernel determines the memory layout of the processes to
a degree: it tells VM where the kernel starts and ends and
where the kernel wants the top of the process to be. VM then
uses this entire range, i.e. the stack is right at the top,
and mmap()ped bits of memory are placed below that downwards,
and the break grows upwards.
Other Consequences:
. Every process gets its own page table as address spaces
can't be separated any more by segments.
. As all segments are 0-based, there is no distinction between
virtual and linear addresses, nor between userspace and
kernel addresses.
. Less work is done when context switching, leading to a net
performance increase. (8% faster on my machine for 'make servers'.)
. The layout and configuration of the GDT makes sysenter and syscall
possible.
2012-05-07 16:03:35 +02:00
|
|
|
assert(offset <= nextvr->vaddr);
|
2012-04-07 01:19:28 +02:00
|
|
|
}
|
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
if(nextvr && nextvr->vaddr < offset) {
|
|
|
|
printf("VM: can't grow into next region\n");
|
2008-11-19 13:26:10 +01:00
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
|
2013-03-20 20:09:01 +01:00
|
|
|
if(!vr->def_memtype->ev_resize) {
|
2013-03-20 20:18:52 +01:00
|
|
|
if(!map_page_region(vmp, limit, 0, extralen,
|
|
|
|
VR_WRITABLE | VR_ANON,
|
|
|
|
0, &mem_type_anon)) {
|
|
|
|
printf("resize: couldn't put anon memory there\n");
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(!(newpr = realloc(vr->physblocks,
|
|
|
|
newslots * sizeof(struct phys_region *)))) {
|
|
|
|
printf("VM: map_region_extend_upto_v: realloc failed\n");
|
2012-10-11 15:15:49 +02:00
|
|
|
return ENOMEM;
|
2010-10-04 13:41:10 +02:00
|
|
|
}
|
2008-11-19 13:26:10 +01:00
|
|
|
|
2013-03-20 20:18:52 +01:00
|
|
|
vr->physblocks = newpr;
|
|
|
|
memset(vr->physblocks + prevslots, 0,
|
|
|
|
addedslots * sizeof(struct phys_region *));
|
|
|
|
|
|
|
|
r = vr->def_memtype->ev_resize(vmp, vr, offset - vr->vaddr);
|
|
|
|
|
|
|
|
return r;
|
2008-11-19 13:26:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*========================================================================*
|
|
|
|
* map_unmap_region *
|
|
|
|
*========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int map_unmap_region(struct vmproc *vmp, struct vir_region *r,
|
2012-09-18 13:17:52 +02:00
|
|
|
vir_bytes offset, vir_bytes len)
|
2008-11-19 13:26:10 +01:00
|
|
|
{
|
2009-09-21 16:49:49 +02:00
|
|
|
/* Shrink the region by 'len' bytes, from the start. Unreference
|
|
|
|
* memory it used to reference if any.
|
|
|
|
*/
|
|
|
|
vir_bytes regionstart;
|
2012-12-17 19:26:52 +01:00
|
|
|
int freeslots = phys_slot(len);
|
2008-11-19 13:26:10 +01:00
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
2012-09-18 13:17:52 +02:00
|
|
|
if(offset+len > r->length || (len % VM_PAGE_SIZE)) {
|
2009-09-21 16:49:49 +02:00
|
|
|
printf("VM: bogus length 0x%lx\n", len);
|
|
|
|
return EINVAL;
|
|
|
|
}
|
|
|
|
|
2012-09-18 13:17:52 +02:00
|
|
|
regionstart = r->vaddr + offset;
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2012-09-18 13:17:52 +02:00
|
|
|
/* unreference its memory */
|
|
|
|
map_subfree(r, offset, len);
|
|
|
|
|
|
|
|
/* if unmap was at start/end of this region, it actually shrinks */
|
2013-03-20 20:18:52 +01:00
|
|
|
if(r->length == len) {
|
|
|
|
/* Whole region disappears. Unlink and free it. */
|
|
|
|
region_remove(&vmp->vm_regions_avl, r->vaddr);
|
|
|
|
map_free(r);
|
|
|
|
} else if(offset == 0) {
|
2009-09-21 16:49:49 +02:00
|
|
|
struct phys_region *pr;
|
2012-12-17 19:26:52 +01:00
|
|
|
vir_bytes voffset;
|
|
|
|
int remslots;
|
2012-09-18 13:17:52 +02:00
|
|
|
|
2013-03-20 20:18:52 +01:00
|
|
|
if(!r->def_memtype->ev_lowshrink) {
|
|
|
|
printf("VM: low-shrinking not implemented for %s\n",
|
|
|
|
r->def_memtype->name);
|
|
|
|
return EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(r->def_memtype->ev_lowshrink(r, len) != OK) {
|
|
|
|
printf("VM: low-shrinking failed for %s\n",
|
|
|
|
r->def_memtype->name);
|
|
|
|
return EINVAL;
|
|
|
|
}
|
|
|
|
|
2012-09-18 13:17:52 +02:00
|
|
|
region_remove(&vmp->vm_regions_avl, r->vaddr);
|
|
|
|
|
2009-09-21 16:49:49 +02:00
|
|
|
USE(r,
|
2014-01-08 17:43:19 +01:00
|
|
|
r->vaddr += len;);
|
2012-09-18 13:17:52 +02:00
|
|
|
|
2012-12-17 19:26:52 +01:00
|
|
|
remslots = phys_slot(r->length);
|
|
|
|
|
2012-09-18 13:17:52 +02:00
|
|
|
region_insert(&vmp->vm_regions_avl, r);
|
2009-09-21 16:49:49 +02:00
|
|
|
|
|
|
|
/* vaddr has increased; to make all the phys_regions
|
|
|
|
* point to the same addresses, make them shrink by the
|
|
|
|
* same amount.
|
|
|
|
*/
|
2014-01-08 17:43:19 +01:00
|
|
|
for(voffset = len; voffset < r->length;
|
2012-12-17 19:26:52 +01:00
|
|
|
voffset += VM_PAGE_SIZE) {
|
|
|
|
if(!(pr = physblock_get(r, voffset))) continue;
|
2012-09-18 13:17:52 +02:00
|
|
|
assert(pr->offset >= offset);
|
2014-01-08 17:43:19 +01:00
|
|
|
assert(pr->offset >= len);
|
2009-09-21 16:49:49 +02:00
|
|
|
USE(pr, pr->offset -= len;);
|
|
|
|
}
|
2012-12-17 19:26:52 +01:00
|
|
|
if(remslots)
|
|
|
|
memmove(r->physblocks, r->physblocks + freeslots,
|
|
|
|
remslots * sizeof(struct phys_region *));
|
2014-01-08 17:43:19 +01:00
|
|
|
USE(r, r->length -= len;);
|
2012-09-18 13:17:52 +02:00
|
|
|
} else if(offset + len == r->length) {
|
|
|
|
assert(len <= r->length);
|
|
|
|
r->length -= len;
|
|
|
|
}
|
|
|
|
|
2008-11-19 13:26:10 +01:00
|
|
|
SANITYCHECK(SCL_DETAIL);
|
|
|
|
|
2010-09-15 16:11:12 +02:00
|
|
|
if(pt_writemap(vmp, &vmp->vm_pt, regionstart,
|
2009-09-21 16:49:49 +02:00
|
|
|
MAP_NONE, len, 0, WMF_OVERWRITE) != OK) {
|
2008-11-19 13:26:10 +01:00
|
|
|
printf("VM: map_unmap_region: pt_writemap failed\n");
|
|
|
|
return ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
SANITYCHECK(SCL_FUNCTIONS);
|
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
static int split_region(struct vmproc *vmp, struct vir_region *vr,
	struct vir_region **vr1, struct vir_region **vr2, vir_bytes split_len)
{
/* Split region 'vr' into two new adjacent regions: '*vr1' covering its
 * first 'split_len' bytes and '*vr2' covering the remainder. Present
 * physical pages keep their virtual addresses; 'vr' itself is removed
 * from the region tree and freed on success.
 * Returns OK, EINVAL if the memory type cannot split, or ENOMEM.
 */
	struct vir_region *r1 = NULL, *r2 = NULL;
	vir_bytes rem_len = vr->length - split_len;
	vir_bytes voffset;
	int n1 = 0, n2 = 0;	/* pages re-referenced into r1 resp. r2 */

	/* Both halves, and the region itself, must be page-aligned. */
	assert(!(split_len % VM_PAGE_SIZE));
	assert(!(rem_len % VM_PAGE_SIZE));
	assert(!(vr->vaddr % VM_PAGE_SIZE));
	assert(!(vr->length % VM_PAGE_SIZE));

	if(!vr->def_memtype->ev_split) {
		printf("VM: split region not implemented for %s\n",
			vr->def_memtype->name);
		return EINVAL;
	}

	/* Create the two replacement regions at the same addresses. */
	if(!(r1 = region_new(vmp, vr->vaddr, split_len, vr->flags,
		vr->def_memtype))) {
		goto bail;
	}

	if(!(r2 = region_new(vmp, vr->vaddr+split_len, rem_len, vr->flags,
		vr->def_memtype))) {
		/* Previously this did map_free(r1) here AND again at
		 * bail:, double-freeing r1. Let bail: free it once.
		 */
		goto bail;
	}

	/* Re-reference the present pages of the low half into r1. */
	for(voffset = 0; voffset < r1->length; voffset += VM_PAGE_SIZE) {
		struct phys_region *ph, *phn;
		if(!(ph = physblock_get(vr, voffset))) continue;
		if(!(phn = pb_reference(ph->ph, voffset, r1, ph->memtype)))
			goto bail;
		n1++;
	}

	/* Re-reference the present pages of the high half into r2;
	 * their offsets within r2 start at 0 again.
	 */
	for(voffset = 0; voffset < r2->length; voffset += VM_PAGE_SIZE) {
		struct phys_region *ph, *phn;
		if(!(ph = physblock_get(vr, split_len + voffset))) continue;
		if(!(phn = pb_reference(ph->ph, voffset, r2, ph->memtype)))
			goto bail;
		n2++;
	}

	/* Let the memory type transfer its private state to the halves. */
	vr->def_memtype->ev_split(vmp, vr, r1, r2);

	/* Replace vr by r1 and r2 in the process's region tree. */
	region_remove(&vmp->vm_regions_avl, vr->vaddr);
	map_free(vr);
	region_insert(&vmp->vm_regions_avl, r1);
	region_insert(&vmp->vm_regions_avl, r2);

	*vr1 = r1;
	*vr2 = r2;

	return OK;

bail:
	/* map_free also drops any page references taken above. */
	if(r1) map_free(r1);
	if(r2) map_free(r2);

	printf("split_region: failed\n");

	return ENOMEM;
}
|
|
|
|
|
|
|
|
int map_unmap_range(struct vmproc *vmp, vir_bytes unmap_start, vir_bytes length)
{
/* Unmap the range [unmap_start, unmap_start+length) from the process,
 * after expanding it to page boundaries. Regions fully covered are
 * destroyed, partially covered ones are shrunk, and a region with the
 * hole in its middle is split first. Returns OK or an error code.
 */
	vir_bytes o = unmap_start % VM_PAGE_SIZE, unmap_limit;
	region_iter v_iter;
	struct vir_region *vr, *nextvr;

	/* Expand the range to page boundaries. */
	unmap_start -= o;
	length += o;
	length = roundup(length, VM_PAGE_SIZE);
	unmap_limit = length + unmap_start;

	if(length < VM_PAGE_SIZE) return EINVAL;
	if(unmap_limit <= unmap_start) return EINVAL;	/* wrap-around */

	/* Start at the region at or below unmap_start, or failing that,
	 * the first region above it.
	 */
	region_start_iter(&vmp->vm_regions_avl, &v_iter, unmap_start, AVL_LESS_EQUAL);

	if(!(vr = region_get_iter(&v_iter))) {
		region_start_iter(&vmp->vm_regions_avl, &v_iter, unmap_start, AVL_GREATER);
		if(!(vr = region_get_iter(&v_iter))) {
			return OK;	/* nothing mapped in range */
		}
	}

	assert(vr);

	for(; vr && vr->vaddr < unmap_limit; vr = nextvr) {
		vir_bytes thislimit = vr->vaddr + vr->length;
		vir_bytes this_unmap_start, this_unmap_limit;
		vir_bytes remainlen;
		int r;

		/* Remember the successor now; vr may be freed below. */
		region_incr_iter(&v_iter);
		nextvr = region_get_iter(&v_iter);

		assert(thislimit > vr->vaddr);

		/* Clamp the requested range to this region. */
		this_unmap_start = MAX(unmap_start, vr->vaddr);
		this_unmap_limit = MIN(unmap_limit, thislimit);

		if(this_unmap_start >= this_unmap_limit) continue;

		if(this_unmap_start > vr->vaddr && this_unmap_limit < thislimit) {
			/* The hole is strictly inside the region; split it
			 * so the unmap lands at the end of the first half.
			 */
			struct vir_region *vr1, *vr2;
			vir_bytes split_len = this_unmap_limit - vr->vaddr;
			assert(split_len > 0);
			assert(split_len < vr->length);
			if((r=split_region(vmp, vr, &vr1, &vr2, split_len)) != OK) {
				printf("VM: unmap split failed\n");
				return r;
			}
			vr = vr1;
			thislimit = vr->vaddr + vr->length;
		}

		remainlen = this_unmap_limit - vr->vaddr;

		assert(this_unmap_start >= vr->vaddr);
		assert(this_unmap_limit <= thislimit);
		assert(remainlen > 0);

		r = map_unmap_region(vmp, vr, this_unmap_start - vr->vaddr,
			this_unmap_limit - this_unmap_start);

		if(r != OK) {
			printf("map_unmap_range: map_unmap_region failed\n");
			return r;
		}

		/* Reposition the iterator at the saved successor; the
		 * unmap above may have rebalanced the AVL tree. nextvr is
		 * NULL when we just handled the last region, in which case
		 * there is nothing to reposition to (the old code
		 * dereferenced NULL here).
		 */
		if(nextvr) {
			region_start_iter(&vmp->vm_regions_avl, &v_iter,
				nextvr->vaddr, AVL_EQUAL);
			assert(region_get_iter(&v_iter) == nextvr);
		}
	}

	return OK;

}
|
|
|
|
|
2009-09-21 16:49:49 +02:00
|
|
|
/*========================================================================*
|
|
|
|
* map_get_phys *
|
|
|
|
*========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int map_get_phys(struct vmproc *vmp, vir_bytes addr, phys_bytes *r)
|
2009-09-21 16:49:49 +02:00
|
|
|
{
|
|
|
|
struct vir_region *vr;
|
|
|
|
|
2012-09-18 13:17:52 +02:00
|
|
|
if (!(vr = map_lookup(vmp, addr, NULL)) ||
|
2009-09-21 16:49:49 +02:00
|
|
|
(vr->vaddr != addr))
|
|
|
|
return EINVAL;
|
|
|
|
|
2013-03-20 20:09:01 +01:00
|
|
|
if (!vr->def_memtype->regionid)
|
2009-09-21 16:49:49 +02:00
|
|
|
return EINVAL;
|
|
|
|
|
2012-10-11 15:15:49 +02:00
|
|
|
if(r)
|
2013-03-20 20:09:01 +01:00
|
|
|
*r = vr->def_memtype->regionid(vr);
|
2009-09-21 16:49:49 +02:00
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*========================================================================*
|
|
|
|
* map_get_ref *
|
|
|
|
*========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
int map_get_ref(struct vmproc *vmp, vir_bytes addr, u8_t *cnt)
|
2009-09-21 16:49:49 +02:00
|
|
|
{
|
|
|
|
struct vir_region *vr;
|
|
|
|
|
2012-09-18 13:17:52 +02:00
|
|
|
if (!(vr = map_lookup(vmp, addr, NULL)) ||
|
2013-03-20 20:09:01 +01:00
|
|
|
(vr->vaddr != addr) || !vr->def_memtype->refcount)
|
2009-09-21 16:49:49 +02:00
|
|
|
return EINVAL;
|
|
|
|
|
|
|
|
if (cnt)
|
2013-03-20 20:09:01 +01:00
|
|
|
*cnt = vr->def_memtype->refcount(vr);
|
2009-09-21 16:49:49 +02:00
|
|
|
|
|
|
|
return OK;
|
|
|
|
}
|
|
|
|
|
2012-09-18 22:19:22 +02:00
|
|
|
void get_usage_info_kernel(struct vm_usage_info *vui)
|
|
|
|
{
|
|
|
|
memset(vui, 0, sizeof(*vui));
|
2013-02-08 19:11:42 +01:00
|
|
|
vui->vui_total = kernel_boot_info.kernel_allocated_bytes +
|
|
|
|
kernel_boot_info.kernel_allocated_bytes_dynamic;
|
2012-09-18 22:19:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void get_usage_info_vm(struct vm_usage_info *vui)
|
|
|
|
{
|
|
|
|
memset(vui, 0, sizeof(*vui));
|
|
|
|
vui->vui_total = kernel_boot_info.vm_allocated_bytes +
|
|
|
|
get_vm_self_pages() * VM_PAGE_SIZE;
|
|
|
|
}
|
|
|
|
|
2010-01-19 22:00:20 +01:00
|
|
|
/*========================================================================*
|
|
|
|
* get_usage_info *
|
|
|
|
*========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
void get_usage_info(struct vmproc *vmp, struct vm_usage_info *vui)
|
2010-01-19 22:00:20 +01:00
|
|
|
{
|
|
|
|
struct vir_region *vr;
|
|
|
|
struct phys_region *ph;
|
2010-10-04 13:41:10 +02:00
|
|
|
region_iter v_iter;
|
|
|
|
region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
|
2012-12-17 19:26:52 +01:00
|
|
|
vir_bytes voffset;
|
2010-01-19 22:00:20 +01:00
|
|
|
|
|
|
|
memset(vui, 0, sizeof(*vui));
|
|
|
|
|
2012-09-18 22:19:22 +02:00
|
|
|
if(vmp->vm_endpoint == VM_PROC_NR) {
|
|
|
|
get_usage_info_vm(vui);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(vmp->vm_endpoint < 0) {
|
|
|
|
get_usage_info_kernel(vui);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
while((vr = region_get_iter(&v_iter))) {
|
2012-12-17 19:26:52 +01:00
|
|
|
for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
|
|
|
|
if(!(ph = physblock_get(vr, voffset))) continue;
|
2010-01-19 22:00:20 +01:00
|
|
|
/* All present pages are counted towards the total. */
|
2012-09-18 13:17:49 +02:00
|
|
|
vui->vui_total += VM_PAGE_SIZE;
|
2010-01-19 22:00:20 +01:00
|
|
|
|
|
|
|
if (ph->ph->refcount > 1) {
|
|
|
|
/* Any page with a refcount > 1 is common. */
|
2012-09-18 13:17:49 +02:00
|
|
|
vui->vui_common += VM_PAGE_SIZE;
|
2010-01-19 22:00:20 +01:00
|
|
|
|
|
|
|
/* Any common, non-COW page is shared. */
|
2012-10-25 16:38:38 +02:00
|
|
|
if (vr->flags & VR_SHARED)
|
2012-09-18 13:17:49 +02:00
|
|
|
vui->vui_shared += VM_PAGE_SIZE;
|
2010-01-19 22:00:20 +01:00
|
|
|
}
|
|
|
|
}
|
2010-10-04 13:41:10 +02:00
|
|
|
region_incr_iter(&v_iter);
|
2010-01-19 22:00:20 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
 *				get_region_info				     *
 *===========================================================================*/
int get_region_info(struct vmproc *vmp, struct vm_region_info *vri,
	int max, vir_bytes *nextp)
{
/* Fill at most 'max' entries of the 'vri' array with information about
 * the regions of 'vmp', starting at virtual address '*nextp'. On
 * return, '*nextp' holds the address to resume from on the next call.
 * Returns the number of entries filled.
 */
	struct vir_region *vr;
	vir_bytes next;
	int count;
	region_iter v_iter;

	next = *nextp;

	if (!max) return 0;

	/* Position the iterator at the first region at or above 'next'. */
	region_start_iter(&vmp->vm_regions_avl, &v_iter, next, AVL_GREATER_EQUAL);
	if(!(vr = region_get_iter(&v_iter))) return 0;

	for(count = 0; (vr = region_get_iter(&v_iter)) && count < max;
		region_incr_iter(&v_iter)) {
		struct phys_region *ph1 = NULL, *ph2 = NULL;
		vir_bytes voffset;

		/* where to start on next iteration, regardless of what we find now */
		next = vr->vaddr + vr->length;

		/* Report part of the region that's actually in use. */

		/* Get first and last phys_regions, if any */
		for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
			struct phys_region *ph;
			if(!(ph = physblock_get(vr, voffset))) continue;
			if(!ph1) ph1 = ph;	/* lowest present page */
			ph2 = ph;		/* highest present page so far */
		}

		/* Regions with no present pages are not reported. */
		if(!ph1 || !ph2) {
			printf("skipping empty region 0x%lx-0x%lx\n",
				vr->vaddr, vr->vaddr+vr->length);
			continue;
		}

		/* Report start+length of region starting from lowest use. */
		vri->vri_addr = vr->vaddr + ph1->offset;
		vri->vri_prot = PROT_READ;
		vri->vri_length = ph2->offset + VM_PAGE_SIZE - ph1->offset;

		/* "AND" the provided protection with per-page protection. */
		if (vr->flags & VR_WRITABLE)
			vri->vri_prot |= PROT_WRITE;
		count++;
		vri++;
	}

	*nextp = next;
	return count;
}
|
2009-09-21 16:49:49 +02:00
|
|
|
|
|
|
|
/*========================================================================*
|
|
|
|
* regionprintstats *
|
|
|
|
*========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
void printregionstats(struct vmproc *vmp)
|
2009-09-21 16:49:49 +02:00
|
|
|
{
|
|
|
|
struct vir_region *vr;
|
|
|
|
struct phys_region *pr;
|
|
|
|
vir_bytes used = 0, weighted = 0;
|
2010-10-04 13:41:10 +02:00
|
|
|
region_iter v_iter;
|
|
|
|
region_start_iter_least(&vmp->vm_regions_avl, &v_iter);
|
2009-09-21 16:49:49 +02:00
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
while((vr = region_get_iter(&v_iter))) {
|
2012-12-17 19:26:52 +01:00
|
|
|
vir_bytes voffset;
|
2010-10-04 13:41:10 +02:00
|
|
|
region_incr_iter(&v_iter);
|
2009-09-21 16:49:49 +02:00
|
|
|
if(vr->flags & VR_DIRECT)
|
|
|
|
continue;
|
2012-12-17 19:26:52 +01:00
|
|
|
for(voffset = 0; voffset < vr->length; voffset+=VM_PAGE_SIZE) {
|
|
|
|
if(!(pr = physblock_get(vr, voffset))) continue;
|
2012-09-18 13:17:49 +02:00
|
|
|
used += VM_PAGE_SIZE;
|
|
|
|
weighted += VM_PAGE_SIZE / pr->ph->refcount;
|
2009-09-21 16:49:49 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-07-05 15:58:57 +02:00
|
|
|
printf("%6lukB %6lukB\n", used/1024, weighted/1024);
|
2009-09-21 16:49:49 +02:00
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-10-04 13:41:10 +02:00
|
|
|
void map_setparent(struct vmproc *vmp)
|
|
|
|
{
|
|
|
|
region_iter iter;
|
|
|
|
struct vir_region *vr;
|
|
|
|
region_start_iter_least(&vmp->vm_regions_avl, &iter);
|
|
|
|
while((vr = region_get_iter(&iter))) {
|
|
|
|
USE(vr, vr->parent = vmp;);
|
|
|
|
region_incr_iter(&iter);
|
|
|
|
}
|
|
|
|
}
|
2012-10-11 15:15:49 +02:00
|
|
|
|
2013-08-20 14:02:33 +02:00
|
|
|
unsigned int physregions(struct vir_region *vr)
|
2012-10-11 15:15:49 +02:00
|
|
|
{
|
2013-08-20 14:02:33 +02:00
|
|
|
unsigned int n = 0;
|
2012-12-17 19:26:52 +01:00
|
|
|
vir_bytes voffset;
|
|
|
|
for(voffset = 0; voffset < vr->length; voffset += VM_PAGE_SIZE) {
|
|
|
|
if(physblock_get(vr, voffset))
|
|
|
|
n++;
|
2012-10-11 15:15:49 +02:00
|
|
|
}
|
|
|
|
return n;
|
|
|
|
}
|