minix/servers/vm/slaballoc.c
Ben Gras 50e2064049 No more intel/minix segments.
This commit removes all traces of Minix segments (the text/data/stack
memory map abstraction in the kernel) and the significance of Intel
segments (hardware segments like CS and DS that add offsets to all
addressing before page table translation). This ultimately simplifies
the memory layout and addressing, and makes the same layout possible
on non-Intel architectures.

There are only two types of addresses in the world now: virtual
and physical; the kernel and processes even share the same virtual
address space. Kernel and user addresses can be distinguished at a
glance, as processes won't use 0xF0000000 and above.

No static pre-allocated memory sizes exist any more.

Changes to booting:
        . pre_init.c leaves the kernel and modules exactly as
          they were left by the bootloader in physical memory
        . The kernel starts running using physical addressing,
          loaded at a fixed location given in its linker script by the
          bootloader.  All code and data in this phase are linked to
          this fixed low location.
        . It makes a bootstrap pagetable to map itself to a
          fixed high location (also in linker script) and jumps to
          the high address. All code and data then use this high addressing.
        . All code/data symbols linked at the low addresses are prefixed by
          an objcopy step with __k_unpaged_*, so that low code cannot
          reference high-linked symbols (which aren't valid yet) or vice
          versa (symbols that are no longer valid).
        . The two addressing modes are separated in the linker script by
          collecting the unpaged_*.o objects and linking them with low
          addresses, and linking the rest high. Some objects are linked
          twice, once low and once high.
        . The bootstrap phase passes a lot of information (e.g. free memory
          list, physical location of the modules, etc.) using the kinfo
          struct.
        . After this bootstrap the low-linked part is freed.
        . The kernel maps VM into the bootstrap page table so that VM can
          begin executing. Its first job is to make page tables for all other
          boot processes. So VM runs before RS, and RS gets a fully dynamic,
          VM-managed address space. VM gets its privilege info from RS as
          usual, but that happens after RS starts running.
        . Both the kernel loading VM and VM organizing boot processes happen
          using the libexec logic. This removes the last reason for VM to
          still know much about exec(), and vm/exec.c is gone.

Further Implementation:
        . All segments are based at 0 and have a 4 GB limit.
        . The kernel is mapped in at the top of the virtual address
          space so as not to constrain the user processes.
        . Processes do not use segments from the LDT at all; there are
          no segments in the LDT any more, so no LLDT is needed.
        . The Minix segments T/D/S are gone and so none of the
          user-space or in-kernel copy functions use them. The copy
          functions treat a process endpoint of NONE as a physical
          address, and any other endpoint as virtual (see the sketch
          after this list).
        . The umap call only makes sense to translate a virtual address
          to a physical address now.
        . Segments-related calls like newmap and alloc_segments are gone.
        . All segments-related translation in VM is gone (vir2map etc).
        . Initialization in VM is simpler as no moving around is necessary.
        . VM and all other boot processes can be linked wherever they wish
          and will be mapped in at the right location by the kernel and VM
          respectively.
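
A minimal sketch of the NONE copy convention (illustrative only; the
field names follow the kernel's struct vir_addr, and the address value
is made up):

        struct vir_addr src;
        src.proc_nr_e = NONE;      /* NONE: offset is a physical address */
        src.offset = 0x100000;     /* used untranslated */
        /* any other endpoint: offset is a virtual address in that
           process' address space */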

Other changes:
        . The multiboot code is less special: it does not use mb_print
          for its diagnostics any more but uses printf() as normal, saving
          the output into the diagnostics buffer, only printing to the
          screen using the direct print functions if a panic() occurs.
        . The multiboot code uses the flexible 'free memory map list'
          style to receive the list of free memory if available.
        . The kernel determines the memory layout of the processes to
          a degree: it tells VM where the kernel starts and ends and
          where the kernel wants the top of the process to be. VM then
          uses this entire range, i.e. the stack is right at the top,
          mmap()ped bits of memory are placed below that downwards,
          and the break grows upwards (see the sketch below).
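
An illustrative process layout under this scheme (only the 0xF0000000
kernel boundary is fixed by this commit; other details are examples):

        0xFFFFFFFF  +----------------+
                    |     kernel     |
        0xF0000000  +----------------+  <- top of process, chosen by kernel
                    |      stack     |  (right at the top)
                    |     mmap()s    |  (placed below, growing down)
                    |       ...      |
                    |      break     |  (grows upwards)
                    |  text + data   |
                    +----------------+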

Other Consequences:
        . Every process gets its own page table as address spaces
          can't be separated any more by segments.
        . As all segments are 0-based, there is no distinction between
          virtual and linear addresses, nor between userspace and
          kernel addresses.
        . Less work is done when context switching, leading to a net
          performance increase. (8% faster on my machine for 'make servers'.)
        . The layout and configuration of the GDT makes sysenter and syscall
          possible.
2012-07-15 22:30:15 +02:00


#define _SYSTEM 1
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>
#include <minix/debug.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <env.h>
#include <memory.h>
#include "glo.h"
#include "proto.h"
#include "util.h"
#include "sanitycheck.h"
#define SLABSIZES 60
#define ITEMSPERPAGE(bytes) (DATABYTES / (bytes))
#define ELBITS (sizeof(element_t)*8)
#define BITPAT(b) (1UL << ((b) % ELBITS))
#define BITEL(f, b) (f)->sdh.usebits[(b)/ELBITS]
#define OFF(f, b) assert(!GETBIT(f, b))
#define ON(f, b) assert(GETBIT(f, b))
#if MEMPROTECT
#define SLABDATAWRITABLE(data, wr) do { \
        assert(data->sdh.writable == WRITABLE_NONE); \
        assert(wr != WRITABLE_NONE); \
        vm_pagelock(data, 0); \
        data->sdh.writable = wr; \
} while(0)

#define SLABDATAUNWRITABLE(data) do { \
        assert(data->sdh.writable != WRITABLE_NONE); \
        data->sdh.writable = WRITABLE_NONE; \
        vm_pagelock(data, 1); \
} while(0)

#define SLABDATAUSE(data, code) do { \
        SLABDATAWRITABLE(data, WRITABLE_HEADER); \
        code \
        SLABDATAUNWRITABLE(data); \
} while(0)
#else
#define SLABDATAWRITABLE(data, wr)
#define SLABDATAUNWRITABLE(data)
#define SLABDATAUSE(data, code) do { code } while(0)
#endif
#define GETBIT(f, b) (BITEL(f,b) & BITPAT(b))
#define SETBIT(f, b) {OFF(f,b); SLABDATAUSE(f, BITEL(f,b)|= BITPAT(b); (f)->sdh.nused++;); }
#define CLEARBIT(f, b) {ON(f, b); SLABDATAUSE(f, BITEL(f,b)&=~BITPAT(b); (f)->sdh.nused--; (f)->sdh.freeguess = (b);); }
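
/* Worked example of the bitmap bookkeeping (illustrative): element_t is
 * a u8_t, so ELBITS is 8; for 8-byte objects, item 13 lives at
 * data + 13*8, and its allocation state is bit (13 % 8) == 5 of
 * usebits[13 / 8] == usebits[1]. SETBIT/CLEARBIT also keep nused and
 * freeguess in sync, under SLABDATAUSE so the header is writable.
 */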
#define MINSIZE 8
#define MAXSIZE (SLABSIZES-1+MINSIZE)
#define USEELEMENTS (1+(VM_PAGE_SIZE/MINSIZE/8))
static int pages = 0;
typedef u8_t element_t;
#define BITS_FULL (~(element_t)0)
typedef element_t elements_t[USEELEMENTS];
/* This file is too low-level to have global SANITYCHECKs everywhere,
 * as the (other) data structures are often necessarily in an
 * inconsistent state during a slaballoc() / slabfree(). So only do
 * our own sanity checks here, with SLABSANITYCHECK.
 */
/* Special writable values. */
#define WRITABLE_NONE -2
#define WRITABLE_HEADER -1
struct sdh {
#if SANITYCHECKS
        u32_t magic1;
#endif
        u8_t list;
        u16_t nused;    /* Number of data items used in this slab. */
        int freeguess;
        struct slabdata *next, *prev;
        elements_t usebits;
        phys_bytes phys;
#if SANITYCHECKS
        int writable;   /* data item number or WRITABLE_* */
        u32_t magic2;
#endif
};
#define DATABYTES (VM_PAGE_SIZE-sizeof(struct sdh))
#define MAGIC1 0x1f5b842f
#define MAGIC2 0x8bb5a420
#define JUNK 0xdeadbeef
#define NOJUNK 0xc0ffee
#define LIST_UNUSED 0
#define LIST_FREE 1
#define LIST_USED 2
#define LIST_FULL 3
#define LIST_NUMBER 4
static struct slabheader {
        struct slabdata {
                struct sdh sdh;
                u8_t data[DATABYTES];
        } *list_head[LIST_NUMBER];
} slabs[SLABSIZES];

static int objstats(void *, int, struct slabheader **, struct slabdata **,
        int *);
#define GETSLAB(b, s) { \
        int i; \
        assert((b) >= MINSIZE); \
        i = (b) - MINSIZE; \
        assert((i) < SLABSIZES); \
        assert((i) >= 0); \
        s = &slabs[i]; \
}
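
/* Example: slaballoc(20) is served from slabs[20 - MINSIZE], i.e.
 * slabs[12], whose pages each hold ITEMSPERPAGE(20) objects of exactly
 * 20 bytes; sizes outside [MINSIZE, MAXSIZE] trip the asserts above.
 */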
#define LH(sl, l) (sl)->list_head[l]
/* Move head of list l1 to list l2 in slabheader sl. */
#define MOVEHEAD(sl, l1, l2) { \
        struct slabdata *t; \
        assert(LH(sl,l1)); \
        REMOVEHEAD(sl, l1, t); \
        ADDHEAD(t, sl, l2); \
}
/* Remove head of list 'list' in sl, assign it unlinked to 'to'. */
#define REMOVEHEAD(sl, list, to) { \
        struct slabdata *dat; \
        dat = (to) = LH(sl, list); \
        assert(dat); \
        LH(sl, list) = dat->sdh.next; \
        UNLINKNODE(dat); \
}
/* Add slabdata nw to slabheader sl at the head of list number l. */
#define ADDHEAD(nw, sl, l) { \
        SLABDATAUSE(nw, \
                (nw)->sdh.next = LH(sl, l); \
                (nw)->sdh.prev = NULL; \
                (nw)->sdh.list = l;); \
        LH(sl, l) = (nw); \
        if((nw)->sdh.next) { \
                SLABDATAUSE((nw)->sdh.next, \
                        (nw)->sdh.next->sdh.prev = (nw);); \
        } \
}
#define UNLINKNODE(node) { \
        struct slabdata *next, *prev; \
        prev = (node)->sdh.prev; \
        next = (node)->sdh.next; \
        if(prev) { SLABDATAUSE(prev, prev->sdh.next = next;); } \
        if(next) { SLABDATAUSE(next, next->sdh.prev = prev;); } \
}
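
/* Life cycle of a slab page across the lists: pages sit on LIST_FREE
 * (nused == 0), LIST_USED (partially used) or LIST_FULL (no free
 * items). slaballoc() takes from the head of LIST_USED, refilling it
 * from LIST_FREE (or a fresh page) and demoting a page to LIST_FULL
 * when its last item is taken; slabfree() moves pages back the other
 * way.
 */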
struct slabdata *newslabdata(int list)
{
        struct slabdata *n;
        phys_bytes p;

        assert(sizeof(*n) == VM_PAGE_SIZE);

        if(!(n = vm_allocpage(&p, VMP_SLAB))) {
                printf("newslabdata: vm_allocpage failed\n");
                return NULL;
        }
        memset(n->sdh.usebits, 0, sizeof(n->sdh.usebits));
        pages++;

        n->sdh.phys = p;
#if SANITYCHECKS
        n->sdh.magic1 = MAGIC1;
        n->sdh.magic2 = MAGIC2;
#endif
        n->sdh.nused = 0;
        n->sdh.freeguess = 0;
        n->sdh.list = list;

#if SANITYCHECKS
        n->sdh.writable = WRITABLE_HEADER;
        SLABDATAUNWRITABLE(n);
#endif

        return n;
}
#if SANITYCHECKS

/*===========================================================================*
 *                              checklist                                    *
 *===========================================================================*/
static int checklist(char *file, int line,
        struct slabheader *s, int l, int bytes)
{
        struct slabdata *n = s->list_head[l];
        int ch = 0;

        while(n) {
                int count = 0, i;
#if SANITYCHECKS
                MYASSERT(n->sdh.magic1 == MAGIC1);
                MYASSERT(n->sdh.magic2 == MAGIC2);
#endif
                MYASSERT(n->sdh.list == l);
                MYASSERT(usedpages_add(n->sdh.phys, VM_PAGE_SIZE) == OK);
                if(n->sdh.prev)
                        MYASSERT(n->sdh.prev->sdh.next == n);
                else
                        MYASSERT(s->list_head[l] == n);
                if(n->sdh.next) MYASSERT(n->sdh.next->sdh.prev == n);
                for(i = 0; i < USEELEMENTS*8; i++)
                        if(i >= ITEMSPERPAGE(bytes))
                                MYASSERT(!GETBIT(n, i));
                        else
                                if(GETBIT(n, i))
                                        count++;
                MYASSERT(count == n->sdh.nused);
                ch += count;
                n = n->sdh.next;
        }

        return ch;
}

/*===========================================================================*
 *                              void slab_sanitycheck                        *
 *===========================================================================*/
void slab_sanitycheck(char *file, int line)
{
        int s;
        for(s = 0; s < SLABSIZES; s++) {
                int l;
                for(l = 0; l < LIST_NUMBER; l++) {
                        checklist(file, line, &slabs[s], l, s + MINSIZE);
                }
        }
}

/*===========================================================================*
 *                              int slabsane                                 *
 *===========================================================================*/
int slabsane_f(char *file, int line, void *mem, int bytes)
{
        struct slabheader *s;
        struct slabdata *f;
        int i;
        return (objstats(mem, bytes, &s, &f, &i) == OK);
}
#endif
#if SANITYCHECKS
static int nojunkwarning = 0;
#endif
/*===========================================================================*
 *                              void *slaballoc                              *
 *===========================================================================*/
void *slaballoc(int bytes)
{
        int i;
        int count = 0;
        struct slabheader *s;
        struct slabdata *firstused;

        SLABSANITYCHECK(SCL_FUNCTIONS);

        /* Retrieve entry in slabs[]. */
        GETSLAB(bytes, s);
        assert(s);

        /* To make the common case more common, make space in the 'used'
         * queue first.
         */
        if(!LH(s, LIST_USED)) {
                /* Make sure there is something on the freelist. */
                SLABSANITYCHECK(SCL_DETAIL);
                if(!LH(s, LIST_FREE)) {
                        struct slabdata *nd = newslabdata(LIST_FREE);
                        SLABSANITYCHECK(SCL_DETAIL);
                        if(!nd) return NULL;
                        ADDHEAD(nd, s, LIST_FREE);
                        SLABSANITYCHECK(SCL_DETAIL);
                }

                SLABSANITYCHECK(SCL_DETAIL);
                MOVEHEAD(s, LIST_FREE, LIST_USED);
                SLABSANITYCHECK(SCL_DETAIL);
        }
        SLABSANITYCHECK(SCL_DETAIL);

        assert(s);
        firstused = LH(s, LIST_USED);
        assert(firstused);
#if SANITYCHECKS
        assert(firstused->sdh.magic1 == MAGIC1);
        assert(firstused->sdh.magic2 == MAGIC2);
#endif
        assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));

        /* Scan for a free item starting at freeguess, the hint left by
         * the previous allocation or free, wrapping around the page.
         */
        for(i = firstused->sdh.freeguess;
                count < ITEMSPERPAGE(bytes); count++, i++) {
                SLABSANITYCHECK(SCL_DETAIL);
                i = i % ITEMSPERPAGE(bytes);

                if(!GETBIT(firstused, i)) {
                        char *ret;
                        SETBIT(firstused, i);
                        SLABSANITYCHECK(SCL_DETAIL);
                        if(firstused->sdh.nused == ITEMSPERPAGE(bytes)) {
                                SLABSANITYCHECK(SCL_DETAIL);
                                MOVEHEAD(s, LIST_USED, LIST_FULL);
                                SLABSANITYCHECK(SCL_DETAIL);
                        }
                        SLABSANITYCHECK(SCL_DETAIL);
                        ret = ((char *) firstused->data) + i*bytes;

#if SANITYCHECKS
#if MEMPROTECT
                        nojunkwarning++;
                        slabunlock(ret, bytes);
                        nojunkwarning--;
                        assert(!nojunkwarning);
#endif
                        *(u32_t *) ret = NOJUNK;
#if MEMPROTECT
                        slablock(ret, bytes);
#endif
#endif
                        SLABSANITYCHECK(SCL_FUNCTIONS);
                        SLABDATAUSE(firstused,
                                firstused->sdh.freeguess = i+1;);

#if SANITYCHECKS
                        if(bytes >= SLABSIZES+MINSIZE) {
                                printf("slaballoc: odd, bytes %d?\n", bytes);
                        }
                        if(!slabsane_f(__FILE__, __LINE__, ret, bytes))
                                panic("slaballoc: slabsane failed");
#endif

                        return ret;
                }

                SLABSANITYCHECK(SCL_DETAIL);
        }

        SLABSANITYCHECK(SCL_FUNCTIONS);

        panic("slaballoc: no space in 'used' slabdata");

        /* Not reached. */
        return NULL;
}
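
/* Typical caller pattern (a sketch; 'struct foo' is hypothetical, and
 * VM's util.h wraps this in SLABALLOC()/SLABFREE() style macros that
 * pass sizeof(*ptr) for you):
 *
 *      struct foo *p;
 *      if(!(p = slaballoc(sizeof(*p))))
 *              return ENOMEM;
 *      ...
 *      slabfree(p, sizeof(*p));        // must be the allocation size
 */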
/*===========================================================================*
 *                              int objstats                                 *
 *===========================================================================*/
static int objstats(void *mem, int bytes,
        struct slabheader **sp, struct slabdata **fp, int *ip)
{
#if SANITYCHECKS
#define OBJSTATSCHECK(cond) \
        if(!(cond)) { \
                printf("VM: objstats: %s failed for ptr %p, %d bytes\n", \
                        #cond, mem, bytes); \
                return EINVAL; \
        }
#else
#define OBJSTATSCHECK(cond)
#endif
        struct slabheader *s;
        struct slabdata *f;
        int i;

        OBJSTATSCHECK((char *) mem >= (char *) VM_PAGE_SIZE);

#if SANITYCHECKS
        if(*(u32_t *) mem == JUNK && !nojunkwarning) {
                util_stacktrace();
                printf("VM: WARNING: JUNK seen in slab object, likely freed\n");
        }
#endif

        /* Retrieve entry in slabs[]. */
        GETSLAB(bytes, s);

        /* Round address down to VM_PAGE_SIZE boundary to get header. */
        f = (struct slabdata *) ((char *) mem - (vir_bytes) mem % VM_PAGE_SIZE);

#if SANITYCHECKS
        OBJSTATSCHECK(f->sdh.magic1 == MAGIC1);
        OBJSTATSCHECK(f->sdh.magic2 == MAGIC2);
#endif
        OBJSTATSCHECK(f->sdh.list == LIST_USED || f->sdh.list == LIST_FULL);

        /* Make sure it's in range. */
        OBJSTATSCHECK((char *) mem >= (char *) f->data);
        OBJSTATSCHECK((char *) mem < (char *) f->data + sizeof(f->data));

        /* Get position. */
        i = (char *) mem - (char *) f->data;
        OBJSTATSCHECK(!(i % bytes));
        i = i / bytes;

        /* Make sure it is marked as allocated. */
        OBJSTATSCHECK(GETBIT(f, i));

        /* Return values. */
        *ip = i;
        *fp = f;
        *sp = s;

        return OK;
}
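
/* Example of the header recovery above (illustrative addresses, 4 KiB
 * pages): for mem == 0x80435f30, rounding down gives f == 0x80435000,
 * the start of the page-sized struct slabdata, and i indexes the object
 * within f->data. This works because every slab page is exactly
 * VM_PAGE_SIZE and begins with its struct sdh.
 */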
/*===========================================================================*
 *                              void slabfree                                *
 *===========================================================================*/
void slabfree(void *mem, int bytes)
{
        int i;
        struct slabheader *s;
        struct slabdata *f;

        SLABSANITYCHECK(SCL_FUNCTIONS);

        if(objstats(mem, bytes, &s, &f, &i) != OK) {
                panic("slabfree objstats failed");
        }

#if SANITYCHECKS
        if(*(u32_t *) mem == JUNK) {
                printf("VM: WARNING: likely double free, JUNK seen\n");
        }
#endif

#if SANITYCHECKS
#if MEMPROTECT
        slabunlock(mem, bytes);
#endif
#if JUNKFREE
        memset(mem, 0xa6, bytes);
#endif
        *(u32_t *) mem = JUNK;
        nojunkwarning++;
#if MEMPROTECT
        slablock(mem, bytes);
#endif
        nojunkwarning--;
        assert(!nojunkwarning);
#endif

        /* Free this data. */
        CLEARBIT(f, i);

        /* Check if this slab changes lists. */
        if(f->sdh.nused == 0) {
                /* Became FREE; must have been USED. */
                assert(f->sdh.list == LIST_USED);
                UNLINKNODE(f);
                if(f == LH(s, LIST_USED))
                        LH(s, LIST_USED) = f->sdh.next;
                ADDHEAD(f, s, LIST_FREE);
                SLABSANITYCHECK(SCL_DETAIL);
        } else if(f->sdh.nused == ITEMSPERPAGE(bytes)-1) {
                /* Became USED; must have been FULL. */
                assert(f->sdh.list == LIST_FULL);
                UNLINKNODE(f);
                if(f == LH(s, LIST_FULL))
                        LH(s, LIST_FULL) = f->sdh.next;
                ADDHEAD(f, s, LIST_USED);
                SLABSANITYCHECK(SCL_DETAIL);
        } else {
                /* Stays USED. */
                assert(f->sdh.list == LIST_USED);
        }

        SLABSANITYCHECK(SCL_FUNCTIONS);

        return;
}
/*===========================================================================*
 *                              void slablock                                *
 *===========================================================================*/
void slablock(void *mem, int bytes)
{
        int i;
        struct slabheader *s;
        struct slabdata *f;

        if(objstats(mem, bytes, &s, &f, &i) != OK)
                panic("slablock objstats failed");

        SLABDATAUNWRITABLE(f);

        return;
}
/*===========================================================================*
 *                              void slabunlock                              *
 *===========================================================================*/
void slabunlock(void *mem, int bytes)
{
        int i;
        struct slabheader *s;
        struct slabdata *f;

        if(objstats(mem, bytes, &s, &f, &i) != OK)
                panic("slabunlock objstats failed");

        SLABDATAWRITABLE(f, i);

        return;
}
#if SANITYCHECKS

/*===========================================================================*
 *                              void slabstats                               *
 *===========================================================================*/
void slabstats(void)
{
        int s, totalbytes = 0;
        static int n;
        n++;
        if(n%1000) return;
        for(s = 0; s < SLABSIZES; s++) {
                int l;
                for(l = 0; l < LIST_NUMBER; l++) {
                        int b, t;
                        b = s + MINSIZE;
                        t = checklist(__FILE__, __LINE__, &slabs[s], l, b);

                        if(t > 0) {
                                int bytes = t * b;
                                printf("VMSTATS: %2d slabs: %d (%dkB)\n",
                                        b, t, bytes/1024);
                                totalbytes += bytes;
                        }
                }
        }

        if(pages > 0) {
                printf("VMSTATS: %dK net used in slab objects in %d pages (%dkB): %d%% utilization\n",
                        totalbytes/1024, pages, pages*VM_PAGE_SIZE/1024,
                        100 * totalbytes / (pages*VM_PAGE_SIZE));
        }
}
#endif