xv6-cs450/kalloc.c

/*
* physical memory allocator, intended to be used to allocate
* memory for user processes. allocates in 4096-byte "pages".
* free list is sorted and combines adjacent pages into
* long runs, to make it easier to allocate big segments.
* one reason the page size is 4k is that the x86 segment size
* granularity is 4k.
*/
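/*
 * a minimal usage sketch (hypothetical caller, not part of this file),
 * assuming PAGE is the 4096-byte page size from param.h:
 *
 *   char *p = kalloc(2 * PAGE);   // two contiguous pages, 0 if none free
 *   if(p == 0)
 *     panic("out of memory");
 *   ...
 *   kfree(p, 2 * PAGE);           // return them; kfree coalesces with neighbors
 */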
#include "param.h"
#include "types.h"
#include "defs.h"
#include "param.h"
#include "mmu.h"
#include "proc.h"
#include "spinlock.h"
struct spinlock kalloc_lock = { "kalloc" };
struct run {
  struct run *next;
  int len; // bytes
};
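// freelist holds all free physical memory as a list of runs, kept
// sorted by address; kfree() merges adjacent runs, so no two runs
// on the list ever touch.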
struct run *freelist;
void ktest(void);
/*
* initialize free list of physical pages. this code
* cheats by just considering the one megabyte of pages
* after _end.
*/
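// "end" is assumed to be the linker-provided symbol marking the first
// address after the loaded kernel image; kinit rounds up from there to
// a page boundary before handing the memory to kfree().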
void
kinit(void)
{
  extern int end;
  uint mem;
  char *start;

  // advance to the next page boundary past the kernel image
  start = (char *) &end;
  start = (char *) (((uint)start + PAGE) & ~(PAGE-1));

  mem = 256; // XXX assume 256 free pages (one megabyte)
  cprintf("mem = %d\n", mem * PAGE);
  kfree(start, mem * PAGE);
  ktest();
}
void
kfree(char *cp, int len)
{
  struct run **rr;
  struct run *p = (struct run *) cp;
  struct run *pend = (struct run *) (cp + len);
  int i;

  if(len % PAGE)
    panic("kfree");

  // XXX fill with junk to help debug
  for(i = 0; i < len; i++)
    cp[i] = 1;

  acquire(&kalloc_lock);
  rr = &freelist;
  while(*rr){
    struct run *rend = (struct run *) ((char *)(*rr) + (*rr)->len);
    if(p >= *rr && p < rend)
      panic("freeing free page");
    if(pend == *rr){
      // freed range ends exactly where this run starts: merge in front of it
      p->len = len + (*rr)->len;
      p->next = (*rr)->next;
      *rr = p;
      goto out;
    }
    if(pend < *rr){
      // freed range lies strictly before this run: insert it here
      p->len = len;
      p->next = *rr;
      *rr = p;
      goto out;
    }
    if(p == rend){
      // freed range starts exactly where this run ends: extend the run,
      // then coalesce with the next run if the two now touch
      (*rr)->len += len;
      if((*rr)->next && (*rr)->next == pend){
        (*rr)->len += (*rr)->next->len;
        (*rr)->next = (*rr)->next->next;
      }
      goto out;
    }
    rr = &((*rr)->next);
  }
  // reached the end of the list: append as the last (highest-address) run
  p->len = len;
  p->next = 0;
  *rr = p;

 out:
  release(&kalloc_lock);
}
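// Worked example of the coalescing cases above (hypothetical addresses):
// with runs [0x1000,0x2000) and [0x4000,0x5000) on the list,
// kfree(0x2000, 0x1000) hits the "p == rend" case and grows the first run
// to [0x1000,0x3000); a later kfree(0x3000, 0x1000) grows it again and then
// merges it with the second run into a single [0x1000,0x5000) run.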
/*
* allocate n bytes of physical memory.
* returns a kernel-segment pointer.
* returns 0 if there's no run that's big enough.
*/
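// policy: scan the (address-sorted) free list for the first run of at
// least n bytes and carve the n bytes off its tail, so the run's start
// address and position in the list stay unchanged.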
char*
kalloc(int n)
{
  struct run **rr;

  if(n % PAGE)
    panic("kalloc");
  acquire(&kalloc_lock);
  rr = &freelist;
  while(*rr){
    struct run *r = *rr;
    if(r->len == n){
      // exact fit: unlink the whole run
      *rr = r->next;
      release(&kalloc_lock);
      return (char *) r;
    }
    if(r->len > n){
      // big enough: carve n bytes off the tail of the run
      char *p = (char *)r + (r->len - n);
      r->len -= n;
      release(&kalloc_lock);
      return p;
    }
    rr = &(*rr)->next;
  }
  release(&kalloc_lock);
  return 0;
}
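// quick self-test of the allocator, run once at boot from kinit():
// exercises coalescing, first-fit search, and exhausting then refilling
// the free list.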
void
ktest(void)
{
  char *p1, *p2, *p3;

  // test coalescing: free four pages out of order and check that they
  // merge back so the next 4-page kalloc returns the same address
  p1 = kalloc(4 * PAGE);
  kfree(p1 + 3*PAGE, PAGE);
  kfree(p1 + 2*PAGE, PAGE);
  kfree(p1, PAGE);
  kfree(p1 + PAGE, PAGE);
  p2 = kalloc(4 * PAGE);
  if(p2 != p1)
    panic("ktest");
  kfree(p2, 4 * PAGE);

  // test finding the first run that fits
  p1 = kalloc(1 * PAGE);
  p2 = kalloc(1 * PAGE);
  kfree(p1, PAGE);
  p3 = kalloc(2 * PAGE);
  kfree(p2, PAGE);
  kfree(p3, 2 * PAGE);

  // test running out of memory: allocate every page, chaining them
  // into a list, then free them all again
  p1 = 0;
  while((p2 = kalloc(PAGE)) != 0){
    *(char**)p2 = p1;
    p1 = p2;
  }
  while(p1){
    p2 = *(char **)p1;
    kfree(p1, PAGE);
    p1 = p2;
  }
  p1 = kalloc(PAGE * 20);
  if(p1 == 0)
    panic("ktest2");
  kfree(p1, PAGE * 20);
}