drop the minix_ prefixes for mmap and munmap

Also clean up various minix-specific changes and mmap-related testing.

Change-Id: I289a4fc50cf8a13df4a6082038d860853a4bd024

parent b0cab62bd2
commit dda632a24f

34 changed files with 197 additions and 180 deletions
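The change is purely a rename: callers and prototypes switch from the minix_-prefixed wrappers to the standard POSIX names. A minimal, illustrative C sketch (not taken from this commit) of what a call site looks like after the rename:

/* Illustrative only: after this commit, MINIX code calls the standard
 * mmap()/munmap() names directly instead of minix_mmap()/minix_munmap(). */
#include <stddef.h>
#include <sys/mman.h>

static void *grab_anon_page(size_t len)
{
	/* was: minix_mmap(NULL, len, ...) */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	return (p == MAP_FAILED) ? NULL : p;
}

static void drop_anon_page(void *p, size_t len)
{
	/* was: minix_munmap(p, len) */
	if (p != NULL)
		munmap(p, len);
}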
@@ -39,9 +39,6 @@
 
 #define MAX_MAP_LEN 1048576
 
-#define munmap minix_munmap
-#define mmap minix_mmap
-
 mmf_t *
 mmopen(char *fn, char *mode)
 {
@@ -570,7 +570,7 @@ static int m_block_ioctl(devminor_t minor, unsigned long request,
 size -= l;
 }
 size = rounddown(size, PAGE_SIZE);
-r = minix_munmap((void *) a, size);
+r = munmap((void *) a, size);
 if(r != OK) {
 printf("memory: WARNING: munmap failed: %d\n", r);
 }
@@ -586,7 +586,7 @@ static int m_block_ioctl(devminor_t minor, unsigned long request,
 
 /* Try to allocate a piece of memory for the RAM disk. */
 if(ramdev_size > 0 &&
-(mem = minix_mmap(NULL, ramdev_size, PROT_READ|PROT_WRITE,
+(mem = mmap(NULL, ramdev_size, PROT_READ|PROT_WRITE,
 MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
 printf("MEM: failed to get memory for ramdisk\n");
 return(ENOMEM);
@@ -68,7 +68,7 @@ uds_open(devminor_t UNUSED(orig_minor), int access,
 * in use. We use mmap instead of malloc to allow the memory to be
 * actually freed later.
 */
-if ((buf = minix_mmap(NULL, UDS_BUF, PROT_READ | PROT_WRITE,
+if ((buf = mmap(NULL, UDS_BUF, PROT_READ | PROT_WRITE,
 MAP_ANON | MAP_PRIVATE, -1, 0)) == MAP_FAILED)
 return ENOMEM;
 
@@ -166,7 +166,7 @@ uds_close(devminor_t minor)
 uds_clear_fds(minor, &uds_fd_table[minor].ancillary_data);
 
 /* Release the memory for the ring buffer. */
-minix_munmap(uds_fd_table[minor].buf, UDS_BUF);
+munmap(uds_fd_table[minor].buf, UDS_BUF);
 
 /* Set the socket back to its original UDS_FREE state. */
 memset(&uds_fd_table[minor], '\0', sizeof(uds_fd_t));
@@ -404,7 +404,7 @@ vnd_ioctl(devminor_t UNUSED(minor), unsigned long request, endpoint_t endpt,
 * of malloc to allow the memory to be actually freed later.
 */
 if (r == OK) {
-state.buf = minix_mmap(NULL, VND_BUF_SIZE, PROT_READ |
+state.buf = mmap(NULL, VND_BUF_SIZE, PROT_READ |
 PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
 if (state.buf == MAP_FAILED)
 r = ENOMEM;
@@ -432,7 +432,7 @@ vnd_ioctl(devminor_t UNUSED(minor), unsigned long request, endpoint_t endpt,
 }
 
 if (r != OK) {
-minix_munmap(state.buf, VND_BUF_SIZE);
+munmap(state.buf, VND_BUF_SIZE);
 close(state.fd);
 state.fd = -1;
 }
@@ -457,7 +457,7 @@ vnd_ioctl(devminor_t UNUSED(minor), unsigned long request, endpoint_t endpt,
 * allow reuse until the device has been closed by the other
 * users.
 */
-minix_munmap(state.buf, VND_BUF_SIZE);
+munmap(state.buf, VND_BUF_SIZE);
 close(state.fd);
 state.fd = -1;
 
@@ -923,8 +923,8 @@
 #define truncate _truncate
 #define write _write
 #define writev _writev
-#define minix_mmap _minix_mmap
-#define minix_munmap _minix_munmap
+#define mmap _mmap
+#define munmap _munmap
 #define vfork __vfork14
 #endif /* __minix */
 
@@ -12,9 +12,6 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
-#define mmap minix_mmap
-#define munmap minix_munmap
-
 #include "malloc-debug.h"
 
 #if 0
@@ -13,11 +13,9 @@
 */
 
 #ifdef __minix
-#include <machine/vmparam.h>
-#define mmap minix_mmap
-#define munmap minix_munmap
 #ifdef _LIBSYS
 #include <minix/sysutil.h>
+#include <machine/vmparam.h>
 #define MALLOC_NO_SYSCALLS
 #define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
 #define wrterror(w) panic("libminc malloc error: %s\n", w)
@@ -98,8 +96,8 @@ void utrace(struct ut *, int);
 * This is necessary for VM to be able to define its own versions, and
 * use this malloc.
 */
-#undef minix_mmap
-#undef minix_munmap
+#undef mmap
+#undef munmap
 
 #include <sys/types.h>
 #if defined(__NetBSD__)
@@ -12,8 +12,8 @@ __weak_alias(vm_remap, _vm_remap)
 __weak_alias(vm_unmap, _vm_unmap)
 __weak_alias(vm_getphys, _vm_getphys)
 __weak_alias(vm_getrefcount, _vm_getrefcount)
-__weak_alias(minix_mmap, _minix_mmap)
-__weak_alias(minix_munmap, _minix_munmap)
+__weak_alias(mmap, _mmap)
+__weak_alias(munmap, _munmap)
 #endif
 
 
@@ -71,13 +71,13 @@ int minix_vfs_mmap(endpoint_t who, off_t offset, size_t len,
 return _syscall(VM_PROC_NR, VM_VFS_MMAP, &m);
 }
 
-void *minix_mmap(void *addr, size_t len, int prot, int flags,
+void *mmap(void *addr, size_t len, int prot, int flags,
 int fd, off_t offset)
 {
 return minix_mmap_for(SELF, addr, len, prot, flags, fd, offset);
 }
 
-int minix_munmap(void *addr, size_t len)
+int munmap(void *addr, size_t len)
 {
 message m;
 
@@ -168,10 +168,10 @@ lmfs_alloc_block(struct buf *bp)
 
 len = roundup(fs_block_size, PAGE_SIZE);
 
-if((bp->data = minix_mmap(0, fs_block_size,
+if((bp->data = mmap(0, fs_block_size,
 PROT_READ|PROT_WRITE, MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
 free_unused_blocks();
-if((bp->data = minix_mmap(0, fs_block_size, PROT_READ|PROT_WRITE,
+if((bp->data = mmap(0, fs_block_size, PROT_READ|PROT_WRITE,
 MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
 panic("libminixfs: could not allocate block");
 }
@@ -190,7 +190,7 @@ struct buf *lmfs_get_block(register dev_t dev, register block_t block,
 return lmfs_get_block_ino(dev, block, only_search, VMC_NO_INODE, 0);
 }
 
-void minix_munmap_t(void *a, int len)
+void munmap_t(void *a, int len)
 {
 vir_bytes av = (vir_bytes) a;
 assert(a);
@@ -202,7 +202,7 @@ void minix_munmap_t(void *a, int len)
 
 assert(!(len % PAGE_SIZE));
 
-if(minix_munmap(a, len) < 0)
+if(munmap(a, len) < 0)
 panic("libminixfs cache: munmap failed");
 }
 
@@ -240,7 +240,7 @@ static void freeblock(struct buf *bp)
 MARKCLEAN(bp); /* NO_DEV blocks may be marked dirty */
 if(bp->lmfs_bytes > 0) {
 assert(bp->data);
-minix_munmap_t(bp->data, bp->lmfs_bytes);
+munmap_t(bp->data, bp->lmfs_bytes);
 bp->lmfs_bytes = 0;
 bp->data = NULL;
 } else assert(!bp->data);
@@ -571,7 +571,7 @@ void lmfs_invalidate(
 if (bp->lmfs_dev == device) {
 assert(bp->data);
 assert(bp->lmfs_bytes > 0);
-minix_munmap_t(bp->data, bp->lmfs_bytes);
+munmap_t(bp->data, bp->lmfs_bytes);
 bp->lmfs_dev = NO_DEV;
 bp->lmfs_bytes = 0;
 bp->data = NULL;
@@ -862,7 +862,7 @@ void lmfs_buf_pool(int new_nr_bufs)
 for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
 if(bp->data) {
 assert(bp->lmfs_bytes > 0);
-minix_munmap_t(bp->data, bp->lmfs_bytes);
+munmap_t(bp->data, bp->lmfs_bytes);
 }
 }
 }
@@ -403,7 +403,7 @@ void *arg;
 char *guard_start, *guard_end;
 
 stacksize = round_page(stacksize + MTHREAD_GUARDSIZE);
-stackaddr = minix_mmap(NULL, stacksize,
+stackaddr = mmap(NULL, stacksize,
 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
 -1, 0);
 if (stackaddr == MAP_FAILED)
@@ -431,7 +431,7 @@ void *arg;
 # error "Unsupported platform"
 #endif
 stacksize = guarded_stacksize;
-if (minix_munmap(guard_start, MTHREAD_GUARDSIZE) != 0)
+if (munmap(guard_start, MTHREAD_GUARDSIZE) != 0)
 mthread_panic("unable to unmap stack space for guard");
 tcb->m_context.uc_stack.ss_sp = guard_end;
 } else
@@ -465,7 +465,7 @@ mthread_thread_t thread;
 rt->m_cond = NULL;
 if (rt->m_attr.ma_stackaddr == NULL) { /* We allocated stack space */
 if (rt->m_context.uc_stack.ss_sp) {
-if (minix_munmap(rt->m_context.uc_stack.ss_sp,
+if (munmap(rt->m_context.uc_stack.ss_sp,
 rt->m_context.uc_stack.ss_size) != 0) {
 mthread_panic("unable to unmap memory");
 }
@@ -184,7 +184,7 @@ slowccalloc(struct puffs_usermount *pu)
 if (puffs_fakecc)
 return &fakecc;
 
-sp = minix_mmap(NULL, stacksize, PROT_READ|PROT_WRITE,
+sp = mmap(NULL, stacksize, PROT_READ|PROT_WRITE,
 MAP_ANON|MAP_PRIVATE, -1, 0);
 if (sp == MAP_FAILED)
 return NULL;
@@ -194,11 +194,11 @@ slowccalloc(struct puffs_usermount *pu)
 
 /* initialize both ucontext's */
 if (getcontext(&pcc->pcc_uc) == -1) {
-minix_munmap(pcc, stacksize);
+munmap(pcc, stacksize);
 return NULL;
 }
 if (getcontext(&pcc->pcc_uc_ret) == -1) {
-minix_munmap(pcc, stacksize);
+munmap(pcc, stacksize);
 return NULL;
 }
 
@@ -280,7 +280,7 @@ cc_free(struct puffs_cc *pcc)
 
 DPRINTF(("invalidating pcc %p\n", pcc));
 assert(!puffs_fakecc);
-minix_munmap(pcc, stacksize);
+munmap(pcc, stacksize);
 }
 
 void
@@ -16,39 +16,18 @@ void *alloc_contig(size_t len, int flags, phys_bytes *phys)
 if(flags & AC_LOWER1M)
 mmapflags |= MAP_LOWER1M;
 if(flags & AC_ALIGN64K)
-mmapflags |= MAP_ALIGN64K;
+mmapflags |= MAP_ALIGNMENT_64KB;
 
-/* First try to get memory with minix_mmap. This is guaranteed
+/* First try to get memory with mmap. This is guaranteed
 * to be page-aligned, and we can tell VM it has to be
 * pre-allocated and contiguous.
 */
 errno = 0;
-buf = (vir_bytes) minix_mmap(0, len, PROT_READ|PROT_WRITE, mmapflags, -1, 0);
+buf = (vir_bytes) mmap(0, len, PROT_READ|PROT_WRITE, mmapflags, -1, 0);
 
-/* If that failed, maybe we're not running in paged mode.
- * If that's the case, ENXIO will be returned.
- * Memory returned with malloc() will be preallocated and
- * contiguous, so fallback on that, and ask for a little extra
- * so we can page align it ourselves.
- */
 if(buf == (vir_bytes) MAP_FAILED) {
-u32_t align = 0;
-if(errno != (_SIGN ENXIO)) {
 return NULL;
 }
-if(flags & AC_ALIGN4K)
-align = 4*1024;
-if(flags & AC_ALIGN64K)
-align = 64*1024;
-if(len + align < len)
-return NULL;
-len += align;
-if(!(buf = (vir_bytes) malloc(len))) {
-return NULL;
-}
-if(align)
-buf += align - (buf % align);
-}
 
 /* Get physical address, if requested. */
 if(phys != NULL && sys_umap(SELF, VM_D, buf, len, phys) != OK)
@@ -59,6 +38,6 @@ void *alloc_contig(size_t len, int flags, phys_bytes *phys)
 
 int free_contig(void *addr, size_t len)
 {
-return minix_munmap(addr, len);
+return munmap(addr, len);
 }
 
@@ -57,10 +57,6 @@ __RCSID("$NetBSD: load.c,v 1.42 2010/12/24 12:41:43 skrll Exp $");
 #include <sys/sysctl.h>
 #include <dirent.h>
 
-#ifdef __minix
-#define munmap minix_munmap
-#endif
-
 #include "debug.h"
 #include "rtld.h"
 
@@ -50,8 +50,6 @@ __RCSID("$NetBSD: map_object.c,v 1.45 2012/10/13 21:13:07 dholland Exp $");
 #include "rtld.h"
 
 #ifdef __minix
-#define munmap minix_munmap
-#define mmap minix_mmap
 #ifndef MAP_SHARED
 #define MAP_SHARED MAP_PRIVATE /* minix: MAP_SHARED should be MAP_PRIVATE */
 #endif
@@ -49,10 +49,6 @@ __RCSID("$NetBSD: map_object.c,v 1.45 2012/10/13 21:13:07 dholland Exp $");
 #include "debug.h"
 #include "rtld.h"
 
-#ifdef __minix
-#define munmap minix_munmap
-#endif
-
 #define MINIXVERBOSE 0
 
 #if MINIXVERBOSE
@@ -137,7 +133,7 @@ _rtld_map_object_fallback(const char *path, int fd, const struct stat *sb)
 }
 
 #ifdef __minix
-ehdr = minix_mmap(NULL, _rtld_pagesz, PROT_READ|PROT_WRITE,
+ehdr = mmap(NULL, _rtld_pagesz, PROT_READ|PROT_WRITE,
 MAP_PREALLOC|MAP_ANON, -1, (off_t)0);
 Pread(ehdr, _rtld_pagesz, fd, 0);
 #if MINIXVERBOSE
@@ -368,7 +364,7 @@ _rtld_map_object_fallback(const char *path, int fd, const struct stat *sb)
 mapbase = mmap(base_addr, mapsize, text_flags,
 mapflags | MAP_FILE | MAP_PRIVATE, fd, base_offset);
 #else
-mapbase = minix_mmap(base_addr, mapsize, PROT_READ|PROT_WRITE,
+mapbase = mmap(base_addr, mapsize, PROT_READ|PROT_WRITE,
 MAP_ANON | MAP_PREALLOC, -1, 0);
 #if MINIXVERBOSE
 fprintf(stderr, "minix mmap for whole block: 0x%lx-0x%lx\n", mapbase, mapbase+mapsize);
@@ -38,10 +38,6 @@
 * John Polstra <jdp@polstra.com>.
 */
 
-#ifdef __minix
-#define munmap minix_munmap
-#endif
-
 #include <sys/cdefs.h>
 #ifndef lint
 __RCSID("$NetBSD: rtld.c,v 1.159 2012/10/01 03:03:46 riastradh Exp $");
@@ -60,12 +60,6 @@
 * SUCH DAMAGE.
 */
 
-#ifdef __minix
-/* Minix mmap can do this. */
-#define mmap minix_mmap
-#define munmap minix_munmap
-#endif
-
 #if defined(LIBC_SCCS) && !defined(lint)
 /*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/
 #endif /* LIBC_SCCS and not lint */
@@ -77,12 +77,6 @@
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
-/* mmap/munmap are used in this file just to allocate/free memory
- * so these functions are ok.
- */
-#define mmap minix_mmap
-#define munmap minix_munmap
-
 /*
 * mke2fs.c: "re-invent (dumb but non-GPLed) wheel as a fun project"
 *
@@ -72,7 +72,7 @@ int do_shmget(message *m)
 return ENOSPC;
 shm = &shm_list[shm_list_nr];
 memset(shm, 0, sizeof(struct shm_struct));
-shm->page = (vir_bytes) minix_mmap(0, size,
+shm->page = (vir_bytes) mmap(0, size,
 PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
 if (shm->page == (vir_bytes) MAP_FAILED)
 return ENOMEM;
@@ -171,7 +171,7 @@ void update_refcount_and_destroy(void)
 int size = shm_list[i].shmid_ds.shm_segsz;
 if (size % PAGE_SIZE)
 size += PAGE_SIZE - size % PAGE_SIZE;
-minix_munmap((void *)shm_list[i].page, size);
+munmap((void *)shm_list[i].page, size);
 }
 }
 shm_list_nr = j;
@@ -6,19 +6,19 @@
 
 #include "inc.h"
 
-#define minix_munmap _minix_munmap
+#define munmap _munmap
 #include <sys/mman.h>
-#undef minix_munmap
+#undef munmap
 
 int unmap_ok = 0;
 
 /*===========================================================================*
-* minix_munmap *
+* munmap *
 *===========================================================================*/
-int minix_munmap(void *addrstart, vir_bytes len)
+int munmap(void *addrstart, vir_bytes len)
 {
 if(!unmap_ok)
 return ENOSYS;
 
-return _minix_munmap(addrstart, len);
+return _munmap(addrstart, len);
 }
@@ -41,7 +41,7 @@ static struct vir_region *mmap_region(struct vmproc *vmp, vir_bytes addr,
 
 if(vmm_flags & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
 if(vmm_flags & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
-if(vmm_flags & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
+if(vmm_flags & MAP_ALIGNMENT_64KB) vrflags |= VR_PHYS64K;
 if(vmm_flags & MAP_PREALLOC) mfflags |= MF_PREALLOC;
 if(vmm_flags & MAP_UNINITIALIZED) {
 if(!execpriv) return NULL;
@@ -272,7 +272,7 @@ int swap_proc_dyn_data(struct vmproc *src_vmp, struct vmproc *dst_vmp)
 return OK;
 }
 
-void *minix_mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
+void *mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
 {
 void *ret;
 phys_bytes p;
@@ -287,7 +287,7 @@ void *minix_mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
 return ret;
 }
 
-int minix_munmap(void * addr, size_t len)
+int munmap(void * addr, size_t len)
 {
 vm_freepages((vir_bytes) addr, roundup(len, VM_PAGE_SIZE)/VM_PAGE_SIZE);
 return 0;
135 sys/sys/mman.h
@@ -37,7 +37,6 @@
 #include <sys/featuretest.h>
 
 #include <machine/ansi.h>
-#include <minix/type.h>
 
 #ifdef _BSD_SIZE_T_
 typedef _BSD_SIZE_T_ size_t;
@@ -74,48 +73,144 @@ typedef __off_t off_t; /* file offset */
 #endif
 #define MAP_PRIVATE 0x0002 /* changes are private */
 
+#ifdef _KERNEL
+/*
+* Deprecated flag; these are treated as MAP_PRIVATE internally by
+* the kernel.
+*/
+#define MAP_COPY 0x0004 /* "copy" region at mmap time */
+#endif
+
+/*
+* Other flags
+*/
+#define MAP_FIXED 0x0010 /* map addr must be exactly as requested */
+#define MAP_RENAME 0x0020 /* Sun: rename private pages to file */
+#define MAP_NORESERVE 0x0040 /* Sun: don't reserve needed swap area */
+#define MAP_INHERIT 0x0080 /* region is retained after exec */
+#define MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */
+#define MAP_TRYFIXED 0x0400 /* attempt hint address, even within break */
+#define MAP_WIRED 0x0800 /* mlock() mapping when it is established */
+
 /*
 * Mapping type
 */
-#define MAP_ANON 0x0004 /* anonymous memory */
+#define MAP_FILE 0x0000 /* map from file (default) */
+#define MAP_ANON 0x1000 /* allocated from memory, swap space */
+#define MAP_STACK 0x2000 /* allocated from memory, swap space (stack) */
+
 /*
-* Minix specific flags.
+* Alignment (expressed in log2). Must be >= log2(PAGE_SIZE) and
+* < # bits in a pointer (26 (acorn26), 32 or 64).
 */
-#define MAP_PREALLOC 0x0008 /* not on-demand */
-#define MAP_CONTIG 0x0010 /* contiguous in physical memory */
-#define MAP_LOWER16M 0x0020 /* physically below 16MB */
-#define MAP_ALIGN64K 0x0040 /* physically aligned at 64kB */
-#define MAP_LOWER1M 0x0080 /* physically below 16MB */
-#define MAP_ALIGNMENT_64KB MAP_ALIGN64K
-#define MAP_FIXED 0x0200 /* require mapping to happen at hint */
-#define MAP_THIRDPARTY 0x0400 /* perform on behalf of any process */
-#define MAP_UNINITIALIZED 0x0800 /* do not clear memory */
-#define MAP_FILE 0x1000 /* it's a file */
+#define MAP_ALIGNED(n) ((n) << MAP_ALIGNMENT_SHIFT)
+#define MAP_ALIGNMENT_SHIFT 24
+#define MAP_ALIGNMENT_MASK MAP_ALIGNED(0xff)
+#define MAP_ALIGNMENT_64KB MAP_ALIGNED(16) /* 2^16 */
+#define MAP_ALIGNMENT_16MB MAP_ALIGNED(24) /* 2^24 */
+#define MAP_ALIGNMENT_4GB MAP_ALIGNED(32) /* 2^32 */
+#define MAP_ALIGNMENT_1TB MAP_ALIGNED(40) /* 2^40 */
+#define MAP_ALIGNMENT_256TB MAP_ALIGNED(48) /* 2^48 */
+#define MAP_ALIGNMENT_64PB MAP_ALIGNED(56) /* 2^56 */
+
+#ifdef __minix
+/*
+* Minix-specific flags
+*/
+#define MAP_UNINITIALIZED 0x040000 /* do not clear memory */
+#define MAP_PREALLOC 0x080000 /* not on-demand */
+#define MAP_CONTIG 0x100000 /* contiguous in physical memory */
+#define MAP_LOWER16M 0x200000 /* physically below 16MB */
+#define MAP_LOWER1M 0x400000 /* physically below 16MB */
+#define MAP_THIRDPARTY 0x800000 /* perform on behalf of any process */
+#endif
 
 /*
 * Error indicator returned by mmap(2)
 */
 #define MAP_FAILED ((void *) -1) /* mmap() failed */
+
+/*
+* Flags to msync
+*/
+#define MS_ASYNC 0x01 /* perform asynchronous writes */
+#define MS_INVALIDATE 0x02 /* invalidate cached data */
+#define MS_SYNC 0x04 /* perform synchronous writes */
+
+/*
+* Flags to mlockall
+*/
+#define MCL_CURRENT 0x01 /* lock all pages currently mapped */
+#define MCL_FUTURE 0x02 /* lock all pages mapped in the future */
+
+/*
+* POSIX memory avissory values.
+* Note: keep consistent with the original defintions below.
+*/
+#define POSIX_MADV_NORMAL 0 /* No further special treatment */
+#define POSIX_MADV_RANDOM 1 /* Expect random page references */
+#define POSIX_MADV_SEQUENTIAL 2 /* Expect sequential page references */
+#define POSIX_MADV_WILLNEED 3 /* Will need these pages */
+#define POSIX_MADV_DONTNEED 4 /* Don't need these pages */
+
+#if defined(_NETBSD_SOURCE)
+/*
+* Original advice values, equivalent to POSIX defintions,
+* and few implementation-specific ones.
+*/
+#define MADV_NORMAL POSIX_MADV_NORMAL
+#define MADV_RANDOM POSIX_MADV_RANDOM
+#define MADV_SEQUENTIAL POSIX_MADV_SEQUENTIAL
+#define MADV_WILLNEED POSIX_MADV_WILLNEED
+#define MADV_DONTNEED POSIX_MADV_DONTNEED
+#define MADV_SPACEAVAIL 5 /* Insure that resources are reserved */
+#define MADV_FREE 6 /* Pages are empty, free them */
+
+/*
+* Flags to minherit
+*/
+#define MAP_INHERIT_SHARE 0 /* share with child */
+#define MAP_INHERIT_COPY 1 /* copy into child */
+#define MAP_INHERIT_NONE 2 /* absent from child */
+#define MAP_INHERIT_DONATE_COPY 3 /* copy and delete -- not
+ implemented in UVM */
+#define MAP_INHERIT_DEFAULT MAP_INHERIT_COPY
+#endif
+
+#ifndef _KERNEL
 
 #include <sys/cdefs.h>
 
 __BEGIN_DECLS
-#ifndef __minix
 void * mmap(void *, size_t, int, int, int, off_t);
 int munmap(void *, size_t);
-#else
-void * minix_mmap(void *, size_t, int, int, int, off_t);
-void * minix_mmap64(void *, size_t, int, int, int, u64_t);
-void * minix_mmap_for(endpoint_t, void *, size_t, int, int, int, u64_t);
-int minix_munmap(void *, size_t);
+int mprotect(void *, size_t, int);
+#ifndef __LIBC12_SOURCE__
+int msync(void *, size_t, int) __RENAME(__msync13);
+#endif
+int mlock(const void *, size_t);
+int munlock(const void *, size_t);
+int mlockall(int);
+int munlockall(void);
+#if defined(_NETBSD_SOURCE)
+int madvise(void *, size_t, int);
+int mincore(void *, size_t, char *);
+int minherit(void *, size_t, int);
+void * mremap(void *, size_t, void *, size_t, int);
+#endif
+int posix_madvise(void *, size_t, int);
+
+#ifdef __minix
+#include <minix/endpoint.h>
 void * vm_remap(endpoint_t d, endpoint_t s, void *da, void *sa, size_t si);
 void * vm_remap_ro(endpoint_t d, endpoint_t s, void *da, void *sa, size_t si);
 int vm_unmap(endpoint_t endpt, void *addr);
 unsigned long vm_getphys(endpoint_t endpt, void *addr);
 u8_t vm_getrefcount(endpoint_t endpt, void *addr);
-#endif /* __minix */
+#endif
 
 __END_DECLS
 
+#endif /* !_KERNEL */
 
 #endif /* !_SYS_MMAN_H_ */
@@ -103,7 +103,7 @@ static void *alloc_dma_memory(size_t size)
 if (contig)
 ptr = alloc_contig(size, 0, NULL);
 else
-ptr = minix_mmap(NULL, size, PROT_READ | PROT_WRITE,
+ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 MAP_PREALLOC | MAP_ANON, -1, 0);
 
 if (ptr == MAP_FAILED)
@@ -118,7 +118,7 @@ static void free_dma_memory(void *ptr, size_t size)
 if (contig)
 free_contig(ptr, size);
 else
-minix_munmap(ptr, size);
+munmap(ptr, size);
 }
 
 static int set_result(result_t *res, int type, ssize_t value)
@@ -58,7 +58,7 @@ static void alloc_buf(struct buf *buf, phys_bytes next)
 * very unlikely that the actual piece of memory will end up
 * being physically contiguous with the last piece.
 */
-tmp = minix_mmap((void *) (buf->addr + len + PAGE_SIZE), len,
+tmp = mmap((void *) (buf->addr + len + PAGE_SIZE), len,
 PROT_READ | PROT_WRITE, MAP_ANON | MAP_PREALLOC |
 MAP_CONTIG, -1, 0L);
 
@@ -66,7 +66,7 @@ static void alloc_buf(struct buf *buf, phys_bytes next)
 panic("unable to allocate temporary buffer");
 }
 
-addr = (vir_bytes) minix_mmap((void *) buf->addr, len,
+addr = (vir_bytes) mmap((void *) buf->addr, len,
 PROT_READ | PROT_WRITE, flags, -1, 0L);
 
 if (addr != buf->addr)
@@ -75,7 +75,7 @@ static void alloc_buf(struct buf *buf, phys_bytes next)
 if (!prealloc)
 return;
 
-if ((r = minix_munmap(tmp, len)) != OK)
+if ((r = munmap(tmp, len)) != OK)
 panic("unable to unmap buffer (%d)", errno);
 
 if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
@@ -92,9 +92,9 @@ static void alloc_buf(struct buf *buf, phys_bytes next)
 * unmapped the temporary memory also, there's a small chance we'll end
 * up with a different physical page this time. Who knows.
 */
-minix_munmap((void *) addr, len);
+munmap((void *) addr, len);
 
-addr = (vir_bytes) minix_mmap((void *) buf->addr, len,
+addr = (vir_bytes) mmap((void *) buf->addr, len,
 PROT_READ | PROT_WRITE, flags, -1, 0L);
 
 if (addr != buf->addr)
@@ -147,7 +147,7 @@ static void free_bufs(struct buf *buf, int count)
 
 for (i = 0; i < count; i++) {
 for (j = 0; j < buf[i].pages; j++) {
-r = minix_munmap((void *) (buf[i].addr + j * PAGE_SIZE),
+r = munmap((void *) (buf[i].addr + j * PAGE_SIZE),
 PAGE_SIZE);
 
 if (r != OK)
@@ -664,7 +664,7 @@ static void test_vector2(void)
 got_result("invalid virtual vector pointer");
 
 /* Test unallocated virtual vector. */
-vvecp = (struct vumap_vir *) minix_mmap(NULL, PAGE_SIZE,
+vvecp = (struct vumap_vir *) mmap(NULL, PAGE_SIZE,
 PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);
 
 if (vvecp == MAP_FAILED)
@@ -677,7 +677,7 @@ static void test_vector2(void)
 
 got_result("unallocated virtual vector pointer");
 
-minix_munmap((void *) vvecp, PAGE_SIZE);
+munmap((void *) vvecp, PAGE_SIZE);
 
 /* Test invalid physical vector pointer. */
 r = do_vumap(SELF, vvec, 2, 0, VUA_READ, NULL, &pcount);
@@ -687,7 +687,7 @@ static void test_vector2(void)
 got_result("invalid physical vector pointer");
 
 /* Test unallocated physical vector. */
-pvecp = (struct vumap_phys *) minix_mmap(NULL, PAGE_SIZE,
+pvecp = (struct vumap_phys *) mmap(NULL, PAGE_SIZE,
 PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);
 
 if (pvecp == MAP_FAILED)
@@ -705,7 +705,7 @@ static void test_vector2(void)
 
 got_result("unallocated physical vector pointer");
 
-minix_munmap((void *) pvecp, PAGE_SIZE);
+munmap((void *) pvecp, PAGE_SIZE);
 
 free_bufs(buf, 2);
 }
14 test/run
@@ -21,27 +21,17 @@ badones= # list of tests that failed
 
 # Programs that require setuid
 setuids="test11 test33 test43 test44 test46 test56 test60 test61 test65 \
-test69 test76 test77 test78" # test73"
+test69 test76 test73 test77 test78"
 # Scripts that require to be run as root
 rootscripts="testisofs testvnd"
 
 alltests="1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 \
 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 \
 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 \
-61 62 63 64 65 66 67 68 69 70 71 72 75 76 77 78 79 \
+61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 \
 sh1 sh2 interp mfs isofs vnd"
 tests_no=`expr 0`
 
-# test mmap only if enabled in sysenv
-filemap=1 # the default is on
-if sysenv filemap >/dev/null
-then filemap=`sysenv filemap`
-fi
-
-if [ "$filemap" -ne 0 ]
-then alltests="$alltests 74"
-fi
-
 # If root, make sure the setuid tests have the correct permissions
 # and make the dir bin-owned.
 if [ "$ROOT" ]
@@ -31,16 +31,16 @@ main(int argc, char *argv[])
 start(44);
 
 for(i = 0; i < CHUNKS; i++) {
-v[i] = minix_mmap(vaddr, CHUNKSIZE, PROT_READ|PROT_WRITE, 0,
+v[i] = mmap(vaddr, CHUNKSIZE, PROT_READ|PROT_WRITE, 0,
 -1, 0);
 if(v[i] == MAP_FAILED) {
-perror("minix_mmap");
-fprintf(stderr, "minix_mmap failed\n");
+perror("mmap");
+fprintf(stderr, "mmap failed\n");
 quit();
 }
 if(v[i] != vaddr) {
 fprintf(stderr,
-"minix_mmap said 0x%p but i wanted 0x%p\n",
+"mmap said 0x%p but i wanted 0x%p\n",
 v[i], vaddr);
 quit();
 }
@@ -33,7 +33,7 @@ int main (int argc, char *argv[])
 
 if(f == 0) {
 /* child: use up as much memory as we can */
-while((addrs[i++ % NADDRS] = minix_mmap(0, LEN, PROT_READ|PROT_WRITE,
+while((addrs[i++ % NADDRS] = mmap(0, LEN, PROT_READ|PROT_WRITE,
 MAP_PREALLOC|MAP_CONTIG|MAP_ANON, -1, 0)) != MAP_FAILED)
 ;
 exit(0);
@@ -47,7 +47,7 @@ readblock(int b, int blocksize, u32_t seed, char *data)
 }
 }
 
-if((mmapdata = minix_mmap(NULL, blocksize, PROT_READ, MAP_PRIVATE | MAP_FILE,
+if((mmapdata = mmap(NULL, blocksize, PROT_READ, MAP_PRIVATE | MAP_FILE,
 fd, offset)) == MAP_FAILED) {
 perror("mmap");
 return -1;
@@ -65,7 +65,7 @@ readblock(int b, int blocksize, u32_t seed, char *data)
 return -1;
 }
 
-if(minix_munmap(mmapdata, blocksize) < 0) {
+if(munmap(mmapdata, blocksize) < 0) {
 perror("munmap");
 return -1;
 }
@@ -79,7 +79,7 @@ void basic_regression(void)
 {
 void *block;
 #define BLOCKSIZE (PAGE_SIZE*10)
-block = minix_mmap(0, BLOCKSIZE, PROT_READ | PROT_WRITE,
+block = mmap(0, BLOCKSIZE, PROT_READ | PROT_WRITE,
 MAP_PRIVATE | MAP_ANON, -1, 0);
 
 if(block == MAP_FAILED) { e(1); exit(1); }
@@ -87,7 +87,7 @@ void basic_regression(void)
 memset(block, 0, BLOCKSIZE);
 
 /* shrink from bottom */
-minix_munmap(block, PAGE_SIZE);
+munmap(block, PAGE_SIZE);
 }
 
 int
@@ -33,9 +33,9 @@ int dowriteblock(int b, int blocksize, u32_t seed, char *block)
 
 if((bdata = vm_map_cacheblock(MYDEV, dev_off,
 VMC_NO_INODE, 0, NULL, blocksize)) == MAP_FAILED) {
-if((bdata = minix_mmap(0, blocksize,
+if((bdata = mmap(0, blocksize,
 PROT_READ|PROT_WRITE, MAP_ANON, -1, 0)) == MAP_FAILED) {
-printf("minix_mmap failed\n");
+printf("mmap failed\n");
 exit(1);
 }
 mustset = 1;
@@ -49,8 +49,8 @@ int dowriteblock(int b, int blocksize, u32_t seed, char *block)
 exit(1);
 }
 
-if(minix_munmap(bdata, blocksize) < 0) {
-printf("dowriteblock: minix_munmap failed %d\n", r);
+if(munmap(bdata, blocksize) < 0) {
+printf("dowriteblock: munmap failed %d\n", r);
 exit(1);
 }
 
@@ -69,8 +69,8 @@ int readblock(int b, int blocksize, u32_t seed, char *block)
 
 memcpy(block, bdata, blocksize);
 
-if(minix_munmap(bdata, blocksize) < 0) {
-printf("dowriteblock: minix_munmap failed\n");
+if(munmap(bdata, blocksize) < 0) {
+printf("dowriteblock: munmap failed\n");
 exit(1);
 }
 
@@ -44,11 +44,6 @@
 #include <err.h>
 #include "returns.h"
 
-#if defined(__minix)
-#define munmap minix_munmap
-#define mmap minix_mmap
-#endif /* defined(__minix) */
-
 void yyparse(void);
 #define DEF_TERMPATH "."
 #define DEF_TERM "atf"
@@ -84,8 +84,6 @@ __RCSID("$NetBSD: ldd_elfxx.c,v 1.4 2009/09/07 04:49:03 dholland Exp $");
 #include "rtld.h"
 #include "ldd.h"
 
-#define munmap minix_munmap
-
 /*
 * elfxx_ldd() - bit-size independant ELF ldd implementation.
 * returns 0 on success and -1 on failure.
@@ -70,8 +70,6 @@
 
 
 #ifdef __minix
-#define mmap minix_mmap
-#define munmap minix_munmap
 #ifndef MAP_COPY
 #define MAP_COPY MAP_PRIVATE
 #endif
@@ -96,11 +96,7 @@ msg_file(const char *file)
 int fd;
 
 if (msgmap != MAP_FAILED)
-#ifdef __minix
-minix_munmap(msgmap, msgmapsz);
-#else /* ! __minix */
 munmap(msgmap, msgmapsz);
-#endif /* ! __minix */
 msgmap = MAP_FAILED;
 if (!file)
 return 0;
@@ -109,7 +105,7 @@ msg_file(const char *file)
 return -1;
 msgmapsz = lseek(fd, 0, SEEK_END);
 #ifdef __minix
-msgmap = minix_mmap(0, msgmapsz, PROT_READ, MAP_PRIVATE, fd, 0);
+msgmap = mmap(0, msgmapsz, PROT_READ, MAP_PRIVATE, fd, 0);
 #else /* ! __minix */
 msgmap = mmap(0, msgmapsz, PROT_READ, MAP_SHARED, fd, 0);
 #endif /* ! __minix */