Use netbsd <sys/mman.h>

Change-Id: I80e9cffc80140383a6faf692248573c64d282b4a
This commit is contained in:
Ben Gras 2013-11-28 17:51:21 +01:00 committed by Lionel Sambuc
parent 364953ad40
commit 88be7bd333
10 changed files with 99 additions and 100 deletions

View file

@ -823,20 +823,18 @@
/* General calls. */
#define VM_MMAP (VM_RQ_BASE+10)
# define VMM_ADDR m5_l1
# define VMM_LEN m5_l2
# define VMM_PROT m5_s1
# define VMM_FLAGS m5_s2
# define VMM_FD m5_i1
# define VMM_OFFSET_LO m5_i2
# define VMM_FORWHOM m5_l3
# define VMM_OFFSET_HI m5_l3
# define VMM_RETADDR m5_l1 /* result */
#define VM_UMAP (VM_RQ_BASE+11)
# define VMU_SEG m1_i1
# define VMU_OFFSET m1_p1
# define VMU_LENGTH m1_p2
# define VMU_RETADDR m1_p3
# define VMM_ADDR m_u.m_mmap.addr
# define VMM_LEN m_u.m_mmap.len
# define VMM_PROT m_u.m_mmap.prot
# define VMM_FLAGS m_u.m_mmap.flags
# define VMM_FD m_u.m_mmap.fd
# define VMM_OFFSET m_u.m_mmap.offset
# define VMM_FORWHOM m_u.m_mmap.forwhom
# define VMM_RETADDR m_u.m_mmap.retaddr
#define VM_MUNMAP (VM_RQ_BASE+17)
# define VMUM_ADDR m_u.m_mmap.addr
# define VMUM_LEN m_u.m_mmap.len
/* to VM: inform VM about a region of memory that is used for
* bus-master DMA
@ -872,10 +870,6 @@
# define VMUP_EP m1_i1
# define VMUP_VADDR m1_p1
#define VM_MUNMAP (VM_RQ_BASE+17)
# define VMUM_ADDR m1_p1
# define VMUM_LEN m1_i1
/* To VM: map in cache block by FS */
#define VM_MAPCACHEPAGE (VM_RQ_BASE+26)
@ -919,8 +913,8 @@
# define VMRE_FLAGS m1_i3
#define VM_SHM_UNMAP (VM_RQ_BASE+34)
# define VMUN_ENDPT m2_i1
# define VMUN_ADDR m2_l1
# define VMUN_ENDPT m_u.m_mmap.forwhom
# define VMUN_ADDR m_u.m_mmap.addr
#define VM_GETPHYS (VM_RQ_BASE+35)
# define VMPHYS_ENDPT m2_i1

View file

@ -114,14 +114,14 @@ _ASSERT_MSG_SIZE(mess_11);
typedef struct {
dev_t dev; /* 64bits long. */
void *block;
u32_t dev_offset_pages;
u32_t ino_offset_pages;
u32_t ino;
off_t dev_offset;
off_t ino_offset;
ino_t ino;
u32_t *flags_ptr;
void *block;
u8_t pages;
u8_t flags;
uint8_t padding[26];
uint8_t padding[12];
} mess_vmmcp;
_ASSERT_MSG_SIZE(mess_vmmcp);
@ -134,15 +134,29 @@ typedef struct {
_ASSERT_MSG_SIZE(mess_notify);
typedef struct {
endpoint_t who;
u32_t offset;
off_t offset;
void *addr;
size_t len;
int prot;
int flags;
int fd;
endpoint_t forwhom;
void *retaddr;
u32_t padding[5];
} mess_mmap;
_ASSERT_MSG_SIZE(mess_mmap);
typedef struct {
off_t offset;
dev_t dev;
u32_t ino;
ino_t ino;
endpoint_t who;
u32_t vaddr;
u32_t len;
u16_t fd;
u16_t clearend_and_flags; /* low 12 bits are clearend, rest flags */
uint8_t padding[24];
u32_t flags;
u32_t fd;
u16_t clearend;
uint8_t padding[8];
} mess_vm_vfs_mmap;
_ASSERT_MSG_SIZE(mess_vm_vfs_mmap);
@ -170,6 +184,7 @@ typedef struct {
mess_11 m_m11;
mess_vmmcp m_vmmcp;
mess_vmmcp_reply m_vmmcp_reply;
mess_mmap m_mmap;
mess_vm_vfs_mmap m_vm_vfs;
mess_notify m_notify; /* notify messages */
mess_sigcalls m_sigcalls; /* SYS_{GETKSIG,ENDKSIG,KILL,SIGSEND,SIGRETURN} */

View file

@ -23,13 +23,11 @@ int vm_query_exit(endpoint_t *endpt);
int vm_watch_exit(endpoint_t ep);
int vm_forgetblock(u64_t id);
void vm_forgetblocks(void);
int minix_vfs_mmap(endpoint_t who, u32_t offset, u32_t len,
dev_t dev, u32_t ino, u16_t fd, u32_t vaddr, u16_t clearend, u16_t
int minix_vfs_mmap(endpoint_t who, off_t offset, size_t len,
dev_t dev, ino_t ino, int fd, u32_t vaddr, u16_t clearend, u16_t
flags);
/* minix vfs mmap flags */
#define MVM_LENMASK 0x0FFF
#define MVM_FLAGSMASK 0xF000
#define MVM_WRITABLE 0x8000
/* VM kernel request types. */
@ -65,11 +63,11 @@ int vm_info_region(endpoint_t who, struct vm_region_info *vri, int
count, vir_bytes *next);
int vm_procctl(endpoint_t ep, int param);
int vm_set_cacheblock(void *block, dev_t dev, u64_t dev_offset,
u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize);
int vm_set_cacheblock(void *block, dev_t dev, off_t dev_offset,
ino_t ino, off_t ino_offset, u32_t *flags, int blocksize);
void *vm_map_cacheblock(dev_t dev, u64_t dev_offset,
u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize);
void *vm_map_cacheblock(dev_t dev, off_t dev_offset,
ino_t ino, off_t ino_offset, u32_t *flags, int blocksize);
int vm_clear_cache(dev_t dev);

View file

@ -29,18 +29,16 @@ void *minix_mmap_for(endpoint_t forwhom,
int r;
memset(&m, 0, sizeof(m));
m.VMM_ADDR = (vir_bytes) addr;
m.VMM_ADDR = addr;
m.VMM_LEN = len;
m.VMM_PROT = prot;
m.VMM_FLAGS = flags;
m.VMM_FD = fd;
m.VMM_OFFSET_LO = ex64lo(offset);
m.VMM_OFFSET = offset;
m.VMM_FORWHOM = forwhom;
if(forwhom != SELF) {
m.VMM_FLAGS |= MAP_THIRDPARTY;
m.VMM_FORWHOM = forwhom;
} else {
m.VMM_OFFSET_HI = ex64hi(offset);
}
r = _syscall(VM_PROC_NR, VM_MMAP, &m);
@ -49,11 +47,11 @@ void *minix_mmap_for(endpoint_t forwhom,
return MAP_FAILED;
}
return (void *) m.VMM_RETADDR;
return m.VMM_RETADDR;
}
int minix_vfs_mmap(endpoint_t who, u32_t offset, u32_t len,
dev_t dev, u32_t ino, u16_t fd, u32_t vaddr, u16_t clearend,
int minix_vfs_mmap(endpoint_t who, off_t offset, size_t len,
dev_t dev, ino_t ino, int fd, u32_t vaddr, u16_t clearend,
u16_t flags)
{
message m;
@ -67,7 +65,8 @@ int minix_vfs_mmap(endpoint_t who, u32_t offset, u32_t len,
m.m_u.m_vm_vfs.vaddr = vaddr;
m.m_u.m_vm_vfs.len = len;
m.m_u.m_vm_vfs.fd = fd;
m.m_u.m_vm_vfs.clearend_and_flags = clearend | flags;
m.m_u.m_vm_vfs.clearend = clearend;
m.m_u.m_vm_vfs.flags = flags;
return _syscall(VM_PROC_NR, VM_VFS_MMAP, &m);
}
@ -78,12 +77,6 @@ void *minix_mmap(void *addr, size_t len, int prot, int flags,
return minix_mmap_for(SELF, addr, len, prot, flags, fd, offset);
}
void *minix_mmap64(void *addr, size_t len, int prot, int flags,
int fd, u64_t offset)
{
return minix_mmap_for(SELF, addr, len, prot, flags, fd, offset);
}
int minix_munmap(void *addr, size_t len)
{
message m;
@ -146,7 +139,7 @@ int vm_unmap(endpoint_t endpt, void *addr)
memset(&m, 0, sizeof(m));
m.VMUN_ENDPT = endpt;
m.VMUN_ADDR = (long) addr;
m.VMUN_ADDR = addr;
return _syscall(VM_PROC_NR, VM_SHM_UNMAP, &m);
}

View file

@ -429,14 +429,14 @@ int block_type; /* INODE_BLOCK, DIRECTORY_BLOCK, or whatever */
* disk immediately if they are dirty.
*/
dev_t dev;
u64_t dev_off;
off_t dev_off;
int r;
if (bp == NULL) return; /* it is easier to check here than in caller */
dev = bp->lmfs_dev;
dev_off = (u64_t) bp->lmfs_blocknr * fs_block_size;
dev_off = (off_t) bp->lmfs_blocknr * fs_block_size;
lowercount(bp);
if (bp->lmfs_count != 0) return; /* block is still in use */
@ -508,7 +508,7 @@ register struct buf *bp; /* buffer pointer */
* from the cache, it is not clear what the caller could do about it anyway.
*/
int r, op_failed;
u64_t pos;
off_t pos;
dev_t dev = bp->lmfs_dev;
op_failed = 0;
@ -518,7 +518,7 @@ register struct buf *bp; /* buffer pointer */
ASSERT(bp->lmfs_bytes == fs_block_size);
ASSERT(fs_block_size > 0);
pos = (u64_t)bp->lmfs_blocknr * fs_block_size;
pos = (off_t)bp->lmfs_blocknr * fs_block_size;
if(fs_block_size > PAGE_SIZE) {
#define MAXPAGES 20
vir_bytes blockrem, vaddr = (vir_bytes) bp->data;
@ -629,7 +629,7 @@ void lmfs_rw_scattered(
register int i;
register iovec_t *iop;
static iovec_t iovec[NR_IOREQS];
u64_t pos;
off_t pos;
int iov_per_block;
int start_in_use = bufs_in_use, start_bufqsize = bufqsize;
@ -704,7 +704,7 @@ void lmfs_rw_scattered(
assert(nblocks > 0);
assert(niovecs > 0);
pos = (u64_t)bufq[0]->lmfs_blocknr * fs_block_size;
pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
if (rw_flag == READING)
r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
else
@ -941,7 +941,7 @@ int lmfs_do_bpeek(message *m)
{
block_t startblock, b, limitblock;
dev_t dev = m->REQ_DEV;
u64_t extra, pos = make64(m->REQ_SEEK_POS_LO, m->REQ_SEEK_POS_HI);
off_t extra, pos = make64(m->REQ_SEEK_POS_LO, m->REQ_SEEK_POS_HI);
size_t len = m->REQ_NBYTES;
struct buf *bp;

View file

@ -92,7 +92,6 @@ SRCS+= \
vm_map_phys.c \
vm_memctl.c \
vm_notify_sig.c \
vm_umap.c \
vm_procctl.c \
vm_query_exit.c \
vm_set_priv.c \

View file

@ -9,8 +9,8 @@
#include <minix/sysutil.h>
#include <machine/vmparam.h>
int vm_cachecall(message *m, int call, void *addr, dev_t dev, u64_t dev_offset,
u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
int vm_cachecall(message *m, int call, void *addr, dev_t dev, off_t dev_offset,
ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
if(blocksize % PAGE_SIZE)
panic("blocksize %d should be a multiple of pagesize %d\n",
@ -28,8 +28,8 @@ int vm_cachecall(message *m, int call, void *addr, dev_t dev, u64_t dev_offset,
assert(dev != NO_DEV);
m->m_u.m_vmmcp.dev_offset_pages = dev_offset/PAGE_SIZE;
m->m_u.m_vmmcp.ino_offset_pages = ino_offset/PAGE_SIZE;
m->m_u.m_vmmcp.dev_offset = dev_offset;
m->m_u.m_vmmcp.ino_offset = ino_offset;
m->m_u.m_vmmcp.ino = ino;
m->m_u.m_vmmcp.block = addr;
m->m_u.m_vmmcp.flags_ptr = flags;
@ -40,8 +40,8 @@ int vm_cachecall(message *m, int call, void *addr, dev_t dev, u64_t dev_offset,
return _taskcall(VM_PROC_NR, call, m);
}
void *vm_map_cacheblock(dev_t dev, u64_t dev_offset,
u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
void *vm_map_cacheblock(dev_t dev, off_t dev_offset,
ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
message m;
@ -52,8 +52,8 @@ void *vm_map_cacheblock(dev_t dev, u64_t dev_offset,
return m.m_u.m_vmmcp_reply.addr;
}
int vm_set_cacheblock(void *block, dev_t dev, u64_t dev_offset,
u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
int vm_set_cacheblock(void *block, dev_t dev, off_t dev_offset,
ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
message m;

View file

@ -86,8 +86,8 @@ int
do_mapcache(message *msg)
{
dev_t dev = msg->m_u.m_vmmcp.dev;
u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset_pages * VM_PAGE_SIZE;
u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
u64_t dev_off = msg->m_u.m_vmmcp.dev_offset;
u64_t ino_off = msg->m_u.m_vmmcp.ino_offset;
int n;
phys_bytes bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;
struct vir_region *vr;
@ -95,6 +95,11 @@ do_mapcache(message *msg)
vir_bytes offset;
int io = 0;
if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
printf("VM: unaligned cache operation\n");
return EFAULT;
}
if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
caller = &vmproc[n];
@ -166,8 +171,8 @@ do_setcache(message *msg)
{
int r;
dev_t dev = msg->m_u.m_vmmcp.dev;
u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset_pages * VM_PAGE_SIZE;
u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset;
u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset;
int n;
struct vmproc *caller;
phys_bytes offset;
@ -175,6 +180,11 @@ do_setcache(message *msg)
if(bytes < VM_PAGE_SIZE) return EINVAL;
if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
printf("VM: unaligned cache operation\n");
return EFAULT;
}
if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
caller = &vmproc[n];

View file

@ -81,28 +81,18 @@ static struct vir_region *mmap_region(struct vmproc *vmp, vir_bytes addr,
}
static int mmap_file(struct vmproc *vmp,
int vmfd, u32_t off_lo, u32_t off_hi, int flags,
int vmfd, off_t file_offset, int flags,
ino_t ino, dev_t dev, u64_t filesize, vir_bytes addr, vir_bytes len,
vir_bytes *retaddr, u16_t clearend, int writable, int mayclosefd)
{
/* VFS has replied to a VMVFSREQ_FDLOOKUP request. */
struct vir_region *vr;
u64_t file_offset, page_offset;
u64_t page_offset;
int result = OK;
u32_t vrflags = 0;
if(writable) vrflags |= VR_WRITABLE;
if(flags & MAP_THIRDPARTY) {
file_offset = off_lo;
} else {
file_offset = make64(off_lo, off_hi);
if(off_hi && !off_lo) {
/* XXX clang compatibility hack */
off_hi = file_offset = 0;
}
}
/* Do some page alignments. */
if((page_offset = (file_offset % VM_PAGE_SIZE))) {
file_offset -= page_offset;
@ -151,14 +141,14 @@ int do_vfs_mmap(message *m)
/* It might be disabled */
if(!enable_filemap) return ENXIO;
clearend = (m->m_u.m_vm_vfs.clearend_and_flags & MVM_LENMASK);
flags = (m->m_u.m_vm_vfs.clearend_and_flags & MVM_FLAGSMASK);
clearend = m->m_u.m_vm_vfs.clearend;
flags = m->m_u.m_vm_vfs.flags;
if((r=vm_isokendpt(m->m_u.m_vm_vfs.who, &n)) != OK)
panic("bad ep %d from vfs", m->m_u.m_vm_vfs.who);
vmp = &vmproc[n];
return mmap_file(vmp, m->m_u.m_vm_vfs.fd, m->m_u.m_vm_vfs.offset, 0,
return mmap_file(vmp, m->m_u.m_vm_vfs.fd, m->m_u.m_vm_vfs.offset,
MAP_PRIVATE | MAP_FIXED,
m->m_u.m_vm_vfs.ino, m->m_u.m_vm_vfs.dev,
(u64_t) LONG_MAX * VM_PAGE_SIZE,
@ -186,18 +176,18 @@ static void mmap_file_cont(struct vmproc *vmp, message *replymsg, void *cbarg,
result = origmsg->VMV_RESULT;
} else {
/* Finish mmap */
result = mmap_file(vmp, replymsg->VMV_FD, origmsg->VMM_OFFSET_LO,
origmsg->VMM_OFFSET_HI, origmsg->VMM_FLAGS,
result = mmap_file(vmp, replymsg->VMV_FD, origmsg->VMM_OFFSET,
origmsg->VMM_FLAGS,
replymsg->VMV_INO, replymsg->VMV_DEV,
(u64_t) replymsg->VMV_SIZE_PAGES*PAGE_SIZE,
origmsg->VMM_ADDR,
(vir_bytes) origmsg->VMM_ADDR,
origmsg->VMM_LEN, &v, 0, writable, 1);
}
/* Unblock requesting process. */
memset(&mmap_reply, 0, sizeof(mmap_reply));
mmap_reply.m_type = result;
mmap_reply.VMM_ADDR = v;
mmap_reply.VMM_RETADDR = (void *) v;
if(ipc_send(vmp->vm_endpoint, &mmap_reply) != OK)
panic("VM: mmap_file_cont: ipc_send() failed");
@ -210,7 +200,7 @@ int do_mmap(message *m)
{
int r, n;
struct vmproc *vmp;
vir_bytes addr = m->VMM_ADDR;
vir_bytes addr = (vir_bytes) m->VMM_ADDR;
struct vir_region *vr = NULL;
int execpriv = 0;
size_t len = (vir_bytes) m->VMM_LEN;
@ -281,7 +271,7 @@ int do_mmap(message *m)
}
/* Return mapping, as seen from process. */
m->VMM_RETADDR = vr->vaddr;
m->VMM_RETADDR = (void *) vr->vaddr;
return OK;
}

View file

@ -252,14 +252,14 @@ u32_t sqrt_approx(u32_t v)
return (u32_t) sqrt(v);
}
int vm_set_cacheblock(void *block, dev_t dev, u64_t dev_offset,
u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
int vm_set_cacheblock(void *block, dev_t dev, off_t dev_offset,
ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
return ENOSYS;
}
void *vm_map_cacheblock(dev_t dev, u64_t dev_offset,
u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
void *vm_map_cacheblock(dev_t dev, off_t dev_offset,
ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
return MAP_FAILED;
}