minix/servers/vfs/comm.c
Ben Gras 565f13088f make vfs & filesystems use failable copying
Change the kernel to add features to vircopy and safecopies so that
transparent copy fixing won't happen to avoid deadlocks, and such copies
fail with EFAULT.

Transparently making copying work from filesystems (as normally done by
the kernel & VM when copying fails because of missing/readonly memory)
is problematic as it can happen that, for file-mapped ranges, the
same filesystem that is blocked on the copy request is needed to satisfy
the memory range, leading to deadlock. Ditto for VFS itself, if done with
a blocking call.

This change makes the copying done from a filesystem fail in such cases
with EFAULT by VFS adding the CPF_TRY flag to the grants. If a FS call
fails with EFAULT, VFS will then request the range to be made available
to VM after the FS is unblocked, allowing it to be used to satisfy the
range if need be in another VFS thread.

Similarly, for datacopies that VFS itself does, it uses the failable
vircopy variant and callers use a wrapper that talks to VM if necessary
to get the copy to work.

	. kernel: add CPF_TRY flag to safecopies
	. kernel: only request writable ranges to VM for the
	  target buffer when copying fails
	. do copying in VFS TRY-first
	. some fixes in VM to build SANITYCHECK mode
	. add regression test for the cases where
	  - a FS system call needs memory mapped in a process that the
	    FS itself must map.
	  - such a range covers more than one file-mapped region.
	. add 'try' mode to vircopy, physcopy
	. add flags field to copy kernel call messages
	. if CP_FLAG_TRY is set, do not transparently try
	  to fix memory ranges
	. for use by VFS when accessing user buffers to avoid
	  deadlock
	. remove some obsolete backwards compatibility assignments
        . VFS: let thread scheduling work for VM requests too
          Allows VFS to make calls to VM while suspending and resuming
          the currently running thread. Does currently not work for the
          main thread.
        . VM: add fix memory range call for use by VFS

Change-Id: I295794269cea51a3163519a9cfe5901301d90b32
2014-07-28 17:05:14 +02:00

235 lines
6.5 KiB
C

#include "fs.h"
#include <minix/vfsif.h>
#include <assert.h>
#include <string.h>
static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp);
static int queuemsg(struct vmnt *vmp);
/*===========================================================================*
* sendmsg *
*===========================================================================*/
static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp)
{
/* Low-level transmission of a request message, currently to an FS or to VM.
 * Tags the message with the worker's transaction id, records the destination
 * on the worker, and fires off an asynchronous send. Returns the asynsend3
 * result.
 */
  int r, transid;

  /* One more request outstanding on this mount, if the request targets one */
  if (vmp != NULL)
	vmp->m_comm.c_cur_reqs++;

  /* Encode the worker's transaction id into the message type so the reply
   * can later be matched back to this worker thread.
   */
  transid = wp->w_tid + VFS_TRANSID;
  wp->w_sendrec->m_type = TRNS_ADD_ID(wp->w_sendrec->m_type, transid);
  wp->w_task = dst;

  r = asynsend3(dst, wp->w_sendrec, AMF_NOREPLY);
  if (r != OK) {
	printf("VFS: sendmsg: error sending message. "
		"dest: %d req_nr: %d err: %d\n", dst,
		wp->w_sendrec->m_type, r);
	util_stacktrace();
  }

  return(r);
}
/*===========================================================================*
* send_work *
*===========================================================================*/
void send_work(void)
{
/* Flush pending work: give every mount a chance to transmit queued
 * requests. Cheap no-op when nothing is waiting to be sent.
 */
  struct vmnt *vmp;

  if (sending == 0) return;	/* No queued requests anywhere */

  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++)
	fs_sendmore(vmp);
}
/*===========================================================================*
* fs_cancel *
*===========================================================================*/
void fs_cancel(struct vmnt *vmp)
{
/* Cancel all requests still queued for this mount. Each queued worker is
 * unlinked from the queue and unblocked through worker_stop().
 */
  struct worker_thread *wp;

  for (wp = vmp->m_comm.c_req_queue; wp != NULL;
       wp = vmp->m_comm.c_req_queue) {
	vmp->m_comm.c_req_queue = wp->w_next;	/* Pop queue head */
	wp->w_next = NULL;
	sending--;
	worker_stop(wp);
  }
}
/*===========================================================================*
* fs_sendmore *
*===========================================================================*/
void fs_sendmore(struct vmnt *vmp)
{
/* Transmit the next queued request for this mount, provided a file system
 * is attached, something is queued, the per-mount request limit has not
 * been reached, and the FS is not currently being called back.
 */
  struct worker_thread *wp;

  if (vmp->m_fs_e == NONE)
	return;				/* No file system attached */
  if ((wp = vmp->m_comm.c_req_queue) == NULL)
	return;				/* Nothing is queued */
  if (vmp->m_comm.c_cur_reqs >= vmp->m_comm.c_max_reqs)
	return;				/* No room to send more */
  if (vmp->m_flags & VMNT_CALLBACK)
	return;				/* Hold off during callback */

  vmp->m_comm.c_req_queue = wp->w_next;	/* Unlink the queue head */
  wp->w_next = NULL;
  sending--;
  assert(sending >= 0);
  (void) sendmsg(vmp, vmp->m_fs_e, wp);
}
/*===========================================================================*
* drv_sendrec *
*===========================================================================*/
int drv_sendrec(endpoint_t drv_e, message *reqmp)
{
/* Send a request to a block device driver and suspend this worker thread
 * until the reply has been received. Returns OK on success, EIO for the
 * CTTY endpoint, or the asynsend3 error code if the send failed.
 */
  int r;
  struct dmap *dp;

  /* For the CTTY_MAJOR case, we would actually have to lock the device
   * entry being redirected to. However, the CTTY major only hosts a
   * character device while this function is used only for block devices.
   * Thus, we can simply deny the request immediately.
   */
  if (drv_e == CTTY_ENDPT) {
	printf("VFS: /dev/tty is not a block device!\n");
	return EIO;
  }

  if ((dp = get_dmap(drv_e)) == NULL)
	panic("driver endpoint %d invalid", drv_e);

  lock_dmap(dp);
  if (dp->dmap_servicing != INVALID_THREAD)
	panic("driver locking inconsistency");
  dp->dmap_servicing = self->w_tid;
  self->w_task = drv_e;
  self->w_drv_sendrec = reqmp;

  if ((r = asynsend3(drv_e, self->w_drv_sendrec, AMF_NOREPLY)) == OK) {
	/* Yield execution until we've received the reply */
	worker_wait();
  } else {
	printf("VFS: drv_sendrec: error sending msg to driver %d: %d\n",
		drv_e, r);
	util_stacktrace();
  }

  dp->dmap_servicing = INVALID_THREAD;
  self->w_task = NONE;
  self->w_drv_sendrec = NULL;
  unlock_dmap(dp);

  /* Propagate a send failure instead of unconditionally returning OK; on
   * failure no reply was received and *reqmp does not contain one. On the
   * success path r is OK, so successful callers see no change.
   */
  return(r);
}
/*===========================================================================*
* fs_sendrec *
*===========================================================================*/
int fs_sendrec(endpoint_t fs_e, message *reqmp)
{
/* Send a request to file system fs_e and suspend this worker thread until
 * the reply has arrived. Returns the reply's message type, EIO if the
 * endpoint is not a mounted FS, EDEADLK if the caller is the FS itself,
 * or the send/queue error code.
 */
  struct vmnt *vmp;
  int r;

  vmp = find_vmnt(fs_e);
  if (vmp == NULL) {
	printf("Trying to talk to non-existent FS endpoint %d\n", fs_e);
	return(EIO);
  }
  if (fs_e == fp->fp_endpoint) return(EDEADLK);	/* Never call ourselves */

  self->w_sendrec = reqmp;	/* Where to store request and reply */

  /* If the FS is being called back or already at its request limit, we
   * must enqueue; otherwise the request can go out right away.
   */
  if ((vmp->m_flags & VMNT_CALLBACK) ||
      vmp->m_comm.c_cur_reqs >= vmp->m_comm.c_max_reqs) {
	r = queuemsg(vmp);
  } else {
	r = sendmsg(vmp, vmp->m_fs_e, self);
  }
  self->w_next = NULL;	/* End of list */
  if (r != OK) return(r);

  worker_wait();	/* Yield execution until we've received the reply. */

  return(reqmp->m_type);
}
/*===========================================================================*
* vm_sendrec *
*===========================================================================*/
int vm_sendrec(message *reqmp)
{
/* Send a request to VM and suspend this worker thread until the reply has
 * arrived. Returns the reply's message type, or the send error code.
 */
  int r;

  assert(self != NULL);
  assert(reqmp != NULL);

  /* Record where the reply must be stored, then transmit the request. */
  self->w_sendrec = reqmp;
  r = sendmsg(NULL, VM_PROC_NR, self);
  self->w_next = NULL;	/* Not on any request queue */
  if (r != OK) return(r);

  worker_wait();	/* Yield execution until we've received the reply. */

  return(reqmp->m_type);
}
/*===========================================================================*
* vm_vfs_procctl_handlemem *
*===========================================================================*/
int vm_vfs_procctl_handlemem(endpoint_t ep,
	vir_bytes mem, vir_bytes len, int flags)
{
/* Issue a VM_PROCCTL/VMPPARAM_HANDLEMEM request asking VM to handle the
 * memory range [mem, mem+len) of process ep, and wait for the reply.
 */
  message m;

  /* This blocks on VM via vm_sendrec; the main thread cannot be suspended,
   * so refuse the call when not running on a worker thread.
   */
  if (self == NULL) return EFAULT;

  memset(&m, 0, sizeof(m));
  m.m_type = VM_PROCCTL;
  m.VMPCTL_WHO = ep;
  m.VMPCTL_PARAM = VMPPARAM_HANDLEMEM;
  m.VMPCTL_M1 = mem;
  m.VMPCTL_LEN = len;
  m.VMPCTL_FLAGS = flags;

  return vm_sendrec(&m);
}
/*===========================================================================*
* queuemsg *
*===========================================================================*/
static int queuemsg(struct vmnt *vmp)
{
/* Append the current worker to the tail of vmp's request queue; the queued
 * request is sent later by fs_sendmore(). Always returns OK.
 */
  struct worker_thread **tailp;

  /* Find the link that should point at us: the queue head when the queue
   * is empty, otherwise the w_next field of the last queued worker.
   */
  tailp = &vmp->m_comm.c_req_queue;
  while (*tailp != NULL)
	tailp = &(*tailp)->w_next;

  *tailp = self;
  self->w_next = NULL;	/* End of list */
  sending++;

  return(OK);
}