minix/servers/vfs/lock.c
Ben Gras 565f13088f make vfs & filesystems use failable copying
Change the kernel to add features to vircopy and safecopies so that
transparent copy fixing does not happen (which could deadlock), and
such copies instead fail with EFAULT.

Transparently making copying work from filesystems (as normally done by
the kernel & VM when copying fails because of missing/readonly memory)
is problematic: for file-mapped ranges, the same filesystem that is
blocked on the copy request may itself be needed to satisfy the memory
range, leading to deadlock. Ditto for VFS itself, if done with a
blocking call.

This change makes the copying done from a filesystem fail in such cases
with EFAULT, by having VFS add the CPF_TRY flag to the grants. If an FS
call fails with EFAULT, VFS then requests that VM make the range
available after the FS is unblocked, allowing the FS to be used to
satisfy the range, if need be, from another VFS thread.

Similarly, for data copies that VFS itself does, it uses the failable
vircopy variant, and callers use a wrapper that talks to VM if
necessary to get the copy to work.
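The wrapper idea is roughly the following. sys_datacopy_wrapper is the
name actually used in lock.c below; sys_datacopy_try() and
vm_fix_range() are assumed, illustrative names for the failable copy
and the VM fix-up request, so this is a sketch of the pattern, not the
actual implementation.

	/* Sketch: try the failable copy first; on EFAULT, ask VM to make
	 * the destination range available, then retry once.
	 */
	int
	sys_datacopy_wrapper(endpoint_t src_e, vir_bytes src,
		endpoint_t dst_e, vir_bytes dst, size_t len)
	{
		int r;

		r = sys_datacopy_try(src_e, src, dst_e, dst, len);
		if (r != EFAULT)
			return r;

		/* The copy faulted: have VM fix the target range. */
		if ((r = vm_fix_range(dst_e, dst, len)) != OK)
			return r;

		return sys_datacopy_try(src_e, src, dst_e, dst, len);
	}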

	. kernel: add CPF_TRY flag to safecopies
	. kernel: only request writable ranges to VM for the
	  target buffer when copying fails
	. do copying in VFS TRY-first
	. some fixes in VM to build SANITYCHECK mode
	. add regression test for the cases where
	  - a FS system call needs memory mapped in a process that the
	    FS itself must map.
	  - such a range covers more than one file-mapped region.
	. add 'try' mode to vircopy, physcopy
	. add flags field to copy kernel call messages
	. if CP_FLAG_TRY is set, do not transparently try
	  to fix memory ranges (see the sketch after this list)
	. for use by VFS when accessing user buffers to avoid
	  deadlock
	. remove some obsolete backwards compatibility assignments
        . VFS: let thread scheduling work for VM requests too
          Allows VFS to make calls to VM while suspending and resuming
          the currently running thread. This currently does not work
          for the main thread.
        . VM: add fix memory range call for use by VFS
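On the kernel side, the CP_FLAG_TRY check boils down to a decision of
the following shape in the copy path. Apart from CP_FLAG_TRY and
EFAULT, all names here (the function, its parameters, and
ask_vm_to_fix_range) are illustrative assumptions, not the exact
kernel code.

	/* Sketch: on a faulting copy, fail in try mode instead of
	 * transparently asking VM to fix the range.
	 */
	static int
	handle_copy_fault(struct proc *caller, vir_bytes addr, size_t len,
		int flags)
	{
		if (flags & CP_FLAG_TRY)
			return EFAULT;	/* let the caller (e.g. VFS) recover */

		/* Default path: block the caller, request the range from
		 * VM, and resume the copy when it has been made available.
		 */
		return ask_vm_to_fix_range(caller, addr, len);
	}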

Change-Id: I295794269cea51a3163519a9cfe5901301d90b32
2014-07-28 17:05:14 +02:00


/* This file handles advisory file locking as required by POSIX.
 *
 * The entry points into this file are
 *   lock_op:		perform locking operations for FCNTL system call
 *   lock_revive:	revive processes when a lock is released
 */
#include "fs.h"
#include <minix/com.h>
#include <minix/u64.h>
#include <fcntl.h>
#include <unistd.h>
#include "file.h"
#include "scratchpad.h"
#include "lock.h"
#include "vnode.h"

/*===========================================================================*
 *				lock_op					     *
 *===========================================================================*/
int lock_op(f, req)
struct filp *f;
int req;			/* either F_SETLK or F_SETLKW */
{
/* Perform the advisory locking required by POSIX. */

  int r, ltype, i, conflict = 0, unlocking = 0;
  mode_t mo;
  off_t first, last;
  struct flock flock;
  struct file_lock *flp, *flp2, *empty;

  /* Fetch the flock structure from user space. */
  r = sys_datacopy_wrapper(who_e, (vir_bytes) scratch(fp).io.io_buffer,
	VFS_PROC_NR, (vir_bytes) &flock, sizeof(flock));
  if (r != OK) return(EINVAL);

  /* Make some error checks. */
  ltype = flock.l_type;
  mo = f->filp_mode;
  if (ltype != F_UNLCK && ltype != F_RDLCK && ltype != F_WRLCK) return(EINVAL);
  if (req == F_GETLK && ltype == F_UNLCK) return(EINVAL);
  if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode))
	return(EINVAL);
  if (req != F_GETLK && ltype == F_RDLCK && (mo & R_BIT) == 0) return(EBADF);
  if (req != F_GETLK && ltype == F_WRLCK && (mo & W_BIT) == 0) return(EBADF);

  /* Compute the first and last bytes in the lock region. */
  switch (flock.l_whence) {
    case SEEK_SET:	first = 0; break;
    case SEEK_CUR:	first = f->filp_pos; break;
    case SEEK_END:	first = f->filp_vno->v_size; break;
    default:		return(EINVAL);
  }

  /* Check for overflow. */
  if (((long) flock.l_start > 0) && ((first + flock.l_start) < first))
	return(EINVAL);
  if (((long) flock.l_start < 0) && ((first + flock.l_start) > first))
	return(EINVAL);
  first = first + flock.l_start;

  last = first + flock.l_len - 1;
  if (flock.l_len == 0) last = MAX_FILE_POS;
  if (last < first) return(EINVAL);

  /* Check if this region conflicts with any existing lock. */
  empty = NULL;
  for (flp = &file_lock[0]; flp < &file_lock[NR_LOCKS]; flp++) {
	if (flp->lock_type == 0) {
		if (empty == NULL) empty = flp;
		continue;	/* 0 means unused slot */
	}
	if (flp->lock_vnode != f->filp_vno) continue;	/* different file */
	if (last < flp->lock_first) continue;	/* new one is in front */
	if (first > flp->lock_last) continue;	/* new one is afterwards */
	if (ltype == F_RDLCK && flp->lock_type == F_RDLCK) continue;
	if (ltype != F_UNLCK && flp->lock_pid == fp->fp_pid) continue;

	/* There might be a conflict.  Process it. */
	conflict = 1;
	if (req == F_GETLK) break;

	/* If we are trying to set a lock, it just failed. */
	if (ltype == F_RDLCK || ltype == F_WRLCK) {
		if (req == F_SETLK) {
			/* For F_SETLK, just report back failure. */
			return(EAGAIN);
		} else {
			/* For F_SETLKW, suspend the process. */
			suspend(FP_BLOCKED_ON_LOCK);
			return(SUSPEND);
		}
	}

	/* We are clearing a lock and we found something that overlaps. */
	unlocking = 1;
	if (first <= flp->lock_first && last >= flp->lock_last) {
		flp->lock_type = 0;	/* mark slot as unused */
		nr_locks--;		/* number of locks is now 1 less */
		continue;
	}

	/* Part of a locked region has been unlocked. */
	if (first <= flp->lock_first) {
		flp->lock_first = last + 1;
		continue;
	}

	if (last >= flp->lock_last) {
		flp->lock_last = first - 1;
		continue;
	}

	/* Bad luck. A lock has been split in two by unlocking the middle. */
	if (nr_locks == NR_LOCKS) return(ENOLCK);
	for (i = 0; i < NR_LOCKS; i++)
		if (file_lock[i].lock_type == 0) break;
	flp2 = &file_lock[i];
	flp2->lock_type = flp->lock_type;
	flp2->lock_pid = flp->lock_pid;
	flp2->lock_vnode = flp->lock_vnode;
	flp2->lock_first = last + 1;
	flp2->lock_last = flp->lock_last;
	flp->lock_last = first - 1;
	nr_locks++;
  }
  if (unlocking) lock_revive();

  if (req == F_GETLK) {
	if (conflict) {
		/* GETLK and conflict. Report on the conflicting lock. */
		flock.l_type = flp->lock_type;
		flock.l_whence = SEEK_SET;
		flock.l_start = flp->lock_first;
		flock.l_len = flp->lock_last - flp->lock_first + 1;
		flock.l_pid = flp->lock_pid;
	} else {
		/* It is GETLK and there is no conflict. */
		flock.l_type = F_UNLCK;
	}

	/* Copy the flock structure back to the caller. */
	r = sys_datacopy_wrapper(VFS_PROC_NR, (vir_bytes) &flock, who_e,
		(vir_bytes) scratch(fp).io.io_buffer, sizeof(flock));
	return(r);
  }

  if (ltype == F_UNLCK) return(OK);	/* unlocked a region with no locks */

  /* There is no conflict.  If space exists, store new lock in the table. */
  if (empty == NULL) return(ENOLCK);	/* table full */
  empty->lock_type = ltype;
  empty->lock_pid = fp->fp_pid;
  empty->lock_vnode = f->filp_vno;
  empty->lock_first = first;
  empty->lock_last = last;
  nr_locks++;
  return(OK);
}

/*===========================================================================*
 *				lock_revive				     *
 *===========================================================================*/
void lock_revive()
{
/* Go find all the processes that are waiting for any kind of lock and
 * revive them all.  The ones that are still blocked will block again when
 * they run.  The others will complete.  This strategy is a space-time
 * tradeoff.  Figuring out exactly which ones to unblock now would take
 * extra code, and the only thing it would win would be some performance in
 * extremely rare circumstances (namely, that somebody actually used
 * locking).
 */

  struct fproc *fptr;

  for (fptr = &fproc[0]; fptr < &fproc[NR_PROCS]; fptr++) {
	if (fptr->fp_pid == PID_FREE) continue;
	if (fptr->fp_blocked_on == FP_BLOCKED_ON_LOCK) {
		revive(fptr->fp_endpoint, 0);
	}
  }
}