/* Virtual mount table related routines.
 */
|
|
|
|
|
|
|
|
#include "fs.h"
|
|
|
|
#include "vmnt.h"
|
2012-02-13 16:28:04 +01:00
|
|
|
#include <assert.h>
|
2012-11-27 18:33:59 +01:00
|
|
|
#include <string.h>
|
2006-10-25 15:40:36 +02:00
|
|
|
|
2012-03-25 20:25:53 +02:00
|
|
|
static int is_vmnt_locked(struct vmnt *vmp);
|
|
|
|
static void clear_vmnt(struct vmnt *vmp);
|
2012-02-13 16:28:04 +01:00
|
|
|
|
|
|
|
/* Is vmp pointer reasonable? I.e., does it point inside the vmnt table? */
#define SANEVMP(v) ((((v) >= &vmnt[0] && (v) < &vmnt[NR_MNTS])))
/* Report an out-of-range vmnt pointer together with its source location */
#define BADVMP(v, f, l) printf("%s:%d: bad vmp %p\n", f, l, v)
/* vmp check that panics when the pointer is not within the vmnt table */
#define ASSERTVMP(v) if(!SANEVMP(v)) { \
	BADVMP(v, __FILE__, __LINE__); panic("bad vmp"); }
|
|
|
|
|
|
|
|
#if LOCK_DEBUG
/*===========================================================================*
 *				check_vmnt_locks_by_me			     *
 *===========================================================================*/
void check_vmnt_locks_by_me(struct fproc *rfp)
{
/* Verify that this thread no longer holds any locks on vmnts; panic if it
 * still does. Used for lock-leak debugging at the end of a request. */
  struct vmnt *v = &vmnt[0];

  while (v < &vmnt[NR_MNTS]) {
	if (tll_locked_by_me(&v->m_lock))
		panic("Thread %d still holds vmnt lock on vmp %p call_nr=%d\n",
		      mthread_self(), v, job_call_nr);
	v++;
  }

  /* Read locks are not owner-tracked by the tll layer; a per-process
   * counter keeps the score instead. It must be back to zero by now. */
  if (rfp->fp_vmnt_rdlocks != 0)
	panic("Thread %d still holds read locks on a vmnt (%d) call_nr=%d\n",
	      mthread_self(), rfp->fp_vmnt_rdlocks, job_call_nr);
}
#endif
|
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* check_vmnt_locks *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
void check_vmnt_locks()
|
2012-02-13 16:28:04 +01:00
|
|
|
{
|
|
|
|
struct vmnt *vmp;
|
|
|
|
int count = 0;
|
|
|
|
|
|
|
|
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++)
|
|
|
|
if (is_vmnt_locked(vmp)) {
|
|
|
|
count++;
|
|
|
|
printf("vmnt %p is %s, fs_e=%d dev=%d\n", vmp, (tll_islocked(&vmp->m_lock) ? "locked":"pending locked"), vmp->m_fs_e, vmp->m_dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (count) panic("%d locked vmnts\n", count);
|
|
|
|
#if 0
|
|
|
|
printf("check_vmnt_locks OK\n");
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2012-02-22 14:54:35 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* mark_vmnt_free *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
void mark_vmnt_free(struct vmnt *vmp)
|
2012-02-22 14:54:35 +01:00
|
|
|
{
|
|
|
|
ASSERTVMP(vmp);
|
|
|
|
|
|
|
|
vmp->m_fs_e = NONE;
|
|
|
|
vmp->m_dev = NO_DEV;
|
|
|
|
}
|
|
|
|
|
2012-02-13 16:28:04 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* clear_vmnt *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static void clear_vmnt(struct vmnt *vmp)
|
2012-02-13 16:28:04 +01:00
|
|
|
{
|
|
|
|
/* Reset vmp to initial parameters */
|
|
|
|
ASSERTVMP(vmp);
|
|
|
|
|
|
|
|
vmp->m_fs_e = NONE;
|
|
|
|
vmp->m_dev = NO_DEV;
|
|
|
|
vmp->m_flags = 0;
|
|
|
|
vmp->m_mounted_on = NULL;
|
|
|
|
vmp->m_root_node = NULL;
|
|
|
|
vmp->m_label[0] = '\0';
|
|
|
|
vmp->m_comm.c_max_reqs = 1;
|
|
|
|
vmp->m_comm.c_cur_reqs = 0;
|
|
|
|
vmp->m_comm.c_req_queue = NULL;
|
|
|
|
}
|
2006-10-25 15:40:36 +02:00
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* get_free_vmnt *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
struct vmnt *get_free_vmnt(void)
|
2006-10-25 15:40:36 +02:00
|
|
|
{
|
2012-02-22 14:54:35 +01:00
|
|
|
struct vmnt *vmp;
|
2012-02-13 16:28:04 +01:00
|
|
|
|
2012-02-22 14:54:35 +01:00
|
|
|
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
|
|
|
|
if (vmp->m_dev == NO_DEV) {
|
|
|
|
clear_vmnt(vmp);
|
|
|
|
return(vmp);
|
|
|
|
}
|
|
|
|
}
|
2006-10-25 15:40:36 +02:00
|
|
|
|
2010-05-10 15:26:00 +02:00
|
|
|
return(NULL);
|
2006-10-25 15:40:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* find_vmnt *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
struct vmnt *find_vmnt(endpoint_t fs_e)
|
2006-10-25 15:40:36 +02:00
|
|
|
{
|
2012-02-13 16:28:04 +01:00
|
|
|
/* Find the vmnt belonging to an FS with endpoint 'fs_e' iff it's in use */
|
2006-10-25 15:40:36 +02:00
|
|
|
struct vmnt *vp;
|
2012-02-13 16:28:04 +01:00
|
|
|
|
|
|
|
for (vp = &vmnt[0]; vp < &vmnt[NR_MNTS]; ++vp)
|
|
|
|
if (vp->m_fs_e == fs_e && vp->m_dev != NO_DEV)
|
|
|
|
return(vp);
|
2006-10-25 15:40:36 +02:00
|
|
|
|
2010-05-10 15:26:00 +02:00
|
|
|
return(NULL);
|
2006-10-25 15:40:36 +02:00
|
|
|
}
|
|
|
|
|
2012-02-13 16:28:04 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* init_vmnts *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
void init_vmnts(void)
|
2012-02-13 16:28:04 +01:00
|
|
|
{
|
|
|
|
/* Initialize vmnt table */
|
|
|
|
struct vmnt *vmp;
|
2006-10-25 15:40:36 +02:00
|
|
|
|
2012-02-13 16:28:04 +01:00
|
|
|
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++) {
|
|
|
|
clear_vmnt(vmp);
|
|
|
|
tll_init(&vmp->m_lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* is_vmnt_locked *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
static int is_vmnt_locked(struct vmnt *vmp)
|
2012-02-13 16:28:04 +01:00
|
|
|
{
|
|
|
|
ASSERTVMP(vmp);
|
|
|
|
return(tll_islocked(&vmp->m_lock) || tll_haspendinglock(&vmp->m_lock));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
 *				lock_vmnt				     *
 *===========================================================================*/
int lock_vmnt(struct vmnt *vmp, tll_access_t locktype)
{
/* Acquire a lock of type 'locktype' on 'vmp'. Returns OK on success,
 * EDEADLK when the requesting FS asks for a lock on its own vmnt, or
 * EBUSY when the lock could not be obtained right away. */
  int r;
  tll_access_t initial_locktype;

  ASSERTVMP(vmp);

  /* VMNT_EXCL is obtained in two steps: first take VMNT_WRITE, then upgrade.
   * Serializing write-intent acquisitions this way prevents the deadlock
   * where one thread waits in an upgrade while another holds a read lock
   * and waits on a vnode the first thread owns. */
  initial_locktype = (locktype == VMNT_EXCL) ? VMNT_WRITE : locktype;

  /* An FS locking the vmnt it itself serves would deadlock */
  if (vmp->m_fs_e == who_e) return(EDEADLK);

  r = tll_lock(&vmp->m_lock, initial_locktype);

  if (r == EBUSY) return(r);

  /* Second step: escalate VMNT_WRITE to the requested VMNT_EXCL */
  if (initial_locktype != locktype) {
	upgrade_vmnt_lock(vmp);
  }

#if LOCK_DEBUG
  /* Read locks are not owner-tracked; count them per process so
   * check_vmnt_locks_by_me() can detect leaks. */
  if (locktype == VMNT_READ)
	fp->fp_vmnt_rdlocks++;
#endif

  return(OK);
}
|
|
|
|
|
|
|
|
/*===========================================================================*
|
|
|
|
* vmnt_unmap_by_endpoint *
|
|
|
|
*===========================================================================*/
|
2012-03-25 20:25:53 +02:00
|
|
|
void vmnt_unmap_by_endpt(endpoint_t proc_e)
|
2012-02-13 16:28:04 +01:00
|
|
|
{
|
|
|
|
struct vmnt *vmp;
|
|
|
|
|
|
|
|
if ((vmp = find_vmnt(proc_e)) != NULL) {
|
2012-02-22 14:54:35 +01:00
|
|
|
mark_vmnt_free(vmp);
|
2012-02-13 16:28:04 +01:00
|
|
|
fs_cancel(vmp);
|
|
|
|
invalidate_filp_by_endpt(proc_e);
|
|
|
|
if (vmp->m_mounted_on) {
|
|
|
|
/* Only put mount point when it was actually used as mount
|
|
|
|
* point. That is, the mount was succesful. */
|
|
|
|
put_vnode(vmp->m_mounted_on);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*===========================================================================*
 *				unlock_vmnt				     *
 *===========================================================================*/
void unlock_vmnt(struct vmnt *vmp)
{
/* Release this thread's lock on 'vmp'. */
  ASSERTVMP(vmp);

#if LOCK_DEBUG
  /* Decrease read-only lock counter when not locked as VMNT_WRITE or
   * VMNT_EXCL. Note: this test must happen before tll_unlock, while
   * ownership information is still valid. */
  if (!tll_locked_by_me(&vmp->m_lock))
	fp->fp_vmnt_rdlocks--;
#endif

  tll_unlock(&vmp->m_lock);

#if LOCK_DEBUG
  /* After unlocking we must no longer be the owner */
  assert(!tll_locked_by_me(&vmp->m_lock));
#endif

}
|
/*===========================================================================*
 *				downgrade_vmnt_lock			     *
 *===========================================================================*/
void downgrade_vmnt_lock(struct vmnt *vmp)
{
/* Reduce this thread's lock on 'vmp' to the next weaker access type. */
  ASSERTVMP(vmp);
  tll_downgrade(&vmp->m_lock);

#if LOCK_DEBUG
  /* If we're no longer the owner of a lock, we downgraded to VMNT_READ;
   * account for the new read lock. */
  if (!tll_locked_by_me(&vmp->m_lock)) {
	fp->fp_vmnt_rdlocks++;
  }
#endif
}
|
|
|
|
|
|
|
|
/*===========================================================================*
 *				upgrade_vmnt_lock			     *
 *===========================================================================*/
void upgrade_vmnt_lock(struct vmnt *vmp)
{
/* Escalate this thread's lock on 'vmp' via tll_upgrade (presumably to
 * exclusive access; may block until conflicting holders are gone —
 * see tll implementation for exact semantics). */
  ASSERTVMP(vmp);
  tll_upgrade(&vmp->m_lock);
}
|
|
|
|
|
2012-11-27 18:33:59 +01:00
|
|
|
/*===========================================================================*
|
|
|
|
* fetch_vmnt_paths *
|
|
|
|
*===========================================================================*/
|
|
|
|
void fetch_vmnt_paths(void)
|
|
|
|
{
|
|
|
|
struct vmnt *vmp;
|
|
|
|
struct vnode *cur_wd;
|
|
|
|
char orig_path[PATH_MAX];
|
|
|
|
|
|
|
|
cur_wd = fp->fp_wd;
|
|
|
|
|
|
|
|
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++) {
|
|
|
|
if (vmp->m_dev == NO_DEV)
|
|
|
|
continue;
|
|
|
|
if (vmp->m_fs_e == PFS_PROC_NR)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
strlcpy(orig_path, vmp->m_mount_path, PATH_MAX);
|
|
|
|
|
|
|
|
/* Find canonical path */
|
|
|
|
if (canonical_path(vmp->m_mount_path, fp) != OK) {
|
|
|
|
/* We failed to find it (moved somewhere else?). Let's try
|
|
|
|
* again by starting at the node on which we are mounted:
|
|
|
|
* pretend that node is our working directory and look for the
|
|
|
|
* canonical path of the relative path to the mount point
|
|
|
|
* (which should be in our 'working directory').
|
|
|
|
*/
|
|
|
|
char *mp;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
fp->fp_wd = vmp->m_mounted_on; /* Change our working dir */
|
|
|
|
|
|
|
|
/* Isolate the mount point name of the full path */
|
|
|
|
len = strlen(vmp->m_mount_path);
|
|
|
|
if (vmp->m_mount_path[len - 1] == '/') {
|
|
|
|
vmp->m_mount_path[len - 1] = '\0';
|
|
|
|
}
|
|
|
|
mp = strrchr(vmp->m_mount_path, '/');
|
|
|
|
strlcpy(vmp->m_mount_path, mp+1, NAME_MAX+1);
|
|
|
|
|
|
|
|
if (canonical_path(vmp->m_mount_path, fp) != OK) {
|
|
|
|
/* Our second try failed too. Maybe an FS has crashed
|
|
|
|
* and we're missing part of the tree. Revert path.
|
|
|
|
*/
|
|
|
|
strlcpy(vmp->m_mount_path, orig_path, PATH_MAX);
|
|
|
|
}
|
|
|
|
fp->fp_wd = cur_wd; /* Revert working dir */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|