41e9fedf87
bugfixes: SYSTEM: . removed rc->p_priv->s_flags = 0; for the priv struct shared by all user processes in get_priv(). this should only be done once. doing a SYS_PRIV_USER in sys_privctl() caused the flags of all user processes to be reset, so they were no longer PREEMPTIBLE. this happened when RS executed a policy script. (this broke test1 in the test set) VFS/MFS: . chown can change the mode of a file, and chmod arguments are only part of the full file mode so the full filemode is slightly magic. changed these calls so that the final modes are returned to VFS, so that the vnode can be kept up-to-date. (this broke test11 in the test set) MFS: . lookup() checked for sizeof(string) instead of sizeof(user_path), truncating long path names (caught by test 23) . truncate functions neglected to update ctime (this broke test16) VFS: . corner case of an empty filename lookup caused fields of a request not to be filled in in the lookup functions, not making it clear that the lookup had failed, causing messages to garbage processes, causing strange failures. (caught by test 30) . trust v_size in vnode when doing reads or writes on non-special files, truncating i/o where necessary; this is necessary for pipes, as MFS can't tell when a pipe has been truncated without it being told explicitly each time. when the last reader/writer on a pipe closes, tell FS about the new size using truncate_vn(). (this broke test 25, among others) . permission check for chdir() had disappeared; added a forbidden() call (caught by test 23) new code, shouldn't change anything: . introduced RTS_SET, RTS_UNSET, and RTS_ISSET macro's, and their LOCK variants. 
These macros set and clear the p_rts_flags field, causing a lot of duplicated logic like old_flags = rp->p_rts_flags; /* save value of the flags */ rp->p_rts_flags &= ~NO_PRIV; if (old_flags != 0 && rp->p_rts_flags == 0) lock_enqueue(rp); to change into the simpler RTS_LOCK_UNSET(rp, NO_PRIV); so the macros take care of calling dequeue() and enqueue() (or lock_*()), as the case may be. This makes the code a bit more readable and a bit less fragile. . removed return code from do_clocktick in CLOCK as it currently never replies . removed some debug code from VFS . fixed grant debug message in device.c preemptive checks, tests, changes: . added return code checks of receive() to SYSTEM and CLOCK . O_TRUNC should never arrive at MFS (added sanity check and removed O_TRUNC code) . user_path declared with PATH_MAX+1 to let it be null-terminated . checks in MFS to see if strings passed by VFS are null-terminated IS: . static irq name table thrown out
444 lines
13 KiB
C
444 lines
13 KiB
C
|
|
/* lookup() is the main routine that controls the path name lookup. It
|
|
* handles mountpoints and symbolic links. The actual lookup requests
|
|
* are sent through the req_lookup wrapper function.
|
|
*
|
|
* Jul 2006 (Balazs Gerofi)
|
|
*/
|
|
|
|
#include "fs.h"
|
|
#include <string.h>
|
|
#include <minix/callnr.h>
|
|
#include <minix/com.h>
|
|
#include <minix/keymap.h>
|
|
#include <minix/const.h>
|
|
#include <minix/endpoint.h>
|
|
#include <unistd.h>
|
|
|
|
#include <minix/vfsif.h>
|
|
#include "fproc.h"
|
|
#include "vmnt.h"
|
|
#include "vnode.h"
|
|
#include "param.h"
|
|
|
|
FORWARD _PROTOTYPE( int Xlookup, (lookup_req_t *lookup_req,
|
|
node_details_t *node, char **pathrem) );
|
|
|
|
/*===========================================================================*
 *				lookup					     *
 *===========================================================================*/
PUBLIC int lookup(lookup_req, node)
lookup_req_t *lookup_req;	/* in: path, request type and lastc buffer */
node_details_t *node;		/* out: details of the inode found */
{
/* Resolve a path name to an inode, crossing mount points and following
 * symbolic links on the way.  The per-filesystem lookup work is delegated
 * to req_lookup(); this routine re-issues the request whenever the FS
 * process reports that the walk left its partition (EENTERMOUNT /
 * ELEAVEMOUNT) or hit an absolute-path symlink (ESYMLINK).  On return the
 * fields of 'node' are filled in from the last FS reply; the FS result
 * code is returned (OK on success).
 */
  struct vmnt *vmp;
  struct vnode *start_node;	/* directory vnode the next request starts from */
  struct lookup_res res;	/* reply from the FS process */
  int r, symloop = 0;		/* symlinks followed so far */
  int cum_path_processed = 0;	/* chars of 'fullpath' already resolved */

  /* Make a copy of the request so that the original values will be kept */
  struct lookup_req req = *lookup_req;
  char *fullpath = lookup_req->path;

  /* Empty (start) path?  Mark the result invalid (inode_nr == 0) so
   * callers can tell that nothing was looked up. */
  if (fullpath[0] == '\0') {
      node->inode_nr = 0;
      return ENOENT;
  }

  /* Set user and group ids according to the system call: ACCESS checks
   * against the real ids, everything else against the effective ids. */
  req.uid = (call_nr == ACCESS ? fp->fp_realuid : fp->fp_effuid);
  req.gid = (call_nr == ACCESS ? fp->fp_realgid : fp->fp_effgid);

  /* Set the starting directories inode number and FS endpoint:
   * absolute paths start at the root dir, relative ones at the
   * working dir. */
  start_node = (fullpath[0] == '/' ? fp->fp_rd : fp->fp_wd);
  req.start_dir = start_node->v_inode_nr;
  req.fs_e = start_node->v_fs_e;

  /* Is the process' root directory on the same partition?,
   * if so, set the chroot directory too. */
  if (fp->fp_rd->v_dev == fp->fp_wd->v_dev)
      req.root_dir = fp->fp_rd->v_inode_nr;
  else
      req.root_dir = 0;		/* 0: no chroot barrier on this partition */

  req.symloop = symloop;

  /* Issue the request */
  r = req_lookup(&req, &res);

  /* While the response is related to mount control set the
   * new requests respectively */
  while (r == EENTERMOUNT || r == ELEAVEMOUNT || r == ESYMLINK) {

      /* If a symlink was encountered during the lookup the
       * new path has been copied back and the number of characters
       * processed has been started over. */
      if (r == ESYMLINK || res.symloop > symloop) {
          /* The link's content is copied back to the user_fullpath
           * array. Use it as the path argument from now on... */
          fullpath = user_fullpath;
          cum_path_processed = res.char_processed;
      }
      else {
          /* Otherwise, cumulate the characters already processed from
           * the path */
          cum_path_processed += res.char_processed;
      }

      /* Remember the current value of the symloop counter */
      symloop = res.symloop;

      /* Symlink encountered with absolute path: restart at the root dir */
      if (r == ESYMLINK) {
          start_node = fp->fp_rd;
      }
      /* Entering a new partition */
      else if (r == EENTERMOUNT) {
          start_node = 0;
          /* Start node is now the mounted partition's root node */
          for (vmp = &vmnt[0]; vmp != &vmnt[NR_MNTS]; ++vmp) {
              if (vmp->m_mounted_on->v_inode_nr == res.inode_nr
                      && vmp->m_mounted_on->v_fs_e == res.fs_e) {
                  start_node = vmp->m_root_node;
                  break;
              }
          }
          if (!start_node) {
              printf("VFSlookup: mounted partition couldn't be found\n");
              printf("VFSlookup: res.inode_nr = %d, res.fs_e = %d\n",
                      res.inode_nr, res.fs_e);
              return ENOENT;
          }

      }
      /* Climbing up mount */
      else {
          /* Find the vmnt that represents the partition on
           * which we "climb up". */
          if ((vmp = find_vmnt(res.fs_e)) == NIL_VMNT) {
              printf("VFS: couldn't find vmnt during the climbup!\n");
              return ENOENT;
          }
          /* Start node is the vnode on which the partition is
           * mounted */
          start_node = vmp->m_mounted_on;
      }
      /* Fill in the request fields */
      req.start_dir = start_node->v_inode_nr;
      req.fs_e = start_node->v_fs_e;

      /* Is the process' root directory on the same partition?*/
      if (start_node->v_dev == fp->fp_rd->v_dev)
          req.root_dir = fp->fp_rd->v_inode_nr;
      else
          req.root_dir = 0;

      /* Fill in the current path name: continue where the previous
       * request left off. */
      req.path = &fullpath[cum_path_processed];
      req.symloop = symloop;

      /* Issue the request */
      r = req_lookup(&req, &res);
  }

  /* Fill in response fields */
  node->inode_nr = res.inode_nr;
  node->fmode = res.fmode;
  node->fsize = res.fsize;
  node->dev = res.dev;
  node->fs_e = res.fs_e;
  node->uid = res.uid;
  node->gid = res.gid;

  return r;
}
|
|
|
|
|
|
/*===========================================================================*
 *				Xlookup					     *
 *===========================================================================*/
PRIVATE int Xlookup(lookup_req, node, pathrem)
lookup_req_t *lookup_req;	/* in: path, request type and lastc buffer */
node_details_t *node;		/* out: details of the inode found */
char **pathrem;			/* out: unresolved remainder of the path */
{
/* Variant of lookup() that additionally reports, via 'pathrem', the part
 * of the path that could not be resolved when the lookup ends in ENOENT.
 * Otherwise identical in structure to lookup(): it crosses mount points
 * and follows symlinks by re-issuing req_lookup() requests.
 */
  struct vmnt *vmp;
  struct vnode *start_node;	/* directory vnode the next request starts from */
  struct lookup_res res;	/* reply from the FS process */
  int r, symloop = 0;		/* symlinks followed so far */
  int cum_path_processed = 0;	/* chars of 'fullpath' already resolved */

  /* Make a copy of the request so that the original values will be kept */
  struct lookup_req req = *lookup_req;
  char *fullpath = lookup_req->path;

  /* Clear pathrem */
  *pathrem= NULL;

  /* Empty (start) path?  Nothing was resolved, so the whole (empty)
   * path is the remainder. */
  if (fullpath[0] == '\0') {
      node->inode_nr = 0;
      *pathrem = fullpath;
      return ENOENT;
  }

  /* Set user and group ids according to the system call: ACCESS checks
   * against the real ids, everything else against the effective ids. */
  req.uid = (call_nr == ACCESS ? fp->fp_realuid : fp->fp_effuid);
  req.gid = (call_nr == ACCESS ? fp->fp_realgid : fp->fp_effgid);

  /* Set the starting directories inode number and FS endpoint */
  start_node = (fullpath[0] == '/' ? fp->fp_rd : fp->fp_wd);
  req.start_dir = start_node->v_inode_nr;
  req.fs_e = start_node->v_fs_e;

  /* Is the process' root directory on the same partition?,
   * if so, set the chroot directory too. */
  if (fp->fp_rd->v_dev == fp->fp_wd->v_dev)
      req.root_dir = fp->fp_rd->v_inode_nr;
  else
      req.root_dir = 0;		/* 0: no chroot barrier on this partition */

  req.symloop = symloop;

  /* Issue the request */
  r = req_lookup(&req, &res);

  /* While the response is related to mount control set the
   * new requests respectively */
  while (r == EENTERMOUNT || r == ELEAVEMOUNT || r == ESYMLINK) {

      /* If a symlink was encountered during the lookup the
       * new path has been copied back and the number of characters
       * processed has been started over. */
      if (r == ESYMLINK || res.symloop > symloop) {
          /* The link's content is copied back to the user_fullpath
           * array. Use it as the path argument from now on... */
          fullpath = user_fullpath;
          cum_path_processed = res.char_processed;
      }
      else {
          /* Otherwise, cumulate the characters already processed from
           * the path */
          cum_path_processed += res.char_processed;
      }

      /* Remember the current value of the symloop counter */
      symloop = res.symloop;

      /* Symlink encountered with absolute path: restart at the root dir */
      if (r == ESYMLINK) {
          start_node = fp->fp_rd;
      }
      /* Entering a new partition */
      else if (r == EENTERMOUNT) {
          start_node = 0;
          /* Start node is now the mounted partition's root node */
          for (vmp = &vmnt[0]; vmp != &vmnt[NR_MNTS]; ++vmp) {
              if (vmp->m_mounted_on->v_inode_nr == res.inode_nr
                      && vmp->m_mounted_on->v_fs_e == res.fs_e) {
                  start_node = vmp->m_root_node;
                  break;
              }
          }
          if (!start_node) {
              printf("VFSlookup: mounted partition couldn't be found\n");
              printf("VFSlookup: res.inode_nr = %d, res.fs_e = %d\n",
                      res.inode_nr, res.fs_e);
              return ENOENT;
          }

      }
      /* Climbing up mount */
      else {
          /* Find the vmnt that represents the partition on
           * which we "climb up". */
          if ((vmp = find_vmnt(res.fs_e)) == NIL_VMNT) {
              printf("VFS: couldn't find vmnt during the climbup!\n");
              return ENOENT;
          }
          /* Start node is the vnode on which the partition is
           * mounted */
          start_node = vmp->m_mounted_on;
      }
      /* Fill in the request fields */
      req.start_dir = start_node->v_inode_nr;
      req.fs_e = start_node->v_fs_e;

      /* Is the process' root directory on the same partition?*/
      if (start_node->v_dev == fp->fp_rd->v_dev)
          req.root_dir = fp->fp_rd->v_inode_nr;
      else
          req.root_dir = 0;

      /* Fill in the current path name: continue where the previous
       * request left off. */
      req.path = &fullpath[cum_path_processed];
      req.symloop = symloop;

      /* Issue the request */
      r = req_lookup(&req, &res);
  }

  /* Path ended in a nonexistent entry: report the unresolved tail so
   * the caller (e.g. create code) knows which component was missing. */
  if (r == ENOENT)
  {
      cum_path_processed += res.char_processed;
      *pathrem= &fullpath[cum_path_processed];
  }

  /* Fill in response fields */
  node->inode_nr = res.inode_nr;
  node->fmode = res.fmode;
  node->fsize = res.fsize;
  node->dev = res.dev;
  node->fs_e = res.fs_e;
  node->uid = res.uid;
  node->gid = res.gid;

  return r;
}
|
|
|
|
|
|
/*===========================================================================*
|
|
* lookup_vp *
|
|
*===========================================================================*/
|
|
PUBLIC int lookup_vp(lookup_req, vpp)
|
|
lookup_req_t *lookup_req;
|
|
struct vnode **vpp;
|
|
{
|
|
int r, lookup_res;
|
|
struct vnode *vp;
|
|
struct vmnt *vmp;
|
|
node_req_t node_req;
|
|
struct node_details res;
|
|
|
|
lookup_res = lookup(lookup_req, &res);
|
|
|
|
if (res.inode_nr == 0)
|
|
{
|
|
printf("lookup_vp: lookup returned no inode\n");
|
|
printf("lookup_res = %d, last = '%s'\n\n",
|
|
lookup_res, lookup_req->lastc);
|
|
*vpp= NULL;
|
|
return lookup_res;
|
|
}
|
|
|
|
/* Check whether vnode is already in use or not */
|
|
if ((vp = find_vnode(res.fs_e, res.inode_nr)) != NIL_VNODE) {
|
|
vp->v_ref_count++;
|
|
*vpp= vp;
|
|
return lookup_res;
|
|
}
|
|
|
|
/* See if free vnode is available */
|
|
if ((vp = get_free_vnode(__FILE__, __LINE__)) == NIL_VNODE) {
|
|
printf("VFS lookup_vp: no free vnode available\n");
|
|
*vpp= NULL;
|
|
return EINVAL;
|
|
}
|
|
|
|
/* Fill in request message fields.*/
|
|
node_req.fs_e = res.fs_e;
|
|
node_req.inode_nr = res.inode_nr;
|
|
|
|
/* Issue request */
|
|
if ((r = req_getnode(&node_req, &res)) != OK)
|
|
{
|
|
printf("lookup_vp: req_getnode failed: %d\n", r);
|
|
*vpp= NULL;
|
|
return r;
|
|
}
|
|
|
|
/* Fill in the free vnode's fields */
|
|
vp->v_fs_e = res.fs_e;
|
|
vp->v_inode_nr = res.inode_nr;
|
|
vp->v_mode = res.fmode;
|
|
vp->v_size = res.fsize;
|
|
vp->v_uid = res.uid;
|
|
vp->v_gid = res.gid;
|
|
vp->v_sdev = res.dev;
|
|
|
|
if ( (vmp = find_vmnt(vp->v_fs_e)) == NIL_VMNT)
|
|
panic(__FILE__, "lookup_vp: vmnt not found", NO_NUM);
|
|
|
|
vp->v_vmnt = vmp;
|
|
vp->v_dev = vmp->m_dev;
|
|
vp->v_fs_count = 1;
|
|
vp->v_ref_count = 1;
|
|
|
|
*vpp= vp;
|
|
return lookup_res;
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* Xlookup_vp *
|
|
*===========================================================================*/
|
|
PUBLIC int Xlookup_vp(lookup_req, vpp, pathrem)
|
|
lookup_req_t *lookup_req;
|
|
struct vnode **vpp;
|
|
char **pathrem;
|
|
{
|
|
int r, lookup_res;
|
|
struct vnode *vp;
|
|
struct vmnt *vmp;
|
|
node_req_t node_req;
|
|
struct node_details res;
|
|
|
|
lookup_res = Xlookup(lookup_req, &res, pathrem);
|
|
|
|
if (res.inode_nr == 0)
|
|
{
|
|
printf("Xlookup_vp: lookup returned no inode\n");
|
|
printf("lookup_res = %d, last = '%s'\n\n",
|
|
lookup_res, lookup_req->lastc);
|
|
*vpp= NULL;
|
|
return lookup_res;
|
|
}
|
|
|
|
/* Check whether vnode is already in use or not */
|
|
if ((vp = find_vnode(res.fs_e, res.inode_nr)) != NIL_VNODE) {
|
|
vp->v_ref_count++;
|
|
*vpp= vp;
|
|
return lookup_res;
|
|
}
|
|
|
|
/* See if free vnode is available */
|
|
if ((vp = get_free_vnode(__FILE__, __LINE__)) == NIL_VNODE) {
|
|
printf("VFS Xlookup_vp: no free vnode available\n");
|
|
*vpp= NULL;
|
|
return EINVAL;
|
|
}
|
|
|
|
/* Fill in request message fields.*/
|
|
node_req.fs_e = res.fs_e;
|
|
node_req.inode_nr = res.inode_nr;
|
|
|
|
/* Issue request */
|
|
if ((r = req_getnode(&node_req, &res)) != OK)
|
|
{
|
|
printf("Xlookup_vp: req_getnode failed: %d\n", r);
|
|
*vpp= NULL;
|
|
return r;
|
|
}
|
|
|
|
/* Fill in the free vnode's fields */
|
|
vp->v_fs_e = res.fs_e;
|
|
vp->v_inode_nr = res.inode_nr;
|
|
vp->v_mode = res.fmode;
|
|
vp->v_size = res.fsize;
|
|
vp->v_uid = res.uid;
|
|
vp->v_gid = res.gid;
|
|
vp->v_sdev = res.dev;
|
|
|
|
if ( (vmp = find_vmnt(vp->v_fs_e)) == NIL_VMNT)
|
|
panic(__FILE__, "Xlookup_vp: vmnt not found", NO_NUM);
|
|
|
|
vp->v_vmnt = vmp;
|
|
vp->v_dev = vmp->m_dev;
|
|
vp->v_fs_count = 1;
|
|
vp->v_ref_count = 1;
|
|
|
|
*vpp= vp;
|
|
return lookup_res;
|
|
}
|
|
|