/* minix/servers/vfs/pipe.c */
/* This file deals with the suspension and revival of processes. A process can
 * be suspended because it wants to read or write from a pipe and can't, or
 * because it wants to read or write from a special file and can't. When a
 * process can't continue it is suspended, and revived later when it is able
 * to continue.
 *
 * The entry points into this file are
 *   do_pipe:	  perform the PIPE system call
 *   pipe_check:  check to see that a read or write on a pipe is feasible now
 *   suspend:	  suspend a process that cannot do a requested read or write
 *   release:	  check to see if a suspended process can be released and do
 *		  it
 *   revive:	  mark a suspended process as able to run again
 *   unsuspend_by_endpt: revive all processes blocking on a given process
 *   do_unpause:  a signal has been sent to a process; see if it suspended
 */
#include "fs.h"
#include <fcntl.h>
#include <signal.h>
#include <assert.h>
2005-04-21 16:53:53 +02:00
#include <minix/callnr.h>
endpoint-aware conversion of servers. 'who', indicating caller number in pm and fs and some other servers, has been removed in favour of 'who_e' (endpoint) and 'who_p' (proc nr.). In both PM and FS, isokendpt() convert endpoints to process slot numbers, returning OK if it was a valid and consistent endpoint number. okendpt() does the same but panic()s if it doesn't succeed. (In PM, this is pm_isok..) pm and fs keep their own records of process endpoints in their proc tables, which are needed to make kernel calls about those processes. message field names have changed. fs drivers are endpoints. fs now doesn't try to get out of driver deadlock, as the protocol isn't supposed to let that happen any more. (A warning is printed if ELOCKED is detected though.) fproc[].fp_task (indicating which driver the process is suspended on) became an int. PM and FS now get endpoint numbers of initial boot processes from the kernel. These happen to be the same as the old proc numbers, to let user processes reach them with the old numbers, but FS and PM don't know that. All new processes after INIT, even after the generation number wraps around, get endpoint numbers with generation 1 and higher, so the first instances of the boot processes are the only processes ever to have endpoint numbers in the old proc number range. More return code checks of sys_* functions have been added. IS has become endpoint-aware. Ditched the 'text' and 'data' fields in the kernel dump (which show locations, not sizes, so aren't terribly useful) in favour of the endpoint number. Proc number is still visible. Some other dumps (e.g. dmap, rs) show endpoint numbers now too which got the formatting changed. PM reading segments using rw_seg() has changed - it uses other fields in the message now instead of encoding the segment and process number and fd in the fd field. For that it uses _read_pm() and _write_pm() which to _taskcall()s directly in pm/misc.c. 
PM now sys_exit()s itself on panic(), instead of sys_abort(). RS also talks in endpoints instead of process numbers.
2006-03-03 11:20:58 +01:00
#include <minix/endpoint.h>
2005-04-21 16:53:53 +02:00
#include <minix/com.h>
#include <minix/u64.h>
#include <sys/select.h>
#include <sys/time.h>
2005-04-21 16:53:53 +02:00
#include "file.h"
#include "fproc.h"
2012-02-13 16:28:04 +01:00
#include "scratchpad.h"
#include "dmap.h"
2005-04-21 16:53:53 +02:00
#include "param.h"
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"
2005-04-21 16:53:53 +02:00
/*===========================================================================*
* do_pipe *
*===========================================================================*/
2012-03-25 20:25:53 +02:00
int do_pipe()
2005-04-21 16:53:53 +02:00
{
/* Perform the pipe(fil_des) system call. */
register struct fproc *rfp;
int r;
struct filp *fil_ptr0, *fil_ptr1;
int fil_des[2]; /* reply goes here */
struct vnode *vp;
2012-02-13 16:28:04 +01:00
struct vmnt *vmp;
struct node_details res;
2012-02-13 16:28:04 +01:00
/* Get a lock on PFS */
if ((vmp = find_vmnt(PFS_PROC_NR)) == NULL) panic("PFS gone");
VFS: fix locking bugs .sync and fsync used unnecessarily restrictive locking type .fsync violated locking order by obtaining a vmnt lock after a filp lock .fsync contained a TOCTOU bug .new_node violated locking rules (didn't upgrade lock upon file creation) .do_pipe used unnecessarily restrictive locking type .always lock pipes exclusively; even a read operation might require to do a write on a vnode object (update pipe size) .when opening a file with O_TRUNC, upgrade vnode lock when truncating .utime used unnecessarily restrictive locking type .path parsing: .always acquire VMNT_WRITE or VMNT_EXCL on vmnt and downgrade to VMNT_READ if that was what was actually requested. This prevents the following deadlock scenario: thread A: lock_vmnt(vmp, TLL_READSER); lock_vnode(vp, TLL_READSER); upgrade_vmnt_lock(vmp, TLL_WRITE); thread B: lock_vmnt(vmp, TLL_READ); lock_vnode(vp, TLL_READSER); thread A will be stuck in upgrade_vmnt_lock and thread B is stuck in lock_vnode. This happens when, for example, thread A tries create a new node (open.c:new_node) and thread B tries to do eat_path to change dir (stadir.c:do_chdir). When the path is being resolved, a vnode is always locked with VNODE_OPCL (TLL_READSER) and then downgraded to VNODE_READ if read-only is actually requested. Thread A locks the vmnt with VMNT_WRITE (TLL_READSER) which still allows VMNT_READ locks. Thread B can't acquire a lock on the vnode because thread A has it; Thread A can't upgrade its vmnt lock to VMNT_WRITE (TLL_WRITE) because thread B has a VMNT_READ lock on it. By serializing vmnt locks during path parsing, thread B can only acquire a lock on vmp when thread A has completely finished its operation.
2012-11-30 13:49:53 +01:00
if ((r = lock_vmnt(vmp, VMNT_READ)) != OK) return(r);
2012-02-13 16:28:04 +01:00
/* See if a free vnode is available */
2012-04-23 15:45:14 +02:00
if ((vp = get_free_vnode()) == NULL) {
unlock_vmnt(vmp);
return(err_code);
}
2012-02-13 16:28:04 +01:00
lock_vnode(vp, VNODE_OPCL);
2005-04-21 16:53:53 +02:00
/* Acquire two file descriptors. */
rfp = fp;
2012-02-13 16:28:04 +01:00
if ((r = get_fd(0, R_BIT, &fil_des[0], &fil_ptr0)) != OK) {
unlock_vnode(vp);
unlock_vmnt(vmp);
return(r);
}
2005-04-21 16:53:53 +02:00
rfp->fp_filp[fil_des[0]] = fil_ptr0;
FD_SET(fil_des[0], &rfp->fp_filp_inuse);
2012-02-13 16:28:04 +01:00
fil_ptr0->filp_count = 1; /* mark filp in use */
if ((r = get_fd(0, W_BIT, &fil_des[1], &fil_ptr1)) != OK) {
rfp->fp_filp[fil_des[0]] = NULL;
FD_CLR(fil_des[0], &rfp->fp_filp_inuse);
2012-02-13 16:28:04 +01:00
fil_ptr0->filp_count = 0; /* mark filp free */
unlock_filp(fil_ptr0);
unlock_vnode(vp);
unlock_vmnt(vmp);
return(r);
2005-04-21 16:53:53 +02:00
}
rfp->fp_filp[fil_des[1]] = fil_ptr1;
FD_SET(fil_des[1], &rfp->fp_filp_inuse);
2005-04-21 16:53:53 +02:00
fil_ptr1->filp_count = 1;
/* Create a named pipe inode on PipeFS */
r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid, I_NAMED_PIPE,
NO_DEV, &res);
if (r != OK) {
rfp->fp_filp[fil_des[0]] = NULL;
FD_CLR(fil_des[0], &rfp->fp_filp_inuse);
fil_ptr0->filp_count = 0;
rfp->fp_filp[fil_des[1]] = NULL;
FD_CLR(fil_des[1], &rfp->fp_filp_inuse);
fil_ptr1->filp_count = 0;
2012-02-13 16:28:04 +01:00
unlock_filp(fil_ptr1);
unlock_filp(fil_ptr0);
unlock_vnode(vp);
unlock_vmnt(vmp);
return(r);
}
/* Fill in vnode */
vp->v_fs_e = res.fs_e;
vp->v_mapfs_e = res.fs_e;
2012-02-13 16:28:04 +01:00
vp->v_inode_nr = res.inode_nr;
vp->v_mapinode_nr = res.inode_nr;
vp->v_mode = res.fmode;
2007-08-07 14:52:47 +02:00
vp->v_fs_count = 1;
vp->v_mapfs_count = 1;
2007-08-07 14:52:47 +02:00
vp->v_ref_count = 1;
vp->v_size = 0;
2012-02-13 16:28:04 +01:00
vp->v_vmnt = NULL;
vp->v_dev = NO_DEV;
2005-04-21 16:53:53 +02:00
/* Fill in filp objects */
fil_ptr0->filp_vno = vp;
2007-08-07 14:52:47 +02:00
dup_vnode(vp);
fil_ptr1->filp_vno = vp;
2005-04-21 16:53:53 +02:00
fil_ptr0->filp_flags = O_RDONLY;
fil_ptr1->filp_flags = O_WRONLY;
2005-04-21 16:53:53 +02:00
m_out.reply_i1 = fil_des[0];
m_out.reply_i2 = fil_des[1];
2012-02-13 16:28:04 +01:00
unlock_filps(fil_ptr0, fil_ptr1);
unlock_vmnt(vmp);
2005-04-21 16:53:53 +02:00
return(OK);
}
/*===========================================================================*
* map_vnode *
*===========================================================================*/
2012-03-25 20:25:53 +02:00
int map_vnode(vp, map_to_fs_e)
struct vnode *vp;
2012-02-13 16:28:04 +01:00
endpoint_t map_to_fs_e;
{
int r;
2012-02-13 16:28:04 +01:00
struct vmnt *vmp;
struct node_details res;
2012-02-13 16:28:04 +01:00
if(vp->v_mapfs_e != NONE) return(OK); /* Already mapped; nothing to do. */
if ((vmp = find_vmnt(map_to_fs_e)) == NULL)
panic("Can't map to unknown endpoint");
if ((r = lock_vmnt(vmp, VMNT_WRITE)) != OK) {
if (r == EBUSY)
vmp = NULL; /* Already locked, do not unlock */
else
return(r);
}
2012-02-13 16:28:04 +01:00
/* Create a temporary mapping of this inode to another FS. Read and write
* operations on data will be handled by that FS. The rest by the 'original'
* FS that holds the inode. */
2012-02-13 16:28:04 +01:00
if ((r = req_newnode(map_to_fs_e, fp->fp_effuid, fp->fp_effgid, I_NAMED_PIPE,
vp->v_dev, &res)) == OK) {
vp->v_mapfs_e = res.fs_e;
2012-02-13 16:28:04 +01:00
vp->v_mapinode_nr = res.inode_nr;
vp->v_mapfs_count = 1;
}
2012-02-13 16:28:04 +01:00
if (vmp) unlock_vmnt(vmp);
return(r);
}
2005-04-21 16:53:53 +02:00
/*===========================================================================*
* pipe_check *
*===========================================================================*/
int pipe_check(
	struct vnode *vp,	/* the inode of the pipe */
	int rw_flag,		/* READING or WRITING */
	int oflags,		/* flags set by open or fcntl */
	int bytes,		/* bytes to be read or written (all chunks) */
	int notouch		/* check only */
)
{
/* Pipes are a little different. If a process reads from an empty pipe for
 * which a writer still exists, suspend the reader. If the pipe is empty
 * and there is no writer, return 0 bytes. If a process is writing to a
 * pipe and no one is reading from it, give a broken pipe error.
 *
 * Returns: the number of bytes that can be transferred now, or SUSPEND
 * (caller must block), EAGAIN (would block but O_NONBLOCK set), or EPIPE
 * (write with no reader). With 'notouch' set, no side effects are performed
 * (no SIGPIPE, no waking of sleeping readers).
 */
  off_t pos;
  int r = OK;

  /* Reads start at the beginning; writes append to pipes */
  if (rw_flag == READING)
	pos = 0;
  else
	pos = vp->v_size;

  /* If reading, check for empty pipe. */
  if (rw_flag == READING) {
	if (vp->v_size == 0) {
		/* Process is reading from an empty pipe. */
		if (find_filp(vp, W_BIT) != NULL) {
			/* Writer exists */
			if (oflags & O_NONBLOCK)
				r = EAGAIN;
			else
				r = SUSPEND;

			/* If need be, activate sleeping writers. */
			if (susp_count > 0)
				release(vp, WRITE, susp_count);
		}
		/* No writer left: r is still OK (0), i.e. end of file. */
		return(r);
	}
	/* Pipe is not empty; the read can proceed in full. */
	return(bytes);
  }

  /* Process is writing to a pipe. */
  if (find_filp(vp, R_BIT) == NULL) {
	/* Process is writing, but there is no reader. Tell kernel to generate
	 * a SIGPIPE signal. */
	if (!notouch) sys_kill(fp->fp_endpoint, SIGPIPE);
	return(EPIPE);
  }

  /* Calculate how many bytes can be written. */
  if (pos + bytes > PIPE_BUF) {
	if (oflags & O_NONBLOCK) {
		if (bytes <= PIPE_BUF) {
			/* Write has to be atomic (POSIX: writes of at most
			 * PIPE_BUF bytes may not be interleaved), so a
			 * non-blocking write that doesn't fit fails. */
			return(EAGAIN);
		}

		/* Compute available space */
		bytes = PIPE_BUF - pos;

		if (bytes > 0) {
			/* Do a partial write. Need to wakeup reader */
			if (!notouch)
				release(vp, READ, susp_count);
			return(bytes);
		} else {
			/* Pipe is full */
			return(EAGAIN);
		}
	}

	if (bytes > PIPE_BUF) {
		/* Oversized blocking write: atomicity is not required, so
		 * transfer whatever fits now. */
		/* Compute available space */
		bytes = PIPE_BUF - pos;

		if (bytes > 0) {
			/* Do a partial write. Need to wakeup reader
			 * since we'll suspend ourself in read_write()
			 */
			if (!notouch)
				release(vp, READ, susp_count);
			return(bytes);
		}
	}

	/* Pipe is full */
	return(SUSPEND);
  }

  /* Writing to an empty pipe. Search for suspended reader. */
  if (pos == 0 && !notouch)
	release(vp, READ, susp_count);

  /* Requested amount fits */
  return(bytes);
}
2005-04-21 16:53:53 +02:00
/*===========================================================================*
* suspend *
*===========================================================================*/
2012-03-25 20:25:53 +02:00
void suspend(int why)
2005-04-21 16:53:53 +02:00
{
/* Take measures to suspend the processing of the present system call.
* Store the parameters to be used upon resuming in the process table.
* (Actually they are not used when a process is waiting for an I/O device,
* but they are needed for pipes, and it is not worth making the distinction.)
* The SUSPEND pseudo error should be returned after calling suspend().
*/
2012-02-13 16:28:04 +01:00
if (why == FP_BLOCKED_ON_POPEN || why == FP_BLOCKED_ON_PIPE)
/* #procs susp'ed on pipe*/
susp_count++;
fp->fp_blocked_on = why;
assert(fp->fp_grant == GRANT_INVALID || !GRANT_VALID(fp->fp_grant));
fp->fp_block_callnr = job_call_nr;
2012-02-13 16:28:04 +01:00
fp->fp_flags &= ~FP_SUSP_REOPEN; /* Clear this flag. The caller
* can set it when needed.
*/
2005-04-21 16:53:53 +02:00
}
/*===========================================================================*
* wait_for *
*===========================================================================*/
2012-03-25 20:25:53 +02:00
void wait_for(endpoint_t who)
{
/* Suspend the calling process, blocked on the endpoint 'who' (a driver). */
  if (who == ANY || who == NONE)
	panic("suspend on NONE or ANY");
  suspend(FP_BLOCKED_ON_OTHER);
  fp->fp_task = who;	/* remember which driver we are blocked on */
}
2007-08-07 14:52:47 +02:00
/*===========================================================================*
* pipe_suspend *
2007-08-07 14:52:47 +02:00
*===========================================================================*/
2012-03-25 20:25:53 +02:00
void pipe_suspend(struct filp *filp, char *buf, size_t size)
{
/* Take measures to suspend the processing of the present system call.
 * Store the parameters to be used upon resuming in the process table.
 */
  scratch(fp).file.filp = filp;
  scratch(fp).io.io_buffer = buf;
  scratch(fp).io.io_nbytes = size;
  suspend(FP_BLOCKED_ON_PIPE);
}
/*===========================================================================*
endpoint-aware conversion of servers. 'who', indicating caller number in pm and fs and some other servers, has been removed in favour of 'who_e' (endpoint) and 'who_p' (proc nr.). In both PM and FS, isokendpt() convert endpoints to process slot numbers, returning OK if it was a valid and consistent endpoint number. okendpt() does the same but panic()s if it doesn't succeed. (In PM, this is pm_isok..) pm and fs keep their own records of process endpoints in their proc tables, which are needed to make kernel calls about those processes. message field names have changed. fs drivers are endpoints. fs now doesn't try to get out of driver deadlock, as the protocol isn't supposed to let that happen any more. (A warning is printed if ELOCKED is detected though.) fproc[].fp_task (indicating which driver the process is suspended on) became an int. PM and FS now get endpoint numbers of initial boot processes from the kernel. These happen to be the same as the old proc numbers, to let user processes reach them with the old numbers, but FS and PM don't know that. All new processes after INIT, even after the generation number wraps around, get endpoint numbers with generation 1 and higher, so the first instances of the boot processes are the only processes ever to have endpoint numbers in the old proc number range. More return code checks of sys_* functions have been added. IS has become endpoint-aware. Ditched the 'text' and 'data' fields in the kernel dump (which show locations, not sizes, so aren't terribly useful) in favour of the endpoint number. Proc number is still visible. Some other dumps (e.g. dmap, rs) show endpoint numbers now too which got the formatting changed. PM reading segments using rw_seg() has changed - it uses other fields in the message now instead of encoding the segment and process number and fd in the fd field. For that it uses _read_pm() and _write_pm() which to _taskcall()s directly in pm/misc.c. 
PM now sys_exit()s itself on panic(), instead of sys_abort(). RS also talks in endpoints instead of process numbers.
2006-03-03 11:20:58 +01:00
* unsuspend_by_endpt *
*===========================================================================*/
2012-03-25 20:25:53 +02:00
void unsuspend_by_endpt(endpoint_t proc_e)
{
/* Revive processes waiting for drivers (SUSPENDed) that have disappeared,
 * giving them return code EAGAIN.
 */
  struct fproc *rp;
  int slot;

  for (slot = 0; slot < NR_PROCS; slot++) {
	rp = &fproc[slot];
	if (rp->fp_pid == PID_FREE) continue;
	if (rp->fp_blocked_on == FP_BLOCKED_ON_OTHER && rp->fp_task == proc_e)
		revive(rp->fp_endpoint, EAGAIN);
  }

  /* Revive processes waiting in drivers on select()s with EAGAIN too */
  select_unsuspend_by_endpt(proc_e);
}
2005-04-21 16:53:53 +02:00
/*===========================================================================*
* release *
*===========================================================================*/
2012-03-25 20:25:53 +02:00
void release(vp, op, count)
register struct vnode *vp;	/* inode of pipe */
int op;				/* READ, WRITE, OPEN or CREAT */
int count;			/* max number of processes to release */
{
/* Check to see if any process is hanging on vnode 'vp'. If one is, and it
 * was trying to perform the call indicated by 'call_nr', release it.
 * At most 'count' processes are revived; susp_count is decremented for each.
 */
  register struct fproc *rp;
  struct filp *f;
  int selop;

  /* Trying to perform the call also includes SELECTing on it with that
   * operation.
   */
  if (op == READ || op == WRITE) {
	if (op == READ)
		selop = SEL_RD;
	else
		selop = SEL_WR;

	/* Notify every filp selecting for this operation on this vnode. */
	for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
		if (f->filp_count < 1 || !(f->filp_pipe_select_ops & selop) ||
		    f->filp_vno != vp)
			continue;
		select_callback(f, selop);
		f->filp_pipe_select_ops &= ~selop;	/* select satisfied */
	}
  }

  /* Search the proc table. */
  for (rp = &fproc[0]; rp < &fproc[NR_PROCS] && count > 0; rp++) {
	if (rp->fp_pid != PID_FREE && fp_is_blocked(rp) &&
	    !(rp->fp_flags & FP_REVIVED) && rp->fp_block_callnr == op) {
		/* Find the vnode. Depending on the reason the process was
		 * suspended, there are different ways of finding it.
		 */
		if (rp->fp_blocked_on == FP_BLOCKED_ON_POPEN ||
		    rp->fp_blocked_on == FP_BLOCKED_ON_DOPEN ||
		    rp->fp_blocked_on == FP_BLOCKED_ON_LOCK ||
		    rp->fp_blocked_on == FP_BLOCKED_ON_OTHER) {
			/* Blocked on a file descriptor: look it up via the
			 * fd number saved in the scratchpad. */
			if (!FD_ISSET(scratch(rp).file.fd_nr,
			    &rp->fp_filp_inuse))
				continue;
			if (rp->fp_filp[scratch(rp).file.fd_nr]->filp_vno != vp)
				continue;
		} else if (rp->fp_blocked_on == FP_BLOCKED_ON_PIPE) {
			/* Blocked on a pipe: the filp pointer itself was
			 * saved in the scratchpad by pipe_suspend(). */
			if (scratch(rp).file.filp == NULL)
				continue;
			if (scratch(rp).file.filp->filp_vno != vp)
				continue;
		} else
			continue;

		/* We found the vnode. Revive process. */
		revive(rp->fp_endpoint, 0);
		susp_count--;	/* keep track of who is suspended */

		if(susp_count < 0)
			panic("susp_count now negative: %d", susp_count);

		if (--count == 0) return;
	}
  }
}
2005-04-21 16:53:53 +02:00
/*===========================================================================*
 *				revive					     *
 *===========================================================================*/
void revive(endpoint_t proc_e, int returned)
{
/* Revive a previously blocked process. When a process hangs on tty, this
 * is the way it is eventually released. 'returned' is the status (or byte
 * count) the interrupted call should report back to the unblocked process.
 */
  struct fproc *rfp;
  int blocked_on;
  int fd_nr, slot;
  struct filp *fil_ptr;

  /* Ignore requests for nonexistent or stale endpoints. */
  if (proc_e == NONE || isokendpt(proc_e, &slot) != OK) return;

  rfp = &fproc[slot];
  /* Nothing to do if the process is not blocked, or was already revived. */
  if (!fp_is_blocked(rfp) || (rfp->fp_flags & FP_REVIVED)) return;

  /* The 'reviving' flag only applies to pipes. Processes waiting for TTY get
   * a message right away. The revival process is different for TTY and pipes.
   * For select and TTY revival, the work is already done, for pipes it is not:
   * the proc must be restarted so it can try again.
   */
  blocked_on = rfp->fp_blocked_on;
  fd_nr = scratch(rfp).file.fd_nr;	/* save before scratch is cleared */
  if (blocked_on == FP_BLOCKED_ON_PIPE || blocked_on == FP_BLOCKED_ON_LOCK) {
	/* Revive a process suspended on a pipe or lock: mark it and let the
	 * main loop restart the call later.
	 */
	rfp->fp_flags |= FP_REVIVED;
	reviving++;		/* process was waiting on pipe or lock */
  } else if (blocked_on == FP_BLOCKED_ON_DOPEN) {
	/* Process was blocked on a device open. */
	rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
	scratch(rfp).file.fd_nr = 0;
	if (returned < 0) {
		/* The open failed: undo the half-finished open by releasing
		 * the filp slot and its vnode, then report the error.
		 */
		fil_ptr = rfp->fp_filp[fd_nr];
		lock_filp(fil_ptr, VNODE_OPCL);
		rfp->fp_filp[fd_nr] = NULL;
		FD_CLR(fd_nr, &rfp->fp_filp_inuse);
		if (fil_ptr->filp_count != 1) {
			/* A suspended open can have only one reference. */
			panic("VFS: revive: bad count in filp: %d",
				fil_ptr->filp_count);
		}
		fil_ptr->filp_count = 0;
		unlock_filp(fil_ptr);
		put_vnode(fil_ptr->filp_vno);
		fil_ptr->filp_vno = NULL;
		reply(proc_e, returned);
	} else {
		/* The open succeeded: the reply value is the new fd. */
		reply(proc_e, fd_nr);
	}
  } else {
	rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
	scratch(rfp).file.fd_nr = 0;
	if (blocked_on == FP_BLOCKED_ON_POPEN) {
		/* process blocked in open or create */
		reply(proc_e, fd_nr);
	} else if (blocked_on == FP_BLOCKED_ON_SELECT) {
		reply(proc_e, returned);
	} else {
		/* Revive a process suspended on TTY or other device.
		 * Pretend it wants only what there is.
		 */
		scratch(rfp).io.io_nbytes = returned;
		/* If a grant has been issued by FS for this I/O, revoke
		 * it again now that I/O is done.
		 */
		if (GRANT_VALID(rfp->fp_grant)) {
			if (cpf_revoke(rfp->fp_grant)) {
				panic("VFS: revoke failed for grant: %d",
					rfp->fp_grant);
			}
			rfp->fp_grant = GRANT_INVALID;
		}
		reply(proc_e, returned);	/* unblock the process */
	}
  }
}
2006-05-11 16:57:23 +02:00
/*===========================================================================*
* unpause *
*===========================================================================*/
2012-03-25 20:25:53 +02:00
void unpause(endpoint_t proc_e)
2006-05-11 16:57:23 +02:00
{
2005-04-21 16:53:53 +02:00
/* A signal has been sent to a user who is paused on the file system.
* Abort the system call with the EINTR error message.
*/
2012-02-13 16:28:04 +01:00
register struct fproc *rfp, *org_fp;
int slot, blocked_on, fild, status = EINTR, major_dev, minor_dev;
2005-04-21 16:53:53 +02:00
struct filp *f;
dev_t dev;
message mess;
int wasreviving = 0;
2005-04-21 16:53:53 +02:00
2012-02-13 16:28:04 +01:00
if (isokendpt(proc_e, &slot) != OK) {
printf("VFS: ignoring unpause for bogus endpoint %d\n", proc_e);
return;
}
2012-02-13 16:28:04 +01:00
rfp = &fproc[slot];
if (!fp_is_blocked(rfp)) return;
blocked_on = rfp->fp_blocked_on;
2005-04-21 16:53:53 +02:00
2012-02-13 16:28:04 +01:00
if (rfp->fp_flags & FP_REVIVED) {
rfp->fp_flags &= ~FP_REVIVED;
reviving--;
wasreviving = 1;
}
switch (blocked_on) {
case FP_BLOCKED_ON_PIPE:/* process trying to read or write a pipe */
2005-04-21 16:53:53 +02:00
break;
case FP_BLOCKED_ON_LOCK:/* process trying to set a lock with FCNTL */
2005-04-21 16:53:53 +02:00
break;
case FP_BLOCKED_ON_SELECT:/* process blocking on select() */
2012-02-13 16:28:04 +01:00
select_forget(proc_e);
break;
2012-02-13 16:28:04 +01:00
case FP_BLOCKED_ON_POPEN: /* process trying to open a fifo */
2005-04-21 16:53:53 +02:00
break;
case FP_BLOCKED_ON_DOPEN:/* process trying to open a device */
/* Don't cancel OPEN. Just wait until the open completes. */
2012-02-13 16:28:04 +01:00
return;
2012-02-13 16:28:04 +01:00
case FP_BLOCKED_ON_OTHER:/* process trying to do device I/O (e.g. tty)*/
if (rfp->fp_flags & FP_SUSP_REOPEN) {
/* Process is suspended while waiting for a reopen.
* Just reply EINTR.
*/
2012-02-13 16:28:04 +01:00
rfp->fp_flags &= ~FP_SUSP_REOPEN;
status = EINTR;
break;
}
2012-02-13 16:28:04 +01:00
fild = scratch(rfp).file.fd_nr;
if (fild < 0 || fild >= OPEN_MAX)
2012-02-13 16:28:04 +01:00
panic("file descriptor out-of-range");
2005-04-21 16:53:53 +02:00
f = rfp->fp_filp[fild];
dev = (dev_t) f->filp_vno->v_sdev; /* device hung on */
2012-02-13 16:28:04 +01:00
major_dev = major(dev);
minor_dev = minor(dev);
mess.TTY_LINE = minor_dev;
mess.USER_ENDPT = rfp->fp_ioproc;
mess.IO_GRANT = (char *) rfp->fp_grant;
2005-04-21 16:53:53 +02:00
/* Tell kernel R or W. Mode is from current call, not open. */
mess.COUNT = rfp->fp_block_callnr == READ ? R_BIT : W_BIT;
2005-04-21 16:53:53 +02:00
mess.m_type = CANCEL;
2012-02-13 16:28:04 +01:00
org_fp = fp;
2005-04-21 16:53:53 +02:00
fp = rfp; /* hack - ctty_io uses fp */
2012-02-13 16:28:04 +01:00
(*dmap[major_dev].dmap_io)(rfp->fp_task, &mess);
fp = org_fp;
status = mess.REP_STATUS;
if (status == SUSPEND)
return; /* Process will be revived at a
* later time.
*/
2012-02-13 16:28:04 +01:00
if (status == EAGAIN) status = EINTR;
if (GRANT_VALID(rfp->fp_grant)) {
(void) cpf_revoke(rfp->fp_grant);
rfp->fp_grant = GRANT_INVALID;
}
break;
default :
2012-02-13 16:28:04 +01:00
panic("VFS: unknown block reason: %d", blocked_on);
2005-04-21 16:53:53 +02:00
}
rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
2012-02-13 16:28:04 +01:00
if ((blocked_on == FP_BLOCKED_ON_PIPE || blocked_on == FP_BLOCKED_ON_POPEN)&&
!wasreviving) {
susp_count--;
}
2012-02-13 16:28:04 +01:00
reply(proc_e, status); /* signal interrupted call */
}