minix/servers/vfs/misc.c
/* This file contains a collection of miscellaneous procedures.  Some of them
 * perform simple system calls.  Some others do a little part of system calls
 * that are mostly performed by the Process Manager (PM).
 *
 * The entry points into this file are
 *   do_dup:	    perform the DUP system call
 *   do_fcntl:	    perform the FCNTL system call
 *   do_sync:	    perform the SYNC system call
 *   do_fsync:	    perform the FSYNC system call
 *   pm_reboot:	    sync disks and prepare for shutdown
 *   pm_fork:	    adjust the tables after PM has performed a FORK system call
 *   pm_exit:	    a process has exited; note that in the tables
 *   pm_setuid:	    set the uid for some process
 *   pm_setgid:	    set the gid for some process
 *   pm_setgroups:  set the supplementary groups for some process
 *   do_svrctl:	    file system control
 *   do_getsysinfo: request a copy of FS data structures
 *   pm_dumpcore:   create a core dump
 *   ds_event:	    handle a driver-up event published through DS
 */
#include "fs.h"
#include <fcntl.h>
#include <assert.h>
#include <unistd.h> /* cc runs out of memory with unistd.h :-( */
#include <minix/callnr.h>
#include <minix/safecopies.h>
#include <minix/endpoint.h>
#include <minix/com.h>
#include <minix/u64.h>
#include <sys/ptrace.h>
#include <sys/svrctl.h>
#include "file.h"
#include "fproc.h"
#include "param.h"
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"
#define CORE_NAME "core"
#define CORE_MODE 0777 /* mode to use on core image files */
#if ENABLE_SYSCALL_STATS
PUBLIC unsigned long calls_stats[NCALLS];
#endif
FORWARD _PROTOTYPE( void free_proc, (struct fproc *freed, int flags) );
FORWARD _PROTOTYPE( void unmount_all, (void) );
/*
FORWARD _PROTOTYPE( int dumpcore, (int proc_e, struct mem_map *seg_ptr) );
FORWARD _PROTOTYPE( int write_bytes, (struct inode *rip, off_t off,
char *buf, size_t bytes) );
FORWARD _PROTOTYPE( int write_seg, (struct inode *rip, off_t off, int proc_e,
int seg, off_t seg_off, phys_bytes seg_bytes) );
*/
#define FP_EXITING 1
/*===========================================================================*
* do_getsysinfo *
*===========================================================================*/
PUBLIC int do_getsysinfo()
{
struct fproc *proc_addr;
vir_bytes src_addr, dst_addr;
size_t len;
int s;
/* Only su may call do_getsysinfo. This call may leak information (and is not
* stable enough to be part of the API/ABI).
*/
if (!super_user) return(EPERM);
switch(m_in.info_what) {
case SI_PROC_ADDR:
proc_addr = &fproc[0];
src_addr = (vir_bytes) &proc_addr;
len = sizeof(struct fproc *);
break;
case SI_PROC_TAB:
src_addr = (vir_bytes) fproc;
len = sizeof(struct fproc) * NR_PROCS;
break;
case SI_DMAP_TAB:
src_addr = (vir_bytes) dmap;
len = sizeof(struct dmap) * NR_DEVICES;
break;
#if ENABLE_SYSCALL_STATS
case SI_CALL_STATS:
src_addr = (vir_bytes) calls_stats;
len = sizeof(calls_stats);
break;
#endif
default:
return(EINVAL);
}
dst_addr = (vir_bytes) m_in.info_where;
if (OK != (s = sys_datacopy(SELF, src_addr, who_e, dst_addr, len))) return(s);
return(OK);
}
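/* Illustrative user-space sketch (not part of VFS): how a privileged tool
 * such as ps might use this call to fetch the whole fproc table.  The
 * getsysinfo() wrapper, its argument order and the FS_PROC_NR endpoint name
 * are assumptions about the installed MINIX userland headers, not something
 * this file guarantees.
 *
 *	#include <minix/sysinfo.h>
 *	#include <minix/com.h>
 *
 *	static struct fproc fproc_tab[NR_PROCS];
 *
 *	if (getsysinfo(FS_PROC_NR, SI_PROC_TAB, fproc_tab) != OK)
 *		exit(1);	 only the superuser gets past the EPERM check
 */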
/*===========================================================================*
* do_dup *
*===========================================================================*/
PUBLIC int do_dup()
{
/* Perform the dup(fd) or dup2(fd,fd2) system call. These system calls are
* obsolete. In fact, it is not even possible to invoke them using the
* current library because the library routines call fcntl(). They are
* provided to permit old binary programs to continue to run.
*/
register int rfd;
register struct filp *f;
struct filp *dummy;
int r;
/* Is the file descriptor valid? */
rfd = m_in.fd & ~DUP_MASK; /* kill off dup2 bit, if on */
if ((f = get_filp(rfd)) == NULL) return(err_code);
/* Distinguish between dup and dup2. */
if (m_in.fd == rfd) { /* bit not on */
/* dup(fd) */
if ((r = get_fd(0, 0, &m_in.fd2, &dummy)) != OK) return(r);
} else {
/* dup2(fd, fd2) */
if (m_in.fd2 < 0 || m_in.fd2 >= OPEN_MAX) return(EBADF);
if (rfd == m_in.fd2) return(m_in.fd2); /* ignore the call: dup2(x, x) */
m_in.fd = m_in.fd2; /* prepare to close fd2 */
(void) do_close(); /* cannot fail */
}
/* Success. Set up new file descriptors. */
f->filp_count++;
fp->fp_filp[m_in.fd2] = f;
FD_SET(m_in.fd2, &fp->fp_filp_inuse);
return(m_in.fd2);
}
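/* Illustrative user-space sketch: as the comment above notes, the current
 * library implements dup()/dup2() on top of fcntl(), so new binaries reach
 * do_fcntl() instead of this handler.  The equivalences (standard POSIX,
 * shown only for orientation) are:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int a = fcntl(oldfd, F_DUPFD, 0);    same effect as dup(oldfd)
 *	int b = dup2(oldfd, newfd);          an old binary instead sends DUP
 *	                                     with the DUP_MASK bit set in fd
 */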
/*===========================================================================*
* do_fcntl *
*===========================================================================*/
PUBLIC int do_fcntl()
{
/* Perform the fcntl(fd, request, ...) system call. */
register struct filp *f;
int new_fd, r, fl;
struct filp *dummy;
/* Is the file descriptor valid? */
if ((f = get_filp(m_in.fd)) == NULL) return(err_code);
switch (m_in.request) {
case F_DUPFD:
/* This replaces the old dup() system call. */
if (m_in.addr < 0 || m_in.addr >= OPEN_MAX) return(EINVAL);
if ((r = get_fd(m_in.addr, 0, &new_fd, &dummy)) != OK) return(r);
f->filp_count++;
fp->fp_filp[new_fd] = f;
return(new_fd);
case F_GETFD:
/* Get close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
return( FD_ISSET(m_in.fd, &fp->fp_cloexec_set) ? FD_CLOEXEC : 0);
case F_SETFD:
/* Set close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
if(m_in.addr & FD_CLOEXEC)
FD_SET(m_in.fd, &fp->fp_cloexec_set);
else
FD_CLR(m_in.fd, &fp->fp_cloexec_set);
return(OK);
case F_GETFL:
/* Get file status flags (O_NONBLOCK and O_APPEND). */
fl = f->filp_flags & (O_NONBLOCK | O_APPEND | O_ACCMODE);
return(fl);
case F_SETFL:
/* Set file status flags (O_NONBLOCK and O_APPEND). */
fl = O_NONBLOCK | O_APPEND | O_REOPEN;
f->filp_flags = (f->filp_flags & ~fl) | (m_in.addr & fl);
return(OK);
case F_GETLK:
case F_SETLK:
case F_SETLKW:
/* Set or clear a file lock. */
r = lock_op(f, m_in.request);
return(r);
case F_FREESP:
{
/* Free a section of a file. Preparation is done here, actual freeing
* in freesp_inode().
*/
off_t start, end;
struct flock flock_arg;
signed long offset;
/* Check if it's a regular file. */
if((f->filp_vno->v_mode & I_TYPE) != I_REGULAR) return(EINVAL);
if (!(f->filp_mode & W_BIT)) return(EBADF);
/* Copy flock data from userspace. */
if((r = sys_datacopy(who_e, (vir_bytes) m_in.name1, SELF,
(vir_bytes) &flock_arg, (phys_bytes) sizeof(flock_arg))) != OK)
return(r);
/* Convert starting offset to signed. */
offset = (signed long) flock_arg.l_start;
/* Figure out starting position base. */
switch(flock_arg.l_whence) {
case SEEK_SET: start = 0; break;
case SEEK_CUR:
if (ex64hi(f->filp_pos) != 0)
panic("do_fcntl: position in file too high");
start = ex64lo(f->filp_pos);
break;
case SEEK_END: start = f->filp_vno->v_size; break;
default: return EINVAL;
}
/* Check for overflow or underflow. */
if(offset > 0 && start + offset < start) return EINVAL;
if(offset < 0 && start + offset > start) return EINVAL;
start += offset;
if(start < 0) return EINVAL;
if(flock_arg.l_len != 0) {
if(start >= f->filp_vno->v_size) return EINVAL;
end = start + flock_arg.l_len;
if(end <= start) return EINVAL;
if(end > f->filp_vno->v_size) end = f->filp_vno->v_size;
} else {
end = 0;
}
r = req_ftrunc(f->filp_vno->v_fs_e, f->filp_vno->v_inode_nr, start,
end);
if(r == OK && flock_arg.l_len == 0)
f->filp_vno->v_size = start;
return(r);
}
default:
return(EINVAL);
}
}
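/* Illustrative user-space sketch for the F_FREESP case above (a MINIX-
 * specific fcntl request; this is a sketch of the intended use, not a
 * stable ABI guarantee).  l_whence and l_start select the starting offset
 * exactly as in the switch above; l_len == 0 truncates the file there.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *
 *	struct flock fl;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 4096;	 free everything from offset 4096 ...
 *	fl.l_len = 0;		 ... up to the end of the file
 *	if (fcntl(fd, F_FREESP, &fl) < 0)
 *		perror("F_FREESP");
 */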
/*===========================================================================*
* do_sync *
*===========================================================================*/
PUBLIC int do_sync()
{
struct vmnt *vmp;
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp)
if (vmp->m_dev != NO_DEV)
req_sync(vmp->m_fs_e);
return(OK);
}
/*===========================================================================*
* do_fsync *
*===========================================================================*/
PUBLIC int do_fsync()
{
/* Perform the fsync() system call. For now, don't be unnecessarily smart. */
do_sync();
return(OK);
}
/*===========================================================================*
* unmount_all *
*===========================================================================*/
PRIVATE void unmount_all(void)
{
/* Unmount all filesystems. File systems are mounted on other file systems,
* so you have to pull off the loose bits repeatedly to get it all undone.
*/
int i;
for (i= 0; i < NR_MNTS; i++) {
struct vmnt *vmp;
/* Unmount at least one. */
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++) {
if (vmp->m_dev != NO_DEV)
unmount(vmp->m_dev, NULL);
}
}
}
/*===========================================================================*
* pm_reboot *
*===========================================================================*/
PUBLIC void pm_reboot()
{
/* Perform the FS side of the reboot call. */
int i;
do_sync();
SANITYCHECK;
/* Do exit processing for all leftover processes and servers,
* but don't actually exit them (if they were really gone, PM
* will tell us about it).
*/
for (i = 0; i < NR_PROCS; i++)
if((m_in.endpt1 = fproc[i].fp_endpoint) != NONE) {
/* No FP_EXITING, just free the resources, otherwise
* consistency check for fp_endpoint (set to NONE) will
* fail if process wants to do something in the (short)
* future.
*/
free_proc(&fproc[i], 0);
}
SANITYCHECK;
unmount_all();
SANITYCHECK;
}
/*===========================================================================*
* pm_fork *
*===========================================================================*/
PUBLIC void pm_fork(pproc, cproc, cpid)
int pproc; /* Parent process */
int cproc; /* Child process */
int cpid; /* Child process id */
{
/* Perform those aspects of the fork() system call that relate to files.
* In particular, let the child inherit its parent's file descriptors.
* The parent and child parameters tell who forked off whom. The file
* system uses the same slot numbers as the kernel. Only PM makes this call.
*/
register struct fproc *cp;
int i, parentno, childno;
/* Check up-to-dateness of fproc. */
okendpt(pproc, &parentno);
/* PM gives child endpoint, which implies process slot information.
* Don't call isokendpt, because that will verify if the endpoint
* number is correct in fproc, which it won't be.
*/
childno = _ENDPOINT_P(cproc);
if(childno < 0 || childno >= NR_PROCS)
panic("FS: bogus child for forking: %d", m_in.child_endpt);
if(fproc[childno].fp_pid != PID_FREE)
panic("FS: forking on top of in-use child: %d", childno);
/* Copy the parent's fproc struct to the child. */
fproc[childno] = fproc[parentno];
/* Increase the counters in the 'filp' table. */
cp = &fproc[childno];
fp = &fproc[parentno];
for (i = 0; i < OPEN_MAX; i++)
if (cp->fp_filp[i] != NULL) cp->fp_filp[i]->filp_count++;
/* Fill in new process and endpoint id. */
cp->fp_pid = cpid;
cp->fp_endpoint = cproc;
/* A forking process never has an outstanding grant,
* as it isn't blocking on i/o.
*/
if(GRANT_VALID(fp->fp_grant)) {
printf("vfs: fork: fp (endpoint %d) has grant %d\n", fp->fp_endpoint, fp->fp_grant);
panic("fp contains valid grant");
}
if(GRANT_VALID(cp->fp_grant)) {
printf("vfs: fork: cp (endpoint %d) has grant %d\n", cp->fp_endpoint, cp->fp_grant);
panic("cp contains valid grant");
}
/* A child is not a process leader. */
cp->fp_sesldr = 0;
/* This child has not exec()ced yet. */
cp->fp_execced = 0;
/* Record the fact that both root and working dir have another user. */
if(cp->fp_rd) dup_vnode(cp->fp_rd);
if(cp->fp_wd) dup_vnode(cp->fp_wd);
}
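/* Minimal sketch of the sharing invariant pm_fork() establishes above
 * (purely illustrative; nothing in VFS calls this).  After the fork, each
 * open descriptor of parent and child refers to the very same struct filp,
 * and every such filp has had its reference count bumped, so the file
 * offset stays shared between the two processes.
 *
 *	int filp_shared_after_fork(struct fproc *parent, struct fproc *child)
 *	{
 *		int i;
 *		for (i = 0; i < OPEN_MAX; i++) {
 *			if (parent->fp_filp[i] != child->fp_filp[i])
 *				return 0;	 must alias the same filp
 *			if (parent->fp_filp[i] != NULL &&
 *				parent->fp_filp[i]->filp_count < 2)
 *				return 0;	 parent + child references
 *		}
 *		return 1;
 *	}
 */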
/*===========================================================================*
* free_proc *
*===========================================================================*/
PRIVATE void free_proc(struct fproc *exiter, int flags)
{
int i;
register struct fproc *rfp;
register struct filp *rfilp;
register struct vnode *vp;
dev_t dev;
SANITYCHECK;
fp = exiter; /* get_filp() needs 'fp' */
if(fp->fp_endpoint == NONE) {
panic("free_proc: already free");
}
if (fp_is_blocked(fp)) {
SANITYCHECK;
unpause(fp->fp_endpoint);
SANITYCHECK;
}
SANITYCHECK;
/* Loop on file descriptors, closing any that are open. */
for (i = 0; i < OPEN_MAX; i++) {
(void) close_fd(fp, i);
}
/* Check if any process is SUSPENDed on this driver.
* If a driver exits, unmap its entries in the dmap table.
* (unmapping has to be done after the first step, because the
* dmap table is used in the first step.)
*/
unsuspend_by_endpt(fp->fp_endpoint);
/* Release root and working directories. */
if(fp->fp_rd) { put_vnode(fp->fp_rd); fp->fp_rd = NULL; }
if(fp->fp_wd) { put_vnode(fp->fp_wd); fp->fp_wd = NULL; }
/* The rest of these actions are only done when the process is actually
 * exiting.
 */
if(!(flags & FP_EXITING)) {
SANITYCHECK;
return;
}
/* Invalidate endpoint number for error and sanity checks. */
fp->fp_endpoint = NONE;
/* If a session leader exits and it has a controlling tty, then revoke
* access to its controlling tty from all other processes using it.
2005-04-21 16:53:53 +02:00
*/
if (fp->fp_sesldr && fp->fp_tty != 0) {
dev = fp->fp_tty;
for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
if(rfp->fp_pid == PID_FREE) continue;
if (rfp->fp_tty == dev) rfp->fp_tty = 0;
for (i = 0; i < OPEN_MAX; i++) {
if ((rfilp = rfp->fp_filp[i]) == NULL) continue;
if (rfilp->filp_mode == FILP_CLOSED) continue;
vp = rfilp->filp_vno;
if ((vp->v_mode & I_TYPE) != I_CHAR_SPECIAL) continue;
if ((dev_t) vp->v_sdev != dev) continue;
(void) dev_close(dev, rfilp-filp);
/* Ignore any errors, even SUSPEND. */
rfilp->filp_mode = FILP_CLOSED;
}
}
}
/* Exit done. Mark slot as free. */
fp->fp_pid = PID_FREE;
SANITYCHECK;
}
/*===========================================================================*
* pm_exit *
*===========================================================================*/
PUBLIC void pm_exit(proc)
int proc;
{
int exitee_p;
/* Perform the file system portion of the exit(status) system call.  The
 * call is made by PM, but it is handled as if it came from the exiting
 * user process itself.
 */
okendpt(proc, &exitee_p);
free_proc(&fproc[exitee_p], FP_EXITING);
}
/*===========================================================================*
* pm_setgid *
*===========================================================================*/
PUBLIC void pm_setgid(proc_e, egid, rgid)
int proc_e;
int egid;
int rgid;
{
register struct fproc *tfp;
int slot;
okendpt(proc_e, &slot);
tfp = &fproc[slot];
tfp->fp_effgid = egid;
tfp->fp_realgid = rgid;
}
/*===========================================================================*
* pm_setgroups *
*===========================================================================*/
PUBLIC void pm_setgroups(proc_e, ngroups, groups)
int proc_e;
int ngroups;
gid_t *groups;
{
struct fproc *rfp;
int slot;
okendpt(proc_e, &slot);
rfp = &fproc[slot];
if (ngroups * sizeof(gid_t) > sizeof(rfp->fp_sgroups))
panic("VFS: pm_setgroups: too much data to copy");
if(sys_datacopy(who_e, (vir_bytes) groups, SELF, (vir_bytes) rfp->fp_sgroups,
ngroups * sizeof(gid_t)) == OK) {
rfp->fp_ngroups = ngroups;
} else
panic("VFS: pm_setgroups: datacopy failed");
}
/*===========================================================================*
* pm_setuid *
*===========================================================================*/
PUBLIC void pm_setuid(proc_e, euid, ruid)
int proc_e;
int euid;
int ruid;
{
register struct fproc *tfp;
int slot;
okendpt(proc_e, &slot);
tfp = &fproc[slot];
tfp->fp_effuid = euid;
tfp->fp_realuid = ruid;
}
/*===========================================================================*
* do_svrctl *
*===========================================================================*/
PUBLIC int do_svrctl()
{
switch (m_in.svrctl_req) {
/* No control request implemented yet. */
default:
return(EINVAL);
}
}
/*===========================================================================*
* pm_dumpcore *
*===========================================================================*/
PUBLIC int pm_dumpcore(proc_e, seg_ptr)
int proc_e;
struct mem_map *seg_ptr;
{
int proc_s;
/* Terminate the process */
okendpt(proc_e, &proc_s);
free_proc(&fproc[proc_s], FP_EXITING);
return OK;
}
/*===========================================================================*
* ds_event *
*===========================================================================*/
PUBLIC void ds_event()
{
char key[DS_MAX_KEYLEN];
char *driver_prefix = "drv.vfs.";
u32_t value;
int type;
endpoint_t owner_endpoint;
int r;
/* Get the event and the owner from DS. */
r = ds_check(key, &type, &owner_endpoint);
if(r != OK) {
if(r != ENOENT)
printf("vfs: ds_event: ds_check failed: %d\n", r);
return;
}
r = ds_retrieve_u32(key, &value);
if(r != OK) {
printf("vfs: ds_event: ds_retrieve_u32 failed\n");
return;
}
/* Only check for VFS driver up events. */
if(strncmp(key, driver_prefix, strlen(driver_prefix))
|| value != DS_DRIVER_UP) {
return;
}
/* Perform up. */
dmap_endpt_up(owner_endpoint);
}
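/* Illustrative sketch of the publishing side this handler listens to.
 * Assumptions: ds_publish_u32() and DSF_OVERWRITE as provided by libsys,
 * and "c0d0" as a made-up driver label; real drivers normally go through
 * driver_announce(), which performs the equivalent publication.
 *
 *	#include <stdio.h>
 *	#include <minix/ds.h>
 *
 *	char key[DS_MAX_KEYLEN];
 *	snprintf(key, sizeof(key), "drv.vfs.%s", "c0d0");
 *	ds_publish_u32(key, DS_DRIVER_UP, DSF_OVERWRITE);
 *
 * VFS is then notified by DS, ds_check() above returns this key together
 * with the publisher's endpoint, and dmap_endpt_up() rebinds the driver's
 * dmap entries to the new endpoint.
 */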