minix/servers/vfs/main.c


/* This file contains the main program of the Virtual File System.  It
 * consists of a loop that gets messages requesting work, carries out the
 * work, and sends replies.
 *
 * The entry points into this file are:
 *   main:  main program of the Virtual File System
 *   reply: send a reply to a process after the requested work is done
 *
 * Changes for VFS:
 *   Jul 2006 (Balazs Gerofi)
 */
#include "fs.h"
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/ioc_memory.h>
#include <sys/svrctl.h>
#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/keymap.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>
#include "file.h"
#include "fproc.h"
#include "param.h"
#include <minix/vfsif.h>
#include "vmnt.h"
#include "vnode.h"
#if ENABLE_SYSCALL_STATS
EXTERN unsigned long calls_stats[NCALLS];
#endif
FORWARD _PROTOTYPE( void fs_init, (void) );
FORWARD _PROTOTYPE( void get_work, (void) );
FORWARD _PROTOTYPE( void init_root, (void) );
FORWARD _PROTOTYPE( void service_pm, (void) );
/*===========================================================================*
* main *
*===========================================================================*/
PUBLIC int main()
{
/* This is the main program of the file system. The main loop consists of
* three major activities: getting new work, processing the work, and sending
* the reply. This loop never terminates as long as the file system runs.
*/
int error;
fs_init();
SANITYCHECK;
/* This is the main loop that gets work, processes it, and sends replies. */
while (TRUE) {
SANITYCHECK;
get_work(); /* sets who and call_nr */
if (who_e == PM_PROC_NR && call_nr != PROC_EVENT)
printf("FS: strange, got message %d from PM\n", call_nr);
if (call_nr == DEV_REVIVE)
{
endpoint_t endpt;
endpt = m_in.REP_ENDPT;
if(endpt == FS_PROC_NR) {
endpt = suspended_ep(m_in.m_source, m_in.REP_IO_GRANT);
if(endpt == NONE) {
printf("FS: proc with "
"grant %d from %d not found (revive)\n",
m_in.REP_IO_GRANT, m_in.m_source);
continue;
}
}
revive(endpt, m_in.REP_STATUS);
continue;
}
if (call_nr == DEV_REOPEN_REPL)
{
reopen_reply();
continue;
}
if (call_nr == DEV_CLOSE_REPL)
{
close_reply();
continue;
}
if (call_nr == DEV_SEL_REPL1)
{
select_reply1();
continue;
}
if (call_nr == DEV_SEL_REPL2)
{
select_reply2();
continue;
}
/* Check for special control messages first. */
if ((call_nr & NOTIFY_MESSAGE)) {
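/* Notifications signal events rather than request specific work: a
 * PROC_EVENT from PM, a SYN_ALARM from CLOCK for select() timeouts, or
 * a status change reported by a device driver.
 */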
if (call_nr == PROC_EVENT && who_e == PM_PROC_NR)
{
/* PM tries to get FS to do something */
service_pm();
}
else if (call_nr == SYN_ALARM && who_e == CLOCK)
{
/* Alarm timer expired. Used only for select().
* Check it.
*/
fs_expire_timers(m_in.NOTIFY_TIMESTAMP);
}
else
{
/* Device notifies us of an event. */
dev_status(&m_in);
}
SANITYCHECK;
continue;
}
/* We only expect notify()s from tasks. */
if(who_p < 0) {
printf("FS: ignoring message from %d (%d)\n",
who_e, m_in.m_type);
continue;
}
/* Now it's safe to set and check fp. */
fp = &fproc[who_p]; /* pointer to proc table struct */
super_user = (fp->fp_effuid == SU_UID ? TRUE : FALSE); /* su? */
#if DO_SANITYCHECKS
if(fp->fp_suspended != NOT_SUSPENDED) {
printf("VFS: requester %d call %d: not not suspended\n",
who_e, call_nr);
panic(__FILE__, "requester suspended", NO_NUM);
}
#endif
/* Calls from VM. */
if(who_e == VM_PROC_NR) {
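/* VM uses its own set of call numbers; anything it does not cover falls
 * through to the normal dispatch below.
 */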
int caught = 1;
switch(call_nr)
{
case VM_VFS_OPEN:
error = do_vm_open();
break;
case VM_VFS_CLOSE:
error = do_vm_close();
break;
case VM_VFS_MMAP:
error = do_vm_mmap();
break;
default:
caught = 0;
break;
}
if(caught) {
reply(who_e, error);
continue;
}
}
SANITYCHECK;
/* Other calls. */
switch(call_nr)
{
case DEVCTL:
error= do_devctl();
if (error != SUSPEND) reply(who_e, error);
break;
case MAPDRIVER:
error= do_mapdriver();
if (error != SUSPEND) reply(who_e, error);
break;
default:
/* Call the internal function that does the work. */
if (call_nr < 0 || call_nr >= NCALLS) {
error = SUSPEND;
/* Not supposed to happen. */
printf("VFS: illegal %d system call by %d\n",
call_nr, who_e);
} else if (fp->fp_pid == PID_FREE) {
error = ENOSYS;
printf(
"FS, bad process, who = %d, call_nr = %d, endpt1 = %d\n",
who_e, call_nr, m_in.endpt1);
} else {
#if ENABLE_SYSCALL_STATS
calls_stats[call_nr]++;
#endif
SANITYCHECK;
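/* Dispatch through the call table; the handler reads its arguments
 * from the global request message m_in.
 */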
error = (*call_vec[call_nr])();
SANITYCHECK;
}
/* Copy the results back to the user and send reply. */
if (error != SUSPEND) { reply(who_e, error); }
}
SANITYCHECK;
}
return(OK); /* shouldn't come here */
}
/*===========================================================================*
* get_work *
*===========================================================================*/
PRIVATE void get_work()
{
/* Normally wait for new input. However, if 'reviving' is
* nonzero, a suspended process must be awakened.
*/
int r, found_one, fd_nr;
struct filp *f;
register struct fproc *rp;
while (reviving != 0) {
found_one= FALSE;
/* Revive a suspended process. */
for (rp = &fproc[0]; rp < &fproc[NR_PROCS]; rp++)
if (rp->fp_pid != PID_FREE && rp->fp_revived == REVIVING) {
found_one= TRUE;
who_p = (int)(rp - fproc);
who_e = rp->fp_endpoint;
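/* fp_fd packs the original call number in the low byte and the file
 * descriptor in the byte above it; restore both, along with the saved
 * buffer pointer and byte count.
 */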
call_nr = rp->fp_fd & BYTE;
m_in.fd = (rp->fp_fd >>8) & BYTE;
m_in.buffer = rp->fp_buffer;
m_in.nbytes = rp->fp_nbytes;
rp->fp_suspended = NOT_SUSPENDED; /*no longer hanging*/
rp->fp_revived = NOT_REVIVING;
reviving--;
/* This should be a pipe I/O, not a device I/O.
 * If it were a device I/O, the grant would 'leak'.
 */
assert(!GRANT_VALID(rp->fp_grant));
if (rp->fp_task == -XPIPE)
{
fp= rp;
fd_nr= (rp->fp_fd >> 8);
f= get_filp(fd_nr);
assert(f != NULL);
r= rw_pipe((call_nr == READ) ? READING :
WRITING, who_e, fd_nr, f,
rp->fp_buffer, rp->fp_nbytes);
if (r != SUSPEND)
reply(who_e, r);
continue;
}
return;
}
if (!found_one)
panic(__FILE__,"get_work couldn't revive anyone", NO_NUM);
}
for(;;) {
int r;
/* Normal case. No one to revive. */
if ((r=receive(ANY, &m_in)) != OK)
panic(__FILE__,"fs receive error", r);
who_e = m_in.m_source;
who_p = _ENDPOINT_P(who_e);
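/* Sanity check the sender: its slot must be in range and, for user
 * processes, the endpoint recorded in that slot must match.
 */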
if(who_p < -NR_TASKS || who_p >= NR_PROCS)
panic(__FILE__,"receive process out of range", who_p);
if(who_p >= 0 && fproc[who_p].fp_endpoint == NONE) {
printf("FS: ignoring request from %d, endpointless slot %d (%d)\n",
m_in.m_source, who_p, m_in.m_type);
continue;
}
if(who_p >= 0 && fproc[who_p].fp_endpoint != who_e) {
printf("FS: receive endpoint inconsistent (%d, %d, %d).\n",
who_e, fproc[who_p].fp_endpoint, who_e);
panic(__FILE__, "FS: inconsistent endpoint ", NO_NUM);
continue;
}
call_nr = m_in.m_type;
return;
}
}
/*===========================================================================*
* reply *
*===========================================================================*/
PUBLIC void reply(whom, result)
int whom; /* process to reply to */
int result; /* result of the call (usually OK or error #) */
{
/* Send a reply to a user process. If the send fails, just ignore it. */
int s;
#if 0
if (call_nr == SYMLINK)
printf("vfs:reply: replying %d for call %d\n", result, call_nr);
#endif
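/* sendnb() does not block, so an unresponsive destination cannot hang
 * VFS; a failed reply is only reported.
 */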
m_out.reply_type = result;
s = sendnb(whom, &m_out);
if (s != OK) printf("VFS: couldn't send reply %d to %d: %d\n",
result, whom, s);
}
/*===========================================================================*
* fs_init *
*===========================================================================*/
PRIVATE void fs_init()
{
/* Initialize global variables, tables, etc. */
int s;
register struct fproc *rfp;
struct vmnt *vmp;
struct vnode *root_vp;
message mess;
/* Clear endpoint field */
last_login_fs_e = NONE;
mount_m_in.m1_p3 = (char *) NONE;
/* Initialize the process table with the help of the process manager's messages.
* Expect one message for each system process with its slot number and pid.
* When no more processes follow, the magic process number NONE is sent.
* Then, stop and synchronize with the PM.
*/
do {
if (OK != (s=receive(PM_PROC_NR, &mess)))
panic(__FILE__,"FS couldn't receive from PM", s);
if (NONE == mess.PR_ENDPT) break;
rfp = &fproc[mess.PR_SLOT];
rfp->fp_pid = mess.PR_PID;
rfp->fp_endpoint = mess.PR_ENDPT;
rfp->fp_realuid = (uid_t) SYS_UID;
rfp->fp_effuid = (uid_t) SYS_UID;
rfp->fp_realgid = (gid_t) SYS_GID;
rfp->fp_effgid = (gid_t) SYS_GID;
rfp->fp_umask = ~0;
rfp->fp_grant = GRANT_INVALID;
rfp->fp_suspended = NOT_SUSPENDED;
rfp->fp_revived = NOT_REVIVING;
} while (TRUE); /* continue until process NONE */
mess.m_type = OK; /* tell PM that we succeeded */
s = send(PM_PROC_NR, &mess); /* send synchronization message */
/* All process table entries have been set. Continue with FS initialization.
* Certain relations must hold for the file system to work at all. Some
* extra block_size requirements are checked at super-block-read-in time.
*/
if (OPEN_MAX > 127) panic(__FILE__,"OPEN_MAX > 127", NO_NUM);
/* The following initializations are needed to let dev_opcl succeed. */
fp = (struct fproc *) NULL;
who_e = who_p = FS_PROC_NR;
build_dmap(); /* build device table and map boot driver */
init_root(); /* init root device and load super block */
init_select(); /* init select() structures */
vmp = &vmnt[0]; /* Should be the root filesystem */
if (vmp->m_dev == NO_DEV)
panic(__FILE__, "vfs:fs_init: no root filesystem", NO_NUM);
root_vp= vmp->m_root_node;
/* The root device can now be accessed; set process directories. */
for (rfp=&fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
FD_ZERO(&(rfp->fp_filp_inuse));
if (rfp->fp_pid != PID_FREE) {
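/* Give every slot in use its own reference to the root vnode for both
 * its root and working directory.
 */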
dup_vnode(root_vp);
rfp->fp_rd = root_vp;
dup_vnode(root_vp);
rfp->fp_wd = root_vp;
} else rfp->fp_endpoint = NONE;
}
system_hz = sys_hz();
}
/*===========================================================================*
* init_root *
*===========================================================================*/
PRIVATE void init_root()
{
int r = OK;
struct vmnt *vmp;
struct vnode *root_node;
struct dmap *dp;
char *label;
message m;
struct node_details resX;
/* Open the root device. */
root_dev = DEV_IMGRD;
ROOT_FS_E = MFS_PROC_NR;
/* Wait for the root FS login message, unless it has already arrived. */
if (last_login_fs_e != ROOT_FS_E) {
if (receive(ROOT_FS_E, &m) != OK) {
printf("VFS: Error receiving login request from FS_e %d\n",
ROOT_FS_E);
panic(__FILE__, "Error receiving login request from root filesystem\n", ROOT_FS_E);
}
if (m.m_type != FS_READY) {
printf("VFS: Invalid login request from FS_e %d\n",
ROOT_FS_E);
panic(__FILE__, "Error receiving login request from root filesystem\n", ROOT_FS_E);
}
}
last_login_fs_e = NONE;
/* Initialize vmnt table */
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp)
vmp->m_dev = NO_DEV;
vmp = &vmnt[0];
/* We'll need a vnode for the root inode; check whether a free one is available */
if ((root_node = get_free_vnode(__FILE__, __LINE__)) == NIL_VNODE) {
panic(__FILE__,"Cannot get free vnode", r);
}
/* Get driver process' endpoint */
dp = &dmap[(root_dev >> MAJOR) & BYTE];
if (dp->dmap_driver == NONE) {
panic(__FILE__,"No driver for root device", r);
}
label= dp->dmap_label;
if (strlen(label) == 0)
{
panic(__FILE__, "vfs:init_root: no label for major", root_dev >> MAJOR);
}
/* Ask the root FS to read in its superblock and return the root inode's details */
r = req_readsuper(ROOT_FS_E, label, root_dev, 0 /*!readonly*/,
1 /*isroot*/, &resX);
if (r != OK) {
panic(__FILE__,"Cannot read superblock from root", r);
}
/* Fill in root node's fields */
root_node->v_fs_e = resX.fs_e;
root_node->v_inode_nr = resX.inode_nr;
root_node->v_mode = resX.fmode;
root_node->v_size = resX.fsize;
root_node->v_sdev = NO_DEV;
root_node->v_fs_count = 1;
root_node->v_ref_count = 1;
/* Fill in the vmnt fields for the root partition */
vmp->m_fs_e = resX.fs_e;
vmp->m_dev = root_dev;
vmp->m_flags = 0;
/* The root node resides on this partition. */
root_node->v_vmnt = vmp;
root_node->v_dev = vmp->m_dev;
/* Root directory is not mounted on a vnode. */
vmp->m_mounted_on = NULL;
vmp->m_root_node = root_node;
}
/*===========================================================================*
* service_pm *
*===========================================================================*/
PRIVATE void service_pm()
{
int r, call;
struct vmnt *vmp;
message m;
/* Ask PM for work until there is nothing left to do */
for (;;)
{
m.m_type= PM_GET_WORK;
r= sendrec(PM_PROC_NR, &m);
if (r != OK)
{
panic("VFS", "service_pm: sendrec failed", r);
}
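/* PM replies either with a request to carry out or with PM_IDLE when
 * it has no more work queued.
 */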
if (m.m_type == PM_IDLE) {
break;
}
call= m.m_type;
switch(call)
{
case PM_SETSID:
pm_setsid(m.PM_SETSID_PROC);
/* No need to report status to PM */
break;
case PM_SETGID:
pm_setgid(m.PM_SETGID_PROC, m.PM_SETGID_EGID,
m.PM_SETGID_RGID);
/* No need to report status to PM */
break;
case PM_SETUID:
pm_setuid(m.PM_SETUID_PROC, m.PM_SETUID_EGID,
m.PM_SETUID_RGID);
/* No need to report status to PM */
break;
case PM_FORK:
pm_fork(m.PM_FORK_PPROC, m.PM_FORK_CPROC,
m.PM_FORK_CPID);
/* No need to report status to PM */
break;
case PM_EXIT:
pm_exit(m.PM_EXIT_PROC);
/* Reply dummy status to PM for synchronization */
m.m_type= PM_EXIT_REPLY;
/* Keep m.PM_EXIT_PROC */
r= send(PM_PROC_NR, &m);
if (r != OK)
panic(__FILE__, "service_pm: send failed", r);
break;
case PM_UNPAUSE:
case PM_UNPAUSE_TR:
unpause(m.PM_UNPAUSE_PROC);
/* No need to report status to PM */
break;
case PM_REBOOT:
pm_reboot();
/* Reply dummy status to PM for synchronization */
m.m_type= PM_REBOOT_REPLY;
r= send(PM_PROC_NR, &m);
if (r != OK)
panic(__FILE__, "service_pm: send failed", r);
break;
case PM_EXEC:
r= pm_exec(m.PM_EXEC_PROC, m.PM_EXEC_PATH,
m.PM_EXEC_PATH_LEN, m.PM_EXEC_FRAME,
m.PM_EXEC_FRAME_LEN);
/* Reply status to PM */
m.m_type= PM_EXEC_REPLY;
/* Keep m.PM_EXEC_PROC */
m.PM_EXEC_STATUS= r;
r= send(PM_PROC_NR, &m);
if (r != OK)
panic(__FILE__, "service_pm: send failed", r);
break;
case PM_DUMPCORE:
r= pm_dumpcore(m.PM_CORE_PROC,
(struct mem_map *)m.PM_CORE_SEGPTR);
/* Reply status to PM */
m.m_type= PM_CORE_REPLY;
/* Keep m.PM_CORE_PROC */
m.PM_CORE_STATUS= r;
r= send(PM_PROC_NR, &m);
if (r != OK)
panic(__FILE__, "service_pm: send failed", r);
break;
default:
panic("VFS", "service_pm: unknown call", m.m_type);
}
}
}