VFS: during initial mount, receive but block work
For VFS, initialization is a special case for processing work: PFS and the ramdisk MFS must be fully mounted before VFS can process any other requests, in particular from init(8). Previously, this case was handled by receiving reply messages only from the FS service being mounted, but that effectively disallowed PFS from calling setuid(2) at startup.

This patch lets VFS receive all messages during the mounting process, but defer processing any new requests until mounting has completed. As a result, the FS services have a bit more freedom in what they can do during startup.

Change-Id: I18275f458952a8d790736a9c9559b27bbef97b7b
parent 179bddcf5d
commit 7eb698ea4a
4 changed files with 106 additions and 38 deletions
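The gist of the change can be modeled outside MINIX in a few lines: messages are always received, but while mounting is in progress, new requests are parked as pending rather than dispatched, while replies keep flowing. The sketch below is a minimal standalone model under that assumption; the message type, the queue, and all names are invented for illustration and are not VFS code.

#include <stdio.h>

#define MAX_PENDING 8
enum { REPLY, REQUEST };

struct message { int source, kind; };

static struct message pending_q[MAX_PENDING];
static unsigned int npending;
static int block_all;			/* "worker_allow(FALSE)" while mounting */

static void dispatch(const struct message *m)
{
	printf("%s from %d handled\n",
	    m->kind == REPLY ? "reply" : "request", m->source);
}

/* Always receive; only the dispatch of new requests is gated. */
static void receive(const struct message *m)
{
	if (m->kind == REQUEST && block_all) {
		pending_q[npending++] = *m;	/* defer, don't refuse */
		return;
	}
	dispatch(m);
}

/* "worker_allow(TRUE)": open the gate and drain the backlog. */
static void allow_work(void)
{
	unsigned int i;

	block_all = 0;
	for (i = 0; i < npending; i++)
		dispatch(&pending_q[i]);
	npending = 0;
}

int main(void)
{
	struct message init_req = { 7, REQUEST };  /* early init(8) request */
	struct message fs_reply = { 1, REPLY };    /* reply from FS being mounted */

	block_all = 1;
	receive(&init_req);	/* received, but deferred */
	receive(&fs_reply);	/* replies are still processed */
	allow_work();		/* mounting done: init's request runs now */
	return 0;
}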
glo.h
@@ -14,7 +14,6 @@ EXTERN struct fproc *fp;	/* pointer to caller's fproc struct */
 EXTERN int susp_count;		/* number of procs suspended on pipe */
 EXTERN int nr_locks;		/* number of locks currently in place */
 EXTERN int reviving;		/* number of pipe processes to be revived */
-EXTERN int pending;
 EXTERN int sending;
 EXTERN int verbose;
 
main.c
@@ -48,7 +48,6 @@ static int unblock(struct fproc *rfp);
 /* SEF functions and variables. */
 static void sef_local_startup(void);
 static int sef_cb_init_fresh(int type, sef_init_info_t *info);
-static endpoint_t receive_from;
 
 /*===========================================================================*
  *				main					     *
@@ -298,7 +297,6 @@ static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
   message mess;
   struct rprocpub rprocpub[NR_BOOT_PROCS];
 
-  receive_from = NONE;
   self = NULL;
   verbose = 0;
 
@@ -404,14 +402,13 @@ static void do_init_root(void)
   char *mount_type, *mount_label;
   int r;
 
-  /* Mount the pipe file server. */
-  receive_from = PFS_PROC_NR;
+  /* Disallow requests from e.g. init(8) while doing the initial mounting. */
+  worker_allow(FALSE);
 
+  /* Mount the pipe file server. */
   mount_pfs();
 
   /* Mount the root file system. */
-  receive_from = MFS_PROC_NR;
-
   mount_type = "mfs";		/* FIXME: use boot image process name instead */
   mount_label = "fs_imgrd";	/* FIXME: obtain this from RS */
 
@@ -419,7 +416,9 @@ static void do_init_root(void)
 		mount_label);
   if (r != OK)
 	panic("Failed to initialize root");
-  receive_from = ANY;
+
+  /* All done with mounting, allow requests now. */
+  worker_allow(TRUE);
 }
 
 /*===========================================================================*
@@ -502,10 +501,8 @@ static void get_work()
   }
 
   for(;;) {
-	assert(receive_from != NONE);
-
 	/* Normal case. No one to revive. Get a useful request. */
-	if ((r = sef_receive(receive_from, &m_in)) != OK) {
+	if ((r = sef_receive(ANY, &m_in)) != OK) {
 		panic("VFS: sef_receive error: %d", r);
 	}
 
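Why the old sef_receive(receive_from, ...) filter was harmful: if the FS being mounted makes a system call of its own, that call can end up needing service from VFS through a third party, which the filter shuts out. One plausible chain, assumed here from the commit message (in MINIX, PM handles setuid(2) and notifies VFS of ID changes): PFS blocks on PM, PM blocks on VFS, and VFS waits only for PFS's mount reply. The sketch below just walks that hypothetical wait-for cycle; the table and names are invented.

#include <stdio.h>

enum proc { VFS, PM, PFS, NPROCS };
static const char *name[NPROCS] = { "VFS", "PM", "PFS" };

/* blocked_on[p]: which process p is synchronously waiting for, or -1.
 * Assumed chain: VFS waits for PFS's mount reply; PFS waits for PM to
 * finish setuid(); PM waits for VFS to accept the ID-change request. */
static const int blocked_on[NPROCS] = { PFS, VFS, PM };

int main(void)
{
	int p = VFS, steps;

	for (steps = 0; steps <= NPROCS; steps++) {
		if (blocked_on[p] < 0) {
			printf("no deadlock\n");
			return 0;
		}
		p = blocked_on[p];
	}
	printf("wait-for cycle: everyone is blocked (ends at %s)\n", name[p]);
	return 0;
}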
proto.h
@@ -336,6 +336,7 @@ void select_unsuspend_by_endpt(endpoint_t proc);
 /* worker.c */
 void worker_init(void);
 int worker_available(void);
+void worker_allow(int allow);
 struct worker_thread *worker_get(thread_t worker_tid);
 void worker_signal(struct worker_thread *worker);
 int worker_can_start(struct fproc *rfp);
worker.c
@@ -5,7 +5,11 @@ static void worker_get_work(void);
 static void *worker_main(void *arg);
 static void worker_sleep(void);
 static void worker_wake(struct worker_thread *worker);
 
 static mthread_attr_t tattr;
+static unsigned int pending;
+static unsigned int busy;
+static int block_all;
 
 #ifdef MKCOVERAGE
 # define TH_STACKSIZE (40 * 1024)
@@ -31,6 +35,8 @@ void worker_init(void)
   if (mthread_attr_setdetachstate(&tattr, MTHREAD_CREATE_DETACHED) != 0)
 	panic("couldn't set default thread detach state");
   pending = 0;
+  busy = 0;
+  block_all = FALSE;
 
   for (i = 0; i < NR_WTHREADS; i++) {
 	wp = &workers[i];
@@ -50,6 +56,79 @@ void worker_init(void)
 	yield_all();
 }
 
+/*===========================================================================*
+ *				worker_assign				     *
+ *===========================================================================*/
+static void worker_assign(struct fproc *rfp)
+{
+/* Assign the work for the given process to a free thread. The caller must
+ * ensure that there is in fact at least one free thread.
+ */
+  struct worker_thread *worker;
+  int i;
+
+  /* Find a free worker thread. */
+  for (i = 0; i < NR_WTHREADS; i++) {
+	worker = &workers[i];
+
+	if (worker->w_fp == NULL)
+		break;
+  }
+  assert(worker != NULL);
+
+  /* Assign work to it. */
+  rfp->fp_worker = worker;
+  worker->w_fp = rfp;
+  busy++;
+
+  worker_wake(worker);
+}
+
+/*===========================================================================*
+ *				worker_may_do_pending			     *
+ *===========================================================================*/
+static int worker_may_do_pending(void)
+{
+/* Return whether there is a free thread that may do pending work. This is true
+ * only if there is pending work at all, and there is a free non-spare thread
+ * (the spare thread is never used for pending work), and VFS is currently
+ * processing new requests at all (this may not be true during initialization).
+ */
+
+  /* Ordered by likelihood to be false. */
+  return (pending > 0 && worker_available() > 1 && !block_all);
+}
+
+/*===========================================================================*
+ *				worker_allow				     *
+ *===========================================================================*/
+void worker_allow(int allow)
+{
+/* Allow or disallow workers to process new work. If disallowed, any new work
+ * will be stored as pending, even when there are free worker threads. There is
+ * no facility to stop active workers. To be used only during initialization!
+ */
+  struct fproc *rfp;
+
+  block_all = !allow;
+
+  if (!worker_may_do_pending())
+	return;
+
+  /* Assign any pending work to workers. */
+  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
+	if (rfp->fp_flags & FP_PENDING) {
+		rfp->fp_flags &= ~FP_PENDING; /* No longer pending */
+		assert(pending > 0);
+		pending--;
+		worker_assign(rfp);
+
+		if (!worker_may_do_pending())
+			return;
+	}
+  }
+}
+
 /*===========================================================================*
  *				worker_get_work				     *
  *===========================================================================*/
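The new worker_allow() drains deferred work, but worker_may_do_pending() makes it stop while one thread, the spare reserved for deadlock resolution, is still free. A standalone model of just that counting logic (the thread count and job count below are invented):

#include <stdio.h>

#define NR_WTHREADS 4

static unsigned int pending = 5;	/* jobs deferred while blocked */
static unsigned int busy;		/* threads currently assigned */
static int block_all = 1;

static int worker_available(void) { return NR_WTHREADS - busy; }

static int worker_may_do_pending(void)
{
	/* Same predicate as above: work queued, a non-spare thread free,
	 * and new work allowed at all. */
	return pending > 0 && worker_available() > 1 && !block_all;
}

static void worker_allow(int allow)
{
	block_all = !allow;
	while (worker_may_do_pending()) {
		pending--;
		busy++;			/* stands in for worker_assign() */
	}
}

int main(void)
{
	worker_allow(1);
	/* Only 3 of the 5 jobs start: one thread always stays spare. */
	printf("busy=%u pending=%u available=%d\n",
	    busy, pending, worker_available());
	return 0;
}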
@@ -60,16 +139,19 @@ static void worker_get_work(void)
  */
   struct fproc *rfp;
 
-  /* Do we have queued work to do? */
-  if (pending > 0) {
+  assert(self->w_fp == NULL);
+
+  /* Is there pending work, and should we do it? */
+  if (worker_may_do_pending()) {
 	/* Find pending work */
 	for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
 		if (rfp->fp_flags & FP_PENDING) {
 			self->w_fp = rfp;
 			rfp->fp_worker = self;
+			busy++;
 			rfp->fp_flags &= ~FP_PENDING; /* No longer pending */
+			assert(pending > 0);
 			pending--;
-			assert(pending >= 0);
 			return;
 		}
 	}
@@ -85,13 +167,8 @@
  *===========================================================================*/
 int worker_available(void)
 {
-  int busy, i;
-
-  busy = 0;
-  for (i = 0; i < NR_WTHREADS; i++) {
-	if (workers[i].w_fp != NULL)
-		busy++;
-  }
-
+/* Return the number of threads that are available, including the spare thread.
+ */
   return(NR_WTHREADS - busy);
 }
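worker_available() used to count free threads with a scan; it now returns NR_WTHREADS - busy, which is only correct if every assignment increments busy (worker_assign(), worker_get_work()) and every completion decrements it (worker_main()). A sketch of that invariant, with a hypothetical slot array standing in for the workers' w_fp fields:

#include <assert.h>
#include <stddef.h>

#define NR_WTHREADS 4

static void *slot[NR_WTHREADS];		/* non-NULL = assigned, like w_fp */
static unsigned int busy;

static void check(void)
{
	unsigned int i, n = 0;

	for (i = 0; i < NR_WTHREADS; i++)
		if (slot[i] != NULL) n++;
	assert(n == busy);		/* what the old scan computed */
}

static void assign(int i, void *fp) { slot[i] = fp; busy++; check(); }
static void finish(int i) { slot[i] = NULL; assert(busy > 0); busy--; check(); }

int main(void)
{
	int x;

	assign(0, &x);
	assign(2, &x);
	finish(0);
	finish(2);
	return 0;
}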
@@ -146,6 +223,8 @@ static void *worker_main(void *arg)
 
 	fp->fp_worker = NULL;
 	self->w_fp = NULL;
+	assert(busy > 0);
+	busy--;
   }
 
   return(NULL);	/* Unreachable */
@@ -196,29 +275,21 @@ static void worker_try_activate(struct fproc *rfp, int use_spare)
 /* See if we can wake up a thread to do the work scheduled for the given
  * process. If not, mark the process as having pending work for later.
  */
-  int i, available, needed;
-  struct worker_thread *worker;
+  int needed;
 
   /* Use the last available thread only if requested. Otherwise, leave at least
    * one spare thread for deadlock resolution.
    */
   needed = use_spare ? 1 : 2;
 
-  worker = NULL;
-  for (i = available = 0; i < NR_WTHREADS; i++) {
-	if (workers[i].w_fp == NULL) {
-		if (worker == NULL)
-			worker = &workers[i];
-		if (++available >= needed)
-			break;
-	}
-  }
-
-  if (available >= needed) {
-	assert(worker != NULL);
-	rfp->fp_worker = worker;
-	worker->w_fp = rfp;
-	worker_wake(worker);
+  /* Also make sure that doing new work is allowed at all right now, which may
+   * not be the case during VFS initialization. We do always allow callback
+   * calls, i.e., calls that may use the spare thread. The reason is that we do
+   * not support callback calls being marked as pending, so the (entirely
+   * theoretical) exception here may (entirely theoretically) avoid deadlocks.
+   */
+  if (needed <= worker_available() && (!block_all || use_spare)) {
+	worker_assign(rfp);
   } else {
 	rfp->fp_flags |= FP_PENDING;
 	pending++;
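The rewritten worker_try_activate() replaces the free-thread scan with a single test: enough threads are available, and either new work is allowed or this is a (spare-using) callback call, which is never marked pending. The little program below just enumerates that predicate; the helper name is invented and only mirrors the expression in the hunk above:

#include <stdio.h>

static int may_activate(int available, int use_spare, int block_all)
{
	int needed = use_spare ? 1 : 2;	/* keep a spare unless allowed */

	return needed <= available && (!block_all || use_spare);
}

int main(void)
{
	int avail, spare, block;

	for (block = 0; block <= 1; block++)
		for (spare = 0; spare <= 1; spare++)
			for (avail = 0; avail <= 2; avail++)
				printf("block_all=%d use_spare=%d available=%d -> %s\n",
				    block, spare, avail,
				    may_activate(avail, spare, block) ?
				    "activate" : "pend");
	return 0;
}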