VFS: make message pointer management more robust
The previous approach of storing pointers to message structures for thread-blocking sendrec operations relied on several assumptions which, if violated, could lead to hard-to-diagnose memory corruption. With this patch, VFS resets each such pointer right after use, so that a stale pointer can never be dereferenced accidentally later on. Some code paths already followed this approach, but not all of them.

Change-Id: I752d994ea847b46228bd2ccf4e537deceb78fbaf
parent 56ac45c10b
commit 29e004d23b

4 changed files with 26 additions and 8 deletions
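The discipline the patch converges on is simple: whoever consumes a stored message pointer also clears it, and every other path merely asserts that it is already gone. Below is a minimal stand-alone sketch of that pattern. It is illustrative only: the structs are simplified stand-ins, and the function names (send_and_wait, deliver_reply) are made up rather than taken from VFS.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct message { int m_type; };

struct worker {
	struct message *w_sendrec;	/* where to store the reply, or NULL */
};

/* Requester side: park the caller's buffer and (in real code) block. */
static void send_and_wait(struct worker *self, struct message *req)
{
	assert(self->w_sendrec == NULL);	/* no other request may be pending */
	self->w_sendrec = req;
	/* The real code would now send the request and yield until woken up. */
}

/* Reply side: copy the reply, then reset the pointer right after use. */
static void deliver_reply(struct worker *wp, const struct message *reply)
{
	if (wp->w_sendrec == NULL) {
		printf("late reply ignored\n");	/* pointer already consumed */
		return;
	}
	*wp->w_sendrec = *reply;
	wp->w_sendrec = NULL;	/* never leave a dangling pointer behind */
}

int main(void)
{
	struct worker w = { NULL };
	struct message req = { 0 };
	struct message reply = { 42 };

	send_and_wait(&w, &req);
	deliver_reply(&w, &reply);
	assert(w.w_sendrec == NULL);	/* the reply path consumed the pointer */
	deliver_reply(&w, &reply);	/* a duplicate reply is safely ignored */
	printf("reply type: %d\n", req.m_type);
	return 0;
}

The diff below follows the same shape: the senders assert the pointer is free before setting it, the reply handlers copy the reply and immediately clear the pointer, and worker_stop() clears it after writing EIO.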
@@ -114,17 +114,18 @@ int drv_sendrec(endpoint_t drv_e, message *reqmp)
 	if ((r = asynsend3(drv_e, self->w_drv_sendrec, AMF_NOREPLY)) == OK) {
 		/* Yield execution until we've received the reply */
 		worker_wait();
 
 	} else {
 		printf("VFS: drv_sendrec: error sending msg to driver %d: %d\n",
 			drv_e, r);
 		util_stacktrace();
+		self->w_drv_sendrec = NULL;
 	}
 
+	assert(self->w_drv_sendrec == NULL);
 	dp->dmap_servicing = INVALID_THREAD;
 	self->w_task = NONE;
-	self->w_drv_sendrec = NULL;
 	unlock_dmap(dp);
-	return(OK);
+	return(r);
 }
 
 /*===========================================================================*
@@ -141,6 +142,7 @@ int fs_sendrec(endpoint_t fs_e, message *reqmp)
 	}
 	if (fs_e == fp->fp_endpoint) return(EDEADLK);
 
+	assert(self->w_sendrec == NULL);
 	self->w_sendrec = reqmp;	/* Where to store request and reply */
 
 	/* Find out whether we can send right away or have to enqueue */
@@ -157,6 +159,8 @@ int fs_sendrec(endpoint_t fs_e, message *reqmp)
 
 	worker_wait();	/* Yield execution until we've received the reply. */
 
+	assert(self->w_sendrec == NULL);
+
 	return(reqmp->m_type);
 }
 
@@ -170,6 +174,7 @@ int vm_sendrec(message *reqmp)
 	assert(self);
 	assert(reqmp);
 
+	assert(self->w_sendrec == NULL);
 	self->w_sendrec = reqmp;	/* Where to store request and reply */
 
 	r = sendmsg(NULL, VM_PROC_NR, self);
@@ -180,6 +185,8 @@ int vm_sendrec(message *reqmp)
 
 	worker_wait();	/* Yield execution until we've received the reply. */
 
+	assert(self->w_sendrec == NULL);
+
 	return(reqmp->m_type);
 }
 
@@ -445,7 +445,7 @@ static int cdev_opcl(
 	worker_wait();
 
 	self->w_task = NONE;
-	self->w_drv_sendrec = NULL;
+	assert(self->w_drv_sendrec == NULL);
 
 	/* Process the reply. */
 	r = dev_mess.m_lchardriver_vfs_reply.status;
@@ -613,7 +613,7 @@ int cdev_cancel(dev_t dev)
 	worker_wait();
 
 	self->w_task = NONE;
-	self->w_drv_sendrec = NULL;
+	assert(self->w_drv_sendrec == NULL);
 
 	/* Clean up and return the result (note: the request may have completed). */
 	if (GRANT_VALID(fp->fp_grant)) {
@@ -765,9 +765,10 @@ static void cdev_generic_reply(message *m_ptr)
 	}
 	rfp = &fproc[slot];
 	wp = rfp->fp_worker;
-	if (wp != NULL && wp->w_task == who_e) {
+	if (wp != NULL && wp->w_task == who_e && wp->w_drv_sendrec != NULL) {
 		assert(!fp_is_blocked(rfp));
 		*wp->w_drv_sendrec = *m_ptr;
+		wp->w_drv_sendrec = NULL;
 		worker_signal(wp);	/* Continue open/close/cancel */
 	} else if (rfp->fp_blocked_on != FP_BLOCKED_ON_OTHER ||
 			rfp->fp_task != m_ptr->m_source) {
@@ -840,12 +841,11 @@ void bdev_reply(void)
 	}
 
 	wp = worker_get(dp->dmap_servicing);
-	if (wp == NULL || wp->w_task != who_e) {
+	if (wp == NULL || wp->w_task != who_e || wp->w_drv_sendrec == NULL) {
 		printf("VFS: no worker thread waiting for a reply from %d\n", who_e);
 		return;
 	}
 
-	assert(wp->w_drv_sendrec != NULL);
 	*wp->w_drv_sendrec = m_in;
 	wp->w_drv_sendrec = NULL;
 	worker_signal(wp);
@@ -194,8 +194,17 @@ static void do_reply(struct worker_thread *wp)
 	if (wp->w_task != who_e) {
 		printf("VFS: tid %d: expected %d to reply, not %d\n",
 			wp->w_tid, wp->w_task, who_e);
 		return;
 	}
+	/* It should be impossible to trigger the following case, but it is here for
+	 * consistency reasons: worker_stop() resets w_sendrec but not w_task.
+	 */
+	if (wp->w_sendrec == NULL) {
+		printf("VFS: tid %d: late reply from %d ignored\n", wp->w_tid, who_e);
+		return;
+	}
 	*wp->w_sendrec = m_in;
+	wp->w_sendrec = NULL;
+	wp->w_task = NONE;
 	if(vmp) vmp->m_comm.c_cur_reqs--;	/* We've got our reply, make room for others */
 	worker_signal(wp);	/* Continue this thread */
@@ -469,8 +469,10 @@ void worker_stop(struct worker_thread *worker)
 	/* This thread is communicating with a driver or file server */
 	if (worker->w_drv_sendrec != NULL) {		/* Driver */
 		worker->w_drv_sendrec->m_type = EIO;
+		worker->w_drv_sendrec = NULL;
 	} else if (worker->w_sendrec != NULL) {		/* FS */
 		worker->w_sendrec->m_type = EIO;
+		worker->w_sendrec = NULL;
 	} else {
 		panic("reply storage consistency error");	/* Oh dear */
 	}