723e51327f
The main purpose of this patch is to fix handling of unpause calls from PM
while another call is ongoing. The solution to this problem sparked a full
revision of the threading model, consisting of a large number of related
changes:

- all active worker threads are now always associated with a process, and
  every process has at most one active thread working for it;
- the process lock is always held by a process's worker thread;
- a process can now have both normal work and postponed PM work associated
  to it;
- timer expiry and non-postponed PM work is done from the main thread;
- filp garbage collection is done from a thread associated with VFS;
- reboot calls from PM are now done from a thread associated with PM;
- the DS events handler is protected from starting multiple threads;
- support for a system worker thread has been removed;
- the deadlock recovery thread has been replaced by a parameter to the
  worker_start() function; the number of worker threads has consequently
  been increased by one;
- saving and restoring of global but per-thread variables is now
  centralized in worker_suspend() and worker_resume(); err_code is now
  saved and restored in all cases;
- the concept of jobs has been removed, and job_m_in now points to a
  message stored in the worker thread structure instead;
- the PM lock has been removed;
- the separate exec lock has been replaced by a lock on the VM process,
  which was already being locked for exec calls anyway;
- PM_UNPAUSE is now processed as a postponed PM request, from a thread
  associated with the target process;
- the FP_DROP_WORK flag has been removed, since it is no longer more than
  just an optimization and only applied to processes operating on a pipe
  when getting killed;
- assignment to "fp" now takes place only when obtaining new work in the
  main thread or a worker thread, when resuming execution of a thread, and
  in the special case of exiting processes during reboot;
- there are no longer special cases where the yield() call is used to
  force a thread to run.

Change-Id: I7a97b9b95c2450454a9b5318dfa0e6150d4e6858
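The core idea above is that a PM request arriving for a process that already has an active worker is no longer handled immediately or by a second thread; it is parked on the process as postponed PM work and picked up when the current worker finishes. A minimal, self-contained sketch of that bookkeeping follows; the proc_slot structure, dispatch_pm_work() and their fields are invented for illustration and are not the actual MINIX VFS fproc/worker interface.

/*
 * Hypothetical sketch, not the MINIX VFS code: each process slot has at
 * most one active worker; PM work arriving while that worker is busy is
 * parked as postponed work instead of starting a second thread.
 */
#include <stdio.h>

struct message { int m_type; };

struct proc_slot {
  int has_active_worker;        /* at most one worker per process */
  struct message normal_work;   /* regular VFS call in progress */
  struct message postponed_pm;  /* PM request parked for later */
  int has_postponed_pm;
};

/* Accept new PM work for a process; postpone it if a worker is active. */
static void dispatch_pm_work(struct proc_slot *p, struct message *m)
{
  if (p->has_active_worker) {
        p->postponed_pm = *m;   /* handled when the current worker finishes */
        p->has_postponed_pm = 1;
        return;
  }
  p->has_active_worker = 1;     /* a worker thread would be started here */
  p->normal_work = *m;
}

int main(void)
{
  struct proc_slot p = { 0 };
  struct message unpause = { 1 };       /* stands in for an unpause request */

  p.has_active_worker = 1;              /* pretend a call is already ongoing */
  dispatch_pm_work(&p, &unpause);
  printf("postponed: %d\n", p.has_postponed_pm);        /* prints 1 */
  return 0;
}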
175 lines
4.9 KiB
C
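In sendmsg() below, each outgoing request is tagged with a per-worker transaction id (wp->w_tid + VFS_TRANSID) folded into the message type via TRNS_ADD_ID(), so the asynchronous reply can later be matched back to the worker waiting for it. The real macros come from <minix/vfsif.h>; the XID_* macros in this sketch are hypothetical stand-ins that only illustrate the general bit-packing idea, not the actual MINIX encoding.

/*
 * Hypothetical illustration, not the real TRNS_ADD_ID/VFS_TRANSID macros:
 * pack a small transaction id into the high bits of the request type so a
 * reply can be matched back to the worker that sent the request.
 */
#include <stdio.h>

#define XID_SHIFT          16
#define XID_ADD(type, id)  ((type) | ((id) << XID_SHIFT))
#define XID_GET(type)      ((type) >> XID_SHIFT)
#define XID_DEL(type)      ((type) & ((1 << XID_SHIFT) - 1))

int main(void)
{
  int req = 42;                 /* request number */
  int tagged = XID_ADD(req, 7); /* tag with transaction id 7 */

  printf("id=%d req=%d\n", XID_GET(tagged), XID_DEL(tagged)); /* id=7 req=42 */
  return 0;
}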
#include "fs.h"
|
|
#include <minix/vfsif.h>
|
|
#include <assert.h>
|
|
|
|
static int sendmsg(struct vmnt *vmp, struct worker_thread *wp);
|
|
static int queuemsg(struct vmnt *vmp);
|
|
|
|
/*===========================================================================*
|
|
* sendmsg *
|
|
*===========================================================================*/
|
|
static int sendmsg(struct vmnt *vmp, struct worker_thread *wp)
|
|
{
|
|
/* This is the low level function that sends requests to FS processes.
|
|
*/
|
|
int r, transid;
|
|
|
|
vmp->m_comm.c_cur_reqs++; /* One more request awaiting a reply */
|
|
transid = wp->w_tid + VFS_TRANSID;
|
|
wp->w_fs_sendrec->m_type = TRNS_ADD_ID(wp->w_fs_sendrec->m_type, transid);
|
|
wp->w_task = vmp->m_fs_e;
|
|
if ((r = asynsend3(vmp->m_fs_e, wp->w_fs_sendrec, AMF_NOREPLY)) != OK) {
|
|
printf("VFS: sendmsg: error sending message. "
|
|
"FS_e: %d req_nr: %d err: %d\n", vmp->m_fs_e,
|
|
wp->w_fs_sendrec->m_type, r);
|
|
util_stacktrace();
|
|
return(r);
|
|
}
|
|
|
|
return(r);
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* send_work *
|
|
*===========================================================================*/
|
|
void send_work(void)
|
|
{
|
|
/* Try to send out as many requests as possible */
|
|
struct vmnt *vmp;
|
|
|
|
if (sending == 0) return;
|
|
for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++)
|
|
fs_sendmore(vmp);
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* fs_cancel *
|
|
*===========================================================================*/
|
|
void fs_cancel(struct vmnt *vmp)
|
|
{
|
|
/* Cancel all pending requests for this vmp */
|
|
struct worker_thread *worker;
|
|
|
|
while ((worker = vmp->m_comm.c_req_queue) != NULL) {
|
|
vmp->m_comm.c_req_queue = worker->w_next;
|
|
worker->w_next = NULL;
|
|
sending--;
|
|
worker_stop(worker);
|
|
}
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* fs_sendmore *
|
|
*===========================================================================*/
|
|
void fs_sendmore(struct vmnt *vmp)
|
|
{
|
|
struct worker_thread *worker;
|
|
|
|
/* Can we send more requests? */
|
|
if (vmp->m_fs_e == NONE) return;
|
|
if ((worker = vmp->m_comm.c_req_queue) == NULL) /* No process is queued */
|
|
return;
|
|
if (vmp->m_comm.c_cur_reqs >= vmp->m_comm.c_max_reqs)/*No room to send more*/
|
|
return;
|
|
if (vmp->m_flags & VMNT_CALLBACK) /* Hold off for now */
|
|
return;
|
|
|
|
vmp->m_comm.c_req_queue = worker->w_next; /* Remove head */
|
|
worker->w_next = NULL;
|
|
sending--;
|
|
assert(sending >= 0);
|
|
(void) sendmsg(vmp, worker);
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* drv_sendrec *
|
|
*===========================================================================*/
|
|
int drv_sendrec(endpoint_t drv_e, message *reqmp)
|
|
{
|
|
int r;
|
|
struct dmap *dp;
|
|
|
|
if ((dp = get_dmap(drv_e)) == NULL)
|
|
panic("driver endpoint %d invalid", drv_e);
|
|
|
|
lock_dmap(dp);
|
|
if (dp->dmap_servicing != NONE)
|
|
panic("driver locking inconsistency");
|
|
dp->dmap_servicing = self->w_tid;
|
|
self->w_task = drv_e;
|
|
self->w_drv_sendrec = reqmp;
|
|
|
|
if ((r = asynsend3(drv_e, self->w_drv_sendrec, AMF_NOREPLY)) == OK) {
|
|
/* Yield execution until we've received the reply */
|
|
worker_wait();
|
|
} else {
|
|
printf("VFS: drv_sendrec: error sending msg to driver %d: %d\n",
|
|
drv_e, r);
|
|
util_stacktrace();
|
|
}
|
|
|
|
dp->dmap_servicing = NONE;
|
|
self->w_task = NONE;
|
|
self->w_drv_sendrec = NULL;
|
|
unlock_dmap(dp);
|
|
return(OK);
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* fs_sendrec *
|
|
*===========================================================================*/
|
|
int fs_sendrec(endpoint_t fs_e, message *reqmp)
|
|
{
|
|
struct vmnt *vmp;
|
|
int r;
|
|
|
|
if ((vmp = find_vmnt(fs_e)) == NULL) {
|
|
printf("Trying to talk to non-existent FS endpoint %d\n", fs_e);
|
|
return(EIO);
|
|
}
|
|
if (fs_e == fp->fp_endpoint) return(EDEADLK);
|
|
|
|
self->w_fs_sendrec = reqmp; /* Where to store request and reply */
|
|
|
|
/* Find out whether we can send right away or have to enqueue */
|
|
if ( !(vmp->m_flags & VMNT_CALLBACK) &&
|
|
vmp->m_comm.c_cur_reqs < vmp->m_comm.c_max_reqs) {
|
|
/* There's still room to send more and no proc is queued */
|
|
r = sendmsg(vmp, self);
|
|
} else {
|
|
r = queuemsg(vmp);
|
|
}
|
|
self->w_next = NULL; /* End of list */
|
|
|
|
if (r != OK) return(r);
|
|
|
|
worker_wait(); /* Yield execution until we've received the reply. */
|
|
|
|
return(reqmp->m_type);
|
|
}
|
|
|
|
/*===========================================================================*
|
|
* queuemsg *
|
|
*===========================================================================*/
|
|
static int queuemsg(struct vmnt *vmp)
|
|
{
|
|
/* Put request on queue for vmnt */
|
|
|
|
struct worker_thread *queue;
|
|
|
|
if (vmp->m_comm.c_req_queue == NULL) {
|
|
vmp->m_comm.c_req_queue = self;
|
|
} else {
|
|
/* Walk the list ... */
|
|
queue = vmp->m_comm.c_req_queue;
|
|
while (queue->w_next != NULL) queue = queue->w_next;
|
|
|
|
/* ... and append this worker */
|
|
queue->w_next = self;
|
|
}
|
|
|
|
self->w_next = NULL; /* End of list */
|
|
sending++;
|
|
|
|
return(OK);
|
|
}
|
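Both drv_sendrec() and fs_sendrec() above use the same pattern: the request is sent with asynsend3() and the worker then calls worker_wait(), blocking until the main thread receives the reply and resumes it. The POSIX-threads sketch below is a rough stand-in for that mechanism, assuming invented fake_worker_wait()/fake_worker_signal() helpers; MINIX's worker threads are not implemented with pthreads.

/*
 * Hypothetical stand-in for the send-then-wait pattern: a worker blocks on
 * a condition variable until the reply handler wakes it with the result.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct fake_worker {
  pthread_mutex_t lock;
  pthread_cond_t cond;
  int have_reply;
  int reply;
};

/* Called by the worker after sending its request asynchronously. */
static int fake_worker_wait(struct fake_worker *w)
{
  int reply;

  pthread_mutex_lock(&w->lock);
  while (!w->have_reply)
        pthread_cond_wait(&w->cond, &w->lock);
  w->have_reply = 0;
  reply = w->reply;
  pthread_mutex_unlock(&w->lock);
  return reply;
}

/* Called from the reply path when the response message arrives. */
static void fake_worker_signal(struct fake_worker *w, int reply)
{
  pthread_mutex_lock(&w->lock);
  w->reply = reply;
  w->have_reply = 1;
  pthread_cond_signal(&w->cond);
  pthread_mutex_unlock(&w->lock);
}

static void *replier(void *arg)
{
  fake_worker_signal((struct fake_worker *) arg, 123);
  return NULL;
}

int main(void)
{
  static struct fake_worker w = { PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_COND_INITIALIZER, 0, 0 };
  pthread_t t;

  pthread_create(&t, NULL, replier, &w);
  printf("reply: %d\n", fake_worker_wait(&w));  /* prints "reply: 123" */
  pthread_join(&t, NULL);
  return 0;
}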