Kernel: IPC filter support for VM memory requests

The filtering also exposed the risk that a process could be killed, or have
its slot swapped during a live update, while on the list of pending VM memory
requests.  These cases are now handled properly as well.

Change-Id: Ibd3897b34abdf33bce19d37b8e5f65fbd0fd9316
Author: David van Moolenbroek, 2015-07-12 07:58:07 +02:00
Parent: 8b0f8559ee
Commit: 3779ed93c3
4 changed files with 136 additions and 18 deletions
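
The new clear_memreq(), swap_memreq(), and VMCTL_MEMREQ_GET code below all
walk the vmrequest chain through a pointer-to-pointer, so that an entry can be
unlinked or replaced wherever it sits, without special-casing the list head.
As a minimal standalone sketch of that pattern (the node type, unlink_node()
helper, and main() driver here are hypothetical illustrations, not kernel
code):

	#include <stdio.h>

	struct node {			/* hypothetical stand-in for struct proc */
		int id;
		struct node *next;	/* stand-in for p_vmrequest.nextrequestor */
	};

	static struct node *head;	/* stand-in for the global 'vmrequest' chain */

	/* Unlink 'victim' from the chain, wherever it is: 'npp' always points
	 * at the pointer that currently refers to the node under inspection,
	 * so the head and the middle of the list take the same code path.
	 */
	static void unlink_node(struct node *victim)
	{
		struct node **npp;

		for (npp = &head; *npp != NULL; npp = &(*npp)->next) {
			if (*npp == victim) {
				*npp = victim->next;
				break;
			}
		}
	}

	int main(void)
	{
		struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
		struct node *np;

		/* Build the chain a -> b -> c. */
		head = &a; a.next = &b; b.next = &c;

		unlink_node(&b);	/* remove a middle entry */
		unlink_node(&a);	/* remove the head entry: same code path */

		for (np = head; np != NULL; np = np->next)
			printf("%d\n", np->id);	/* prints only 3 */

		return 0;
	}

The same walk is what lets do_vmctl()'s VMCTL_MEMREQ_GET handler hand VM the
first request that passes the IPC filter check and unlink it from the middle
of the chain, rather than always taking the head.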


@@ -110,6 +110,7 @@ void clear_ipc_filters(struct proc *rp);
 int check_ipc_filter(struct ipc_filter_s *ipcf, int fill_flags);
 int allow_ipc_filtered_msg(struct proc *rp, endpoint_t src_e,
 	vir_bytes m_src_v, message *m_src_p);
+int allow_ipc_filtered_memreq(struct proc *src_rp, struct proc *dst_rp);
 int priv_add_irq(struct proc *rp, int irq);
 int priv_add_io(struct proc *rp, struct io_range *ior);
 int priv_add_mem(struct proc *rp, struct minix_mem_range *memr);


@@ -486,6 +486,27 @@ void send_diag_sig(void)
 	}
 }
 
+/*===========================================================================*
+ *				clear_memreq				     *
+ *===========================================================================*/
+static void clear_memreq(struct proc *rp)
+{
+	struct proc **rpp;
+
+	if (!RTS_ISSET(rp, RTS_VMREQUEST))
+		return; /* nothing to do */
+
+	for (rpp = &vmrequest; *rpp != NULL;
+	    rpp = &(*rpp)->p_vmrequest.nextrequestor) {
+		if (*rpp == rp) {
+			*rpp = rp->p_vmrequest.nextrequestor;
+			break;
+		}
+	}
+
+	RTS_UNSET(rp, RTS_VMREQUEST);
+}
+
 /*===========================================================================*
  *				clear_ipc				     *
  *===========================================================================*/
@@ -548,6 +569,10 @@ register struct proc *rc;	/* slot of process to clean up */
 	 */
 	clear_ipc_refs(rc, EDEADSRCDST);
 
+	/* Finally, if the process was blocked on a VM request, remove it from the
+	 * queue of processes waiting to be processed by VM.
+	 */
+	clear_memreq(rc);
 }
 
 /*===========================================================================*
@@ -736,6 +761,14 @@ void clear_ipc_filters(struct proc *rp)
 	}
 
 	priv(rp)->s_ipcf = NULL;
+
+	/* VM is a special case here: since the cleared IPC filter may have
+	 * blocked memory handling requests, we may now have to tell VM that
+	 * there are "new" requests pending.
+	 */
+	if (rp->p_endpoint == VM_PROC_NR && vmrequest != NULL)
+		if (send_sig(VM_PROC_NR, SIGKMEM) != OK)
+			panic("send_sig failed");
 }
 
 /*===========================================================================*
@@ -841,6 +874,46 @@ int allow_ipc_filtered_msg(struct proc *rp, endpoint_t src_e,
 	return allow;
 }
 
+/*===========================================================================*
+ *			allow_ipc_filtered_memreq			     *
+ *===========================================================================*/
+int allow_ipc_filtered_memreq(struct proc *src_rp, struct proc *dst_rp)
+{
+	/* Determine whether VM should receive a request to handle memory
+	 * that is the result of process 'src_rp' trying to access currently
+	 * unavailable memory in process 'dst_rp'. Return TRUE if VM should
+	 * be given the request, FALSE otherwise.
+	 */
+
+	struct proc *vmp;
+	message m_buf;
+	int allow_src, allow_dst;
+
+	vmp = proc_addr(VM_PROC_NR);
+
+	/* If VM has no filter in place, all requests should go through. */
+	if (priv(vmp)->s_ipcf == NULL)
+		return TRUE;
+
+	/* VM obtains memory requests in response to a SIGKMEM signal, which
+	 * is a notification sent from SYSTEM. Thus, if VM blocks such
+	 * notifications, it also should not get any memory requests. Of
+	 * course, VM should not be asking for requests in that case either,
+	 * but the extra check doesn't hurt.
+	 */
+	m_buf.m_type = NOTIFY_MESSAGE;
+	if (!allow_ipc_filtered_msg(vmp, SYSTEM, 0, &m_buf))
+		return FALSE;
+
+	/* A more refined policy may be implemented here, for example to
+	 * ensure that both the source and the destination (if different)
+	 * are in the group of processes that VM wants to talk to. Since VM
+	 * is basically not able to handle any memory requests during an
+	 * update, we will not get here, and none of that is needed.
+	 */
+	return TRUE;
+}
+
 /*===========================================================================*
  *				priv_add_irq				     *
  *===========================================================================*/


@@ -30,6 +30,7 @@ static void adjust_priv_slot(struct priv *privp, struct priv
 static void adjust_asyn_table(struct priv *src_privp, struct priv *dst_privp);
 static void swap_proc_slot_pointer(struct proc **rpp, struct proc
 	*src_rp, struct proc *dst_rp);
+static void swap_memreq(struct proc *src_rp, struct proc *dst_rp);
 
 /*===========================================================================*
  *				do_update				     *
@@ -143,6 +144,9 @@ int do_update(struct proc * caller, message * m_ptr)
 	/* Swap global process slot addresses. */
 	swap_proc_slot_pointer(get_cpulocal_var_ptr(ptproc), src_rp, dst_rp);
 
+	/* Swap VM request entries. */
+	swap_memreq(src_rp, dst_rp);
+
 #if DEBUG
 	printf("do_update: updated %d (%s, %d, %d) into %d (%s, %d, %d)\n",
 	    src_rp->p_endpoint, src_rp->p_name, src_rp->p_nr, priv(src_rp)->s_proc_nr,
@@ -303,5 +307,34 @@ static void swap_proc_slot_pointer(struct proc **rpp, struct proc *src_rp,
 	}
 }
 
+/*===========================================================================*
+ *				swap_memreq				     *
+ *===========================================================================*/
+static void swap_memreq(struct proc *src_rp, struct proc *dst_rp)
+{
+	/* If either the source or the destination process is part of the VM request
+	 * chain, but not both, then swap the process pointers in the chain.
+	 */
+	struct proc **rpp;
+
+	if (RTS_ISSET(src_rp, RTS_VMREQUEST) == RTS_ISSET(dst_rp, RTS_VMREQUEST))
+		return; /* nothing to do */
+
+	for (rpp = &vmrequest; *rpp != NULL;
+	    rpp = &(*rpp)->p_vmrequest.nextrequestor) {
+		if (*rpp == src_rp) {
+			dst_rp->p_vmrequest.nextrequestor =
+			    src_rp->p_vmrequest.nextrequestor;
+			*rpp = dst_rp;
+			break;
+		} else if (*rpp == dst_rp) {
+			src_rp->p_vmrequest.nextrequestor =
+			    dst_rp->p_vmrequest.nextrequestor;
+			*rpp = src_rp;
+			break;
+		}
+	}
+}
+
 #endif /* USE_UPDATE */


@@ -20,7 +20,7 @@ int do_vmctl(struct proc * caller, message * m_ptr)
 {
 	int proc_nr;
 	endpoint_t ep = m_ptr->SVMCTL_WHO;
-	struct proc *p, *rp, *target;
+	struct proc *p, *rp, **rpp, *target;
 
 	if(ep == SELF) { ep = caller->p_endpoint; }
@@ -37,17 +37,28 @@ int do_vmctl(struct proc * caller, message * m_ptr)
 		RTS_UNSET(p, RTS_PAGEFAULT);
 		return OK;
 	case VMCTL_MEMREQ_GET:
-		/* Send VM the information about the memory request. */
-		if(!(rp = vmrequest))
-			return ESRCH;
-		assert(RTS_ISSET(rp, RTS_VMREQUEST));
+		/* Send VM the information about the memory request. We can
+		 * not simply send the first request on the list, because IPC
+		 * filters may forbid VM from getting requests for particular
+		 * sources. However, IPC filters are used only in rare cases.
+		 */
+		for (rpp = &vmrequest; *rpp != NULL;
+		    rpp = &(*rpp)->p_vmrequest.nextrequestor) {
+			rp = *rpp;
 
-		okendpt(rp->p_vmrequest.target, &proc_nr);
-		target = proc_addr(proc_nr);
+			assert(RTS_ISSET(rp, RTS_VMREQUEST));
+
+			okendpt(rp->p_vmrequest.target, &proc_nr);
+			target = proc_addr(proc_nr);
+
+			/* Check against IPC filters. */
+			if (!allow_ipc_filtered_memreq(rp, target))
+				continue;
+
+			/* Reply with request fields. */
+			if (rp->p_vmrequest.req_type != VMPTYPE_CHECK)
+				panic("VMREQUEST wrong type");
 
-		/* Reply with request fields. */
-		switch(rp->p_vmrequest.req_type) {
-		case VMPTYPE_CHECK:
 			m_ptr->SVMCTL_MRG_TARGET	=
 				rp->p_vmrequest.target;
 			m_ptr->SVMCTL_MRG_ADDR		=
@@ -58,17 +69,17 @@ int do_vmctl(struct proc * caller, message * m_ptr)
 				rp->p_vmrequest.params.check.writeflag;
 			m_ptr->SVMCTL_MRG_REQUESTOR	=
 				(void *) rp->p_endpoint;
-			break;
-		default:
-			panic("VMREQUEST wrong type");
+
+			rp->p_vmrequest.vmresult = VMSUSPEND;
+
+			/* Remove from request chain. */
+			*rpp = rp->p_vmrequest.nextrequestor;
+
+			return rp->p_vmrequest.req_type;
 		}
 
-		rp->p_vmrequest.vmresult = VMSUSPEND;
+		return ENOENT;
 
-		/* Remove from request chain. */
-		vmrequest = vmrequest->p_vmrequest.nextrequestor;
-
-		return rp->p_vmrequest.req_type;
 	case VMCTL_MEMREQ_REPLY:
 		assert(RTS_ISSET(p, RTS_VMREQUEST));
 		assert(p->p_vmrequest.vmresult == VMSUSPEND);