minix/kernel/system/do_vmctl.c
Tomas Hruby 758d788bbe SMP - asyn send SMP safe
- we must not deliver messages from/to unstable address spaces.
  In such a case, we must postpone the delivery. To make sure
  that a process which is expecting an asynchronous message does
  not starve, we must remember that we skipped delivery of some
  messages and try to deliver them again once the source
  address space is stable again.
2012-01-13 11:30:01 +00:00

/* The kernel call implemented in this file:
 *   m_type:          SYS_VMCTL
 *
 * The parameters for this kernel call are:
 *   SVMCTL_WHO       which process
 *   SVMCTL_PARAM     set this setting (VMCTL_*)
 *   SVMCTL_VALUE     to this value
 */
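/*
 * Caller-side sketch (illustrative only, not part of this file): a
 * privileged server such as VM would typically reach this handler
 * through a libsys wrapper along these lines:
 *
 *   message m;
 *   m.SVMCTL_WHO = who;       // which process
 *   m.SVMCTL_PARAM = param;   // VMCTL_* setting
 *   m.SVMCTL_VALUE = value;   // its value
 *   return _kernel_call(SYS_VMCTL, &m);
 */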
#include "kernel/system.h"
#include "kernel/vm.h"
#include "kernel/debug.h"
#include <assert.h>
#include <minix/type.h>
/*===========================================================================*
 *                                do_vmctl                                   *
 *===========================================================================*/
PUBLIC int do_vmctl(struct proc * caller, message * m_ptr)
{
  int proc_nr;
  endpoint_t ep = m_ptr->SVMCTL_WHO;
  struct proc *p, *rp, *target;

  if(ep == SELF) { ep = caller->p_endpoint; }

  if(!isokendpt(ep, &proc_nr)) {
    printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
    return EINVAL;
  }

  p = proc_addr(proc_nr);

  switch(m_ptr->SVMCTL_PARAM) {
    case VMCTL_CLEAR_PAGEFAULT:
      assert(RTS_ISSET(p, RTS_PAGEFAULT));
      RTS_UNSET(p, RTS_PAGEFAULT);
      return OK;
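    /*
     * VMCTL_MEMREQ_GET and VMCTL_MEMREQ_REPLY work as a pair:
     * processes whose memory needs VM's attention sit on the
     * 'vmrequest' chain; VM fetches one pending request here and,
     * once it has serviced it, posts the result back through
     * VMCTL_MEMREQ_REPLY below.
     */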
    case VMCTL_MEMREQ_GET:
      /* Send VM the information about the memory request. */
      if(!(rp = vmrequest))
        return ESRCH;
      assert(RTS_ISSET(rp, RTS_VMREQUEST));
      okendpt(rp->p_vmrequest.target, &proc_nr);
      target = proc_addr(proc_nr);

      /* Reply with request fields. */
      switch(rp->p_vmrequest.req_type) {
        case VMPTYPE_CHECK:
          m_ptr->SVMCTL_MRG_TARGET = rp->p_vmrequest.target;
          m_ptr->SVMCTL_MRG_ADDR = rp->p_vmrequest.params.check.start;
          m_ptr->SVMCTL_MRG_LENGTH = rp->p_vmrequest.params.check.length;
          m_ptr->SVMCTL_MRG_FLAG = rp->p_vmrequest.params.check.writeflag;
          m_ptr->SVMCTL_MRG_REQUESTOR = (void *) rp->p_endpoint;
          break;
        case VMPTYPE_SMAP:
        case VMPTYPE_SUNMAP:
        case VMPTYPE_COWMAP:
          assert(RTS_ISSET(target, RTS_VMREQTARGET));
          RTS_UNSET(target, RTS_VMREQTARGET);
          m_ptr->SVMCTL_MRG_TARGET = rp->p_vmrequest.target;
          m_ptr->SVMCTL_MRG_ADDR = rp->p_vmrequest.params.map.vir_d;
          m_ptr->SVMCTL_MRG_EP2 = rp->p_vmrequest.params.map.ep_s;
          m_ptr->SVMCTL_MRG_ADDR2 = rp->p_vmrequest.params.map.vir_s;
          m_ptr->SVMCTL_MRG_LENGTH = rp->p_vmrequest.params.map.length;
          m_ptr->SVMCTL_MRG_FLAG = rp->p_vmrequest.params.map.writeflag;
          m_ptr->SVMCTL_MRG_REQUESTOR = (void *) rp->p_endpoint;
          break;
        default:
          panic("VMREQUEST wrong type");
      }

      rp->p_vmrequest.vmresult = VMSUSPEND;

      /* Remove from request chain. */
      vmrequest = vmrequest->p_vmrequest.nextrequestor;

      return rp->p_vmrequest.req_type;
    case VMCTL_MEMREQ_REPLY:
      assert(RTS_ISSET(p, RTS_VMREQUEST));
      assert(p->p_vmrequest.vmresult == VMSUSPEND);
      okendpt(p->p_vmrequest.target, &proc_nr);
      target = proc_addr(proc_nr);
      p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
      assert(p->p_vmrequest.vmresult != VMSUSPEND);

      switch(p->p_vmrequest.type) {
        case VMSTYPE_KERNELCALL:
          /*
           * We will have to resume execution of the kernel call
           * as soon as the scheduler picks up this process again.
           */
          p->p_misc_flags |= MF_KCALL_RESUME;
          break;
        case VMSTYPE_DELIVERMSG:
          assert(p->p_misc_flags & MF_DELIVERMSG);
          assert(p == target);
          assert(RTS_ISSET(p, RTS_VMREQUEST));
          break;
        case VMSTYPE_MAP:
          assert(RTS_ISSET(p, RTS_VMREQUEST));
          break;
        default:
          panic("strange request type: %d", p->p_vmrequest.type);
      }

      RTS_UNSET(p, RTS_VMREQUEST);
      return OK;
    case VMCTL_ENABLE_PAGING:
      if(vm_running)
        panic("do_vmctl: paging already enabled");
      if (arch_enable_paging(caller, m_ptr) != OK)
        panic("do_vmctl: paging enabling failed");
      return OK;
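    /*
     * The next two calls also form a pair. A plausible reading, based
     * on the call signatures here rather than anything spelled out in
     * this file: once paging is up, VM asks for the kernel's physical
     * mappings one index at a time via VMCTL_KERN_PHYSMAP, maps each
     * region, and reports the chosen virtual address back through
     * VMCTL_KERN_MAP_REPLY.
     */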
    case VMCTL_KERN_PHYSMAP:
    {
      int i = m_ptr->SVMCTL_VALUE;
      return arch_phys_map(i,
          (phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
          (phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
          &m_ptr->SVMCTL_MAP_FLAGS);
    }
    case VMCTL_KERN_MAP_REPLY:
    {
      return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
          (vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
    }
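    /*
     * VMCTL_VMINHIBIT_SET/_CLEAR bracket the window in which VM
     * modifies this process's address space: while RTS_VMINHIBIT is
     * set the process cannot run (on SMP it is first stopped on its
     * remote CPU), so it never executes on top of a mapping that is
     * being changed underneath it.
     */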
    case VMCTL_VMINHIBIT_SET:
      /* check if we must stop a process on a different CPU */
#if CONFIG_SMP
      if (p->p_cpu != cpuid) {
        smp_schedule_vminhibit(p);
      } else
#endif
        RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
      p->p_misc_flags |= MF_FLUSH_TLB;
#endif
      return OK;
    case VMCTL_VMINHIBIT_CLEAR:
      assert(RTS_ISSET(p, RTS_VMINHIBIT));
      /*
       * The process is certainly not runnable, so there is no need
       * to tell its CPU.
       */
      RTS_UNSET(p, RTS_VMINHIBIT);
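      /*
       * While this address space was unstable, asynchronous sends
       * involving it were skipped and MF_SENDA_VM_MISS was left set
       * (see the commit note above). Retry the delivery now that the
       * address space is stable again, so that a process expecting
       * such a message does not starve.
       */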
#ifdef CONFIG_SMP
      if (p->p_misc_flags & MF_SENDA_VM_MISS) {
        struct priv *privp;
        p->p_misc_flags &= ~MF_SENDA_VM_MISS;
        privp = priv(p);
        try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
            privp->s_asynsize);
      }
      /*
       * We do not know whether the kernel has the changed mapping
       * installed for accessing userspace memory, nor, if so, on
       * which CPU. Moreover, we do not know which mapping has changed
       * or how, so we must invalidate all mappings we have anywhere.
       * The next time we map memory, we map it fresh.
       */
      bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
      return OK;
  }

  /* Try architecture-specific vmctls. */
  return arch_do_vmctl(m_ptr, p);
}