kvm: Shut down KVM and disconnect performance counters on fork
We can't (and shouldn't) use KVM after a fork, since the child and parent probably point to the same VM. Pinning down the exact effects of doing so is hard, but they are likely to be messy. We also disconnect the performance counters attached to the guest; this works around what seems to be a kernel bug where spurious SIGIOs get delivered to the forked child process.

Signed-off-by: Andreas Sandberg <andreas@sandberg.pp.se>
[sascha.bischoff@arm.com: Rebased patches onto a newer gem5 version]
Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com>
[andreas.sandberg@arm.com: Fatal if entering KVM in child process]
Signed-off-by: Andreas Sandberg <andreas.sandberg@arm.com>
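The hazard described above follows from basic fork() semantics: the child inherits copies of every open file descriptor, including the KVM VM and vCPU descriptors, so both processes end up referring to the same in-kernel VM. Below is a minimal, self-contained sketch (not gem5 code; the names are illustrative, with an ordinary descriptor standing in for a KVM fd) of why the child must drop its copies rather than use them:

    // fork_fd_sketch.cc -- illustrative only. An ordinary descriptor
    // stands in for a KVM VM/vCPU fd; fork() duplicates both the same way.
    #include <fcntl.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #include <cstdio>

    int
    main()
    {
        int fd = open("/dev/null", O_RDWR); // stand-in for vmFD/vcpuFD

        pid_t pid = fork();
        if (pid == 0) {
            // Child: the safe move is the one notifyFork() makes in the
            // patch below -- close the inherited descriptor instead of
            // issuing ioctls on it. This leaves the parent's copy valid.
            close(fd);
            _exit(0);
        }

        waitpid(pid, nullptr, 0);
        printf("parent fd still usable: %d\n", fd); // parent unaffected
        close(fd);
        return 0;
    }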
parent a91c1e69a8
commit 4f303785dc
5 changed files with 57 additions and 10 deletions
src/cpu/kvm/base.cc
@@ -125,7 +125,7 @@ BaseKvmCPU::startup()
     const BaseKvmCPUParams * const p(
         dynamic_cast<const BaseKvmCPUParams *>(params()));
 
-    Kvm &kvm(vm.kvm);
+    Kvm &kvm(*vm.kvm);
 
     BaseCPU::startup();
 
@@ -362,6 +362,29 @@ BaseKvmCPU::drainResume()
     }
 }
 
+void
+BaseKvmCPU::notifyFork()
+{
+    // We should have drained prior to forking, which means that the
+    // tick event shouldn't be scheduled and the CPU is idle.
+    assert(!tickEvent.scheduled());
+    assert(_status == Idle);
+
+    if (vcpuFD != -1) {
+        if (close(vcpuFD) == -1)
+            warn("kvm CPU: notifyFork failed to close vcpuFD\n");
+
+        if (_kvmRun)
+            munmap(_kvmRun, vcpuMMapSize);
+
+        vcpuFD = -1;
+        _kvmRun = NULL;
+
+        hwInstructions.detach();
+        hwCycles.detach();
+    }
+}
+
 void
 BaseKvmCPU::switchOut()
 {
@@ -617,6 +640,9 @@ Tick
 BaseKvmCPU::kvmRun(Tick ticks)
 {
     Tick ticksExecuted;
+    fatal_if(vcpuFD == -1,
+             "Trying to run a KVM CPU in a forked child process. "
+             "This is not supported.\n");
     DPRINTF(KvmRun, "KVM: Executing for %i ticks\n", ticks);
 
     if (ticks == 0) {
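For context on the hwInstructions/hwCycles detach above: the guest's performance counters are perf_event descriptors, which the forked child also inherits, and their overflow signals are what the commit message's SIGIO workaround targets. A hedged sketch (not gem5's PerfKvmCounter API) of what "detaching" amounts to in the child:

    // Illustrative only: stop an inherited perf_event counter and drop
    // the descriptor so no further overflow signals reach this process.
    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    void
    detach_counter(int &counter_fd)
    {
        if (counter_fd != -1) {
            ioctl(counter_fd, PERF_EVENT_IOC_DISABLE, 0); // stop counting
            close(counter_fd);                            // drop our copy
            counter_fd = -1;
        }
    }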
src/cpu/kvm/base.hh
@@ -89,6 +89,7 @@ class BaseKvmCPU : public BaseCPU
 
     DrainState drain() override;
     void drainResume() override;
+    void notifyFork() override;
 
     void switchOut() override;
     void takeOverFrom(BaseCPU *cpu) override;
src/cpu/kvm/vm.cc
@@ -291,12 +291,12 @@ Kvm::createVM()
 
 KvmVM::KvmVM(KvmVMParams *params)
     : SimObject(params),
-      kvm(), system(params->system),
-      vmFD(kvm.createVM()),
+      kvm(new Kvm()), system(params->system),
+      vmFD(kvm->createVM()),
       started(false),
       nextVCPUID(0)
 {
-    maxMemorySlot = kvm.capNumMemSlots();
+    maxMemorySlot = kvm->capNumMemSlots();
     /* If we couldn't determine how memory slots there are, guess 32. */
     if (!maxMemorySlot)
         maxMemorySlot = 32;
@@ -307,7 +307,25 @@ KvmVM::KvmVM(KvmVMParams *params)
 
 KvmVM::~KvmVM()
 {
-    close(vmFD);
+    if (vmFD != -1)
+        close(vmFD);
+
+    if (kvm)
+        delete kvm;
+}
+
+void
+KvmVM::notifyFork()
+{
+    if (vmFD != -1) {
+        if (close(vmFD) == -1)
+            warn("kvm VM: notifyFork failed to close vmFD\n");
+
+        vmFD = -1;
+
+        delete kvm;
+        kvm = NULL;
+    }
 }
 
 void
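The change above (with the matching vm.hh declarations below) turns the Kvm value member into a heap-allocated pointer so the child can destroy the /dev/kvm connection in notifyFork(), long before the destructor runs, while the destructor stays correct in both processes. A sketch of the same ownership pattern expressed with std::unique_ptr for clarity (the patch itself uses a raw pointer, matching the surrounding code's style; names are illustrative):

    // Illustrative only; gem5's actual KvmVM differs.
    #include <memory>

    struct Kvm { /* wraps the global /dev/kvm connection */ };

    struct KvmVMSketch
    {
        std::unique_ptr<Kvm> kvm{new Kvm()};
        int vmFD = -1;

        void notifyFork()
        {
            vmFD = -1;   // the descriptor itself is closed separately
            kvm.reset(); // tear down the KVM connection early, in the child
        }
        // No explicit destructor needed: unique_ptr covers the no-fork case.
    };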
src/cpu/kvm/vm.hh
@@ -295,6 +295,8 @@ class KvmVM : public SimObject
     KvmVM(KvmVMParams *params);
     virtual ~KvmVM();
 
+    void notifyFork();
+
     /**
      * Setup a shared three-page memory region used by the internals
      * of KVM. This is currently only needed by x86 implementations.
@@ -396,7 +398,7 @@ class KvmVM : public SimObject
     int createDevice(uint32_t type, uint32_t flags = 0);
 
     /** Global KVM interface */
-    Kvm kvm;
+    Kvm *kvm;
 
 #if defined(__aarch64__)
   public: // ARM-specific
 
@@ -504,7 +506,7 @@ class KvmVM : public SimObject
     System *system;
 
     /** KVM VM file descriptor */
-    const int vmFD;
+    int vmFD;
 
     /** Has delayedStartup() already been called? */
     bool started;
src/cpu/kvm/x86_cpu.cc
@@ -519,7 +519,7 @@ X86KvmCPU::X86KvmCPU(X86KvmCPUParams *params)
     : BaseKvmCPU(params),
       useXSave(params->useXSave)
 {
-    Kvm &kvm(vm.kvm);
+    Kvm &kvm(*vm.kvm);
 
     if (!kvm.capSetTSSAddress())
         panic("KVM: Missing capability (KVM_CAP_SET_TSS_ADDR)\n");
@@ -649,7 +649,7 @@ X86KvmCPU::dumpVCpuEvents() const
 void
 X86KvmCPU::dumpMSRs() const
 {
-    const Kvm::MSRIndexVector &supported_msrs(vm.kvm.getSupportedMSRs());
+    const Kvm::MSRIndexVector &supported_msrs(vm.kvm->getSupportedMSRs());
     std::unique_ptr<struct kvm_msrs> msrs(
         newVarStruct<struct kvm_msrs, struct kvm_msr_entry>(
             supported_msrs.size()));
@@ -1539,7 +1539,7 @@ const Kvm::MSRIndexVector &
 X86KvmCPU::getMsrIntersection() const
 {
     if (cachedMsrIntersection.empty()) {
-        const Kvm::MSRIndexVector &kvm_msrs(vm.kvm.getSupportedMSRs());
+        const Kvm::MSRIndexVector &kvm_msrs(vm.kvm->getSupportedMSRs());
 
         DPRINTF(Kvm, "kvm-x86: Updating MSR intersection\n");
        for (auto it = kvm_msrs.cbegin(); it != kvm_msrs.cend(); ++it) {