kvm, x86: Add initial support for multicore simulation

Simulating an SMP or multicore system requires devices to be shared
between multiple KVM vCPUs. This means that locking is required when
accessing devices. This changeset adds the locking necessary for
devices to execute correctly. It is implemented by temporarily
migrating the KVM CPU to the VM's (and the devices') event queue when
handling MMIO. Similarly, the CPU migrates to the interrupt
controller's event queue when delivering an interrupt.
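
As background for the changes below: the migration primitive is a
scoped RAII helper. The following minimal sketch is modeled on
EventQueue::ScopedMigration in gem5's src/sim/eventq.hh; it assumes
the queue exposes lock()/unlock() and that curEventQueue() reads and
sets the calling thread's current queue. Treat it as an illustration
rather than the exact implementation.

class ScopedMigration
{
  public:
    ScopedMigration(EventQueue *_new_eq)
        : new_eq(*_new_eq), old_eq(*curEventQueue())
    {
        // Hand over: release the thread's current queue, take the
        // target queue's lock, and make the target queue current.
        old_eq.unlock();
        new_eq.lock();
        curEventQueue(&new_eq);
    }

    ~ScopedMigration()
    {
        // Undo the migration in reverse order when leaving scope.
        new_eq.unlock();
        old_eq.lock();
        curEventQueue(&old_eq);
    }

  private:
    EventQueue &new_eq;
    EventQueue &old_eq;
};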

The support for fast-forwarding multicore simulations added by this
changeset assumes that all devices in a system are simulated in the
same thread and that each vCPU has its own thread. Special care must
be taken to ensure that devices living under a CPU in the object
hierarchy (e.g., the interrupt controller) do not inherit the parent
CPU's thread, but are instead assigned to the device thread. The
KvmVM object is assumed to live in the same thread as the other
devices in the system.
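
To make that partitioning concrete, here is a toy, self-contained
sketch (illustration only, not gem5 code) of the assumed queue
assignment: devices and the KvmVM share queue 0, each vCPU gets its
own queue, and a device under a CPU, such as the interrupt
controller, is pinned back to the device queue instead of inheriting
its parent's:

#include <cassert>
#include <vector>

struct EventQueue { int id; };

struct SimObj {
    EventQueue *eq;  // the queue (and hence thread) this object runs in
    explicit SimObj(EventQueue *q) : eq(q) {}
};

int main()
{
    // Queue 0 drives all devices; queues 1 and 2 drive two vCPUs.
    std::vector<EventQueue> queues{{0}, {1}, {2}};

    SimObj vm(&queues[0]);        // the KvmVM lives with the devices
    SimObj cpu0(&queues[1]);      // each vCPU has its own queue/thread
    SimObj cpu1(&queues[2]);
    SimObj intctrl0(&queues[0]);  // child of cpu0 in the object
                                  // hierarchy, but explicitly pinned
                                  // to the device queue

    // Interrupt delivery and MMIO rely on this invariant:
    assert(vm.eq == intctrl0.eq);
    (void)cpu0; (void)cpu1;
    return 0;
}
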
commit 02b51afb7e (parent 221f4f232a)
Author: Andreas Sandberg
Date: 2014-04-09 16:01:58 +02:00

2 changed files with 39 additions and 2 deletions

src/cpu/kvm/base.cc

@@ -415,6 +415,13 @@ void
 BaseKvmCPU::wakeup()
 {
     DPRINTF(Kvm, "wakeup()\n");
+    // This method might have been called from another
+    // context. Migrate to this SimObject's event queue when
+    // delivering the wakeup signal.
+    EventQueue::ScopedMigration migrate(eventQueue());
+
+    // Kick the vCPU to get it to come out of KVM.
+    kick();
 
     if (thread->status() != ThreadContext::Suspended)
         return;
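
The kick() above forces the vCPU thread out of the blocking KVM_RUN
ioctl so it can observe the wakeup. In gem5 this is signal-based;
roughly, omitting debug tracing:

void
BaseKvmCPU::kick() const
{
    // Interrupt the vCPU thread with gem5's reserved real-time
    // signal; the KVM_RUN ioctl returns with EINTR and control
    // comes back to the simulator.
    pthread_kill(vcpuThread, KVM_KICK_SIGNAL);
}
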
@@ -635,6 +642,14 @@ BaseKvmCPU::kvmRun(Tick ticks)
         // twice.
         ticksExecuted = clockPeriod();
     } else {
+        // This method is executed as a result of a tick event. That
+        // means that the event queue will be locked when entering the
+        // method. We temporarily unlock the event queue to allow
+        // other threads to steal control of this thread to inject
+        // interrupts. They will typically lock the queue and then
+        // force an exit from KVM by kicking the vCPU.
+        EventQueue::ScopedRelease release(curEventQueue());
+
         if (ticks < runTimer->resolution()) {
             DPRINTF(KvmRun, "KVM: Adjusting tick count (%i -> %i)\n",
                     ticks, runTimer->resolution());
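
ScopedRelease is the counterpart of the migration helper: rather than
moving to another queue, it simply drops the lock on the current
queue for the duration of the scope. A sketch along the lines of
EventQueue::ScopedRelease in src/sim/eventq.hh:

class ScopedRelease
{
  public:
    ScopedRelease(EventQueue *_eq)
        : eq(*_eq)
    {
        // Let other threads lock the queue, e.g., to schedule a
        // wakeup event and kick this vCPU out of KVM.
        eq.unlock();
    }

    ~ScopedRelease()
    {
        // Re-acquire the queue before event processing resumes.
        eq.lock();
    }

  private:
    EventQueue &eq;
};
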
@@ -990,11 +1005,19 @@ BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
     pkt.dataStatic(data);
 
     if (mmio_req.isMmappedIpr()) {
+        // We currently assume that there is no need to migrate to a
+        // different event queue when doing IPRs. Currently, IPRs are
+        // only used for m5ops, so it should be a valid assumption.
         const Cycles ipr_delay(write ?
                              TheISA::handleIprWrite(tc, &pkt) :
                              TheISA::handleIprRead(tc, &pkt));
         return clockPeriod() * ipr_delay;
     } else {
+        // Temporarily lock and migrate to the event queue of the
+        // VM. This queue is assumed to "own" all devices we need to
+        // access if running in multi-core mode.
+        EventQueue::ScopedMigration migrate(vm.eventQueue());
+
         return dataPort.sendAtomic(&pkt);
     }
 }

src/cpu/kvm/x86_cpu.cc

@@ -1134,10 +1134,20 @@ X86KvmCPU::updateThreadContextMSRs()
 void
 X86KvmCPU::deliverInterrupts()
 {
+    Fault fault;
+
     syncThreadContext();
 
-    Fault fault(interrupts->getInterrupt(tc));
-    interrupts->updateIntrInfo(tc);
+    {
+        // Migrate to the interrupt controller's thread to get the
+        // interrupt. Even though the individual methods are safe to
+        // call across threads, we might still lose interrupts unless
+        // getInterrupt() and updateIntrInfo() are called
+        // atomically.
+        EventQueue::ScopedMigration migrate(interrupts->eventQueue());
+        fault = interrupts->getInterrupt(tc);
+        interrupts->updateIntrInfo(tc);
+    }
 
     X86Interrupt *x86int(dynamic_cast<X86Interrupt *>(fault.get()));
     if (dynamic_cast<NonMaskableInterrupt *>(fault.get())) {
@@ -1340,6 +1350,10 @@ X86KvmCPU::handleKvmExitIO()
                         dataMasterId());
     const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
 
+    // Temporarily lock and migrate to the event queue of the
+    // VM. This queue is assumed to "own" all devices we need to
+    // access if running in multi-core mode.
+    EventQueue::ScopedMigration migrate(vm.eventQueue());
     for (int i = 0; i < count; ++i) {
         Packet pkt(&io_req, cmd);