cpu, mem, sim: Change how KVM maps memory

Only map memories into the KVM guest address space that are
marked as usable by KVM. Create BackingStoreEntry class
containing flags for is_conf_reported, in_addr_map, and
kvm_map.
This commit is contained in:
David Hashe 2016-08-22 11:41:05 -04:00
parent d80a613990
commit f3ccaab1e9
6 changed files with 120 additions and 19 deletions

View file

@@ -341,13 +341,18 @@ KvmVM::cpuStartup()
 void
 KvmVM::delayedStartup()
 {
-    const std::vector<std::pair<AddrRange, uint8_t*> >&memories(
+    const std::vector<BackingStoreEntry> &memories(
         system->getPhysMem().getBackingStore());

     DPRINTF(Kvm, "Mapping %i memory region(s)\n", memories.size());
     for (int slot(0); slot < memories.size(); ++slot) {
-        const AddrRange &range(memories[slot].first);
-        void *pmem(memories[slot].second);
+        if (!memories[slot].kvmMap) {
+            DPRINTF(Kvm, "Skipping region marked as not usable by KVM\n");
+            continue;
+        }
+
+        const AddrRange &range(memories[slot].range);
+        void *pmem(memories[slot].pmem);

         if (pmem) {
             DPRINTF(Kvm, "Mapping region: 0x%p -> 0x%llx [size: 0x%llx]\n",

View file

@@ -57,6 +57,12 @@ class AbstractMemory(MemObject):
     # e.g. by the testers that use shadow memories as a reference
     in_addr_map = Param.Bool(True, "Memory part of the global address map")

+    # When KVM acceleration is used, memory is mapped into the guest process
+    # address space and accessed directly. Some memories may need to be
+    # excluded from this mapping if they overlap with other memory ranges or
+    # are not accessible by the CPU.
+    kvm_map = Param.Bool(True, "Should KVM map this memory for the guest")
+
     # Should the bootloader include this memory when passing
     # configuration information about the physical memory layout to
     # the kernel, e.g. using ATAG or ACPI

View file

@@ -57,7 +57,7 @@ using namespace std;
 AbstractMemory::AbstractMemory(const Params *p) :
     MemObject(p), range(params()->range), pmemAddr(NULL),
     confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map),
-    _system(NULL)
+    kvmMap(p->kvm_map), _system(NULL)
 {
 }

View file

@@ -111,10 +111,13 @@ class AbstractMemory : public MemObject
     uint8_t* pmemAddr;

     // Enable specific memories to be reported to the configuration table
-    bool confTableReported;
+    const bool confTableReported;

     // Should the memory appear in the global address map
-    bool inAddrMap;
+    const bool inAddrMap;

+    // Should KVM map this memory for the guest
+    const bool kvmMap;
+
     std::list<LockedAddr> lockedAddrList;

@@ -282,6 +285,14 @@ class AbstractMemory : public MemObject
      */
     bool isInAddrMap() const { return inAddrMap; }

+    /**
+     * When shadow memories are in use, KVM may want to map one or the other,
+     * but cannot map both into the guest address space.
+     *
+     * @return if this memory should be mapped into the KVM guest address space
+     */
+    bool isKvmMap() const { return kvmMap; }
+
     /**
      * Perform an untimed memory access and update all the state
      * (e.g. locked addresses) and statistics accordingly. The packet

View file

@@ -111,7 +111,9 @@ PhysicalMemory::PhysicalMemory(const string& _name,
             // memories are allowed to overlap in the logic address
             // map
             vector<AbstractMemory*> unmapped_mems{m};
-            createBackingStore(m->getAddrRange(), unmapped_mems);
+            createBackingStore(m->getAddrRange(), unmapped_mems,
+                               m->isConfReported(), m->isInAddrMap(),
+                               m->isKvmMap());
         }
     }

@@ -132,7 +134,19 @@ PhysicalMemory::PhysicalMemory(const string& _name,
                 if (!intlv_ranges.empty() &&
                     !intlv_ranges.back().mergesWith(r.first)) {
                     AddrRange merged_range(intlv_ranges);
-                    createBackingStore(merged_range, curr_memories);
+
+                    AbstractMemory *f = curr_memories.front();
+                    for (const auto& c : curr_memories)
+                        if (f->isConfReported() != c->isConfReported() ||
+                            f->isInAddrMap() != c->isInAddrMap() ||
+                            f->isKvmMap() != c->isKvmMap())
+                            fatal("Inconsistent flags in an interleaved "
+                                  "range\n");
+
+                    createBackingStore(merged_range, curr_memories,
+                                       f->isConfReported(), f->isInAddrMap(),
+                                       f->isKvmMap());
+
                     intlv_ranges.clear();
                     curr_memories.clear();
                 }

@@ -140,7 +154,10 @@ PhysicalMemory::PhysicalMemory(const string& _name,
                 curr_memories.push_back(r.second);
             } else {
                 vector<AbstractMemory*> single_memory{r.second};
-                createBackingStore(r.first, single_memory);
+                createBackingStore(r.first, single_memory,
+                                   r.second->isConfReported(),
+                                   r.second->isInAddrMap(),
+                                   r.second->isKvmMap());
             }
         }
     }

@@ -149,13 +166,26 @@ PhysicalMemory::PhysicalMemory(const string& _name,
     // ahead and do it
     if (!intlv_ranges.empty()) {
         AddrRange merged_range(intlv_ranges);
-        createBackingStore(merged_range, curr_memories);
+
+        AbstractMemory *f = curr_memories.front();
+        for (const auto& c : curr_memories)
+            if (f->isConfReported() != c->isConfReported() ||
+                f->isInAddrMap() != c->isInAddrMap() ||
+                f->isKvmMap() != c->isKvmMap())
+                fatal("Inconsistent flags in an interleaved "
+                      "range\n");
+
+        createBackingStore(merged_range, curr_memories,
+                           f->isConfReported(), f->isInAddrMap(),
+                           f->isKvmMap());
     }
 }

 void
 PhysicalMemory::createBackingStore(AddrRange range,
-                                   const vector<AbstractMemory*>& _memories)
+                                   const vector<AbstractMemory*>& _memories,
+                                   bool conf_table_reported,
+                                   bool in_addr_map, bool kvm_map)
 {
     panic_if(range.interleaved(),
              "Cannot create backing store for interleaved range %s\n",

@@ -184,7 +214,8 @@ PhysicalMemory::createBackingStore(AddrRange range,
     // remember this backing store so we can checkpoint it and unmap
     // it appropriately
-    backingStore.push_back(make_pair(range, pmem));
+    backingStore.emplace_back(range, pmem,
+                              conf_table_reported, in_addr_map, kvm_map);

     // point the memories to their backing store
     for (const auto& m : _memories) {

@@ -198,7 +229,7 @@ PhysicalMemory::~PhysicalMemory()
 {
     // unmap the backing store
     for (auto& s : backingStore)
-        munmap((char*)s.second, s.first.size());
+        munmap((char*)s.pmem, s.range.size());
 }

 bool

@@ -314,7 +345,7 @@ PhysicalMemory::serialize(CheckpointOut &cp) const
     // store each backing store memory segment in a file
     for (auto& s : backingStore) {
         ScopedCheckpointSection sec(cp, csprintf("store%d", store_id));
-        serializeStore(cp, store_id++, s.first, s.second);
+        serializeStore(cp, store_id++, s.range, s.pmem);
     }
 }

@@ -407,8 +438,8 @@ PhysicalMemory::unserializeStore(CheckpointIn &cp)
         fatal("Can't open physical memory checkpoint file '%s'", filename);

     // we've already got the actual backing store mapped
-    uint8_t* pmem = backingStore[store_id].second;
-    AddrRange range = backingStore[store_id].first;
+    uint8_t* pmem = backingStore[store_id].pmem;
+    AddrRange range = backingStore[store_id].range;

     long range_size;
     UNSERIALIZE_SCALAR(range_size);

View file

@@ -48,6 +48,51 @@
  */

 class AbstractMemory;
/**
* A single entry for the backing store.
*/
class BackingStoreEntry
{
public:
/**
* Create a backing store entry. Don't worry about managing the memory
* pointers, because PhysicalMemory is responsible for that.
*/
BackingStoreEntry(AddrRange range, uint8_t* pmem,
bool conf_table_reported, bool in_addr_map, bool kvm_map)
: range(range), pmem(pmem), confTableReported(conf_table_reported),
inAddrMap(in_addr_map), kvmMap(kvm_map)
{}
/**
* The address range covered in the guest.
*/
AddrRange range;
/**
* Pointer to the host memory this range maps to. This memory is the same
* size as the range field.
*/
uint8_t* pmem;
/**
* Whether this memory should be reported to the configuration table
*/
bool confTableReported;
/**
* Whether this memory should appear in the global address map
*/
bool inAddrMap;
/**
* Whether KVM should map this memory into the guest address space during
* acceleration.
*/
bool kvmMap;
};
 /**
  * The physical memory encapsulates all memories in the system and
  * provides basic functionality for accessing those memories without

@@ -90,7 +135,7 @@ class PhysicalMemory : public Serializable
     // The physical memory used to provide the memory in the simulated
     // system
-    std::vector<std::pair<AddrRange, uint8_t*>> backingStore;
+    std::vector<BackingStoreEntry> backingStore;

     // Prevent copying
     PhysicalMemory(const PhysicalMemory&);
@@ -105,9 +150,12 @@ class PhysicalMemory : public Serializable
      *
      * @param range The address range covered
      * @param memories The memories this range maps to
+     * @param kvm_map Should KVM map this memory for the guest
      */
     void createBackingStore(AddrRange range,
-                            const std::vector<AbstractMemory*>& _memories);
+                            const std::vector<AbstractMemory*>& _memories,
+                            bool conf_table_reported,
+                            bool in_addr_map, bool kvm_map);
   public:

@@ -167,7 +215,7 @@ class PhysicalMemory : public Serializable
      *
      * @return Pointers to the memory backing store
      */
-    std::vector<std::pair<AddrRange, uint8_t*>> getBackingStore() const
+    std::vector<BackingStoreEntry> getBackingStore() const
     { return backingStore; }

     /**