mem: Fix bug in PhysicalMemory use of mmap and munmap

This patch fixes a bug in how physical memory used to be mapped and
unmapped. Previously we unmapped and re-mapped if restoring from a
checkpoint. However, we never checked that the new mapping was
actually the same, it was just magically working as the OS seems to
fairly reliably give us the same chunk back. This patch fixes this
issue by relying entirely on the mmap call in the constructor.
This commit is contained in:
Andreas Hansson 2014-02-18 05:51:01 -05:00
parent f0ea79c41f
commit 4b81585c49

View file

@@ -95,10 +95,9 @@ PhysicalMemory::PhysicalMemory(const string& _name,
} }
} }
// iterate over the increasing addresses and chunks of contigous // iterate over the increasing addresses and chunks of contiguous
// space to be mapped to backing store, also remember what // space to be mapped to backing store, create it and inform the
// memories constitute the range so we can go and find out if we // memories
// have to init their parts to zero
vector<AddrRange> intlv_ranges; vector<AddrRange> intlv_ranges;
vector<AbstractMemory*> curr_memories; vector<AbstractMemory*> curr_memories;
for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin(); for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
@@ -162,8 +161,7 @@ PhysicalMemory::createBackingStore(AddrRange range,
// it appropriately // it appropriately
backingStore.push_back(make_pair(range, pmem)); backingStore.push_back(make_pair(range, pmem));
// point the memories to their backing store, and if requested, // point the memories to their backing store
// initialize the memory range to 0
for (vector<AbstractMemory*>::const_iterator m = _memories.begin(); for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
m != _memories.end(); ++m) { m != _memories.end(); ++m) {
DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n", DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n",
@@ -393,14 +391,10 @@ PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
fatal("Insufficient memory to allocate compression state for %s\n", fatal("Insufficient memory to allocate compression state for %s\n",
filename); filename);
// we've already got the actual backing store mapped
uint8_t* pmem = backingStore[store_id].second; uint8_t* pmem = backingStore[store_id].second;
AddrRange range = backingStore[store_id].first; AddrRange range = backingStore[store_id].first;
// unmap file that was mmapped in the constructor, this is
// done here to make sure that gzip and open don't muck with
// our nice large space of memory before we reallocate it
munmap((char*) pmem, range.size());
long range_size; long range_size;
UNSERIALIZE_SCALAR(range_size); UNSERIALIZE_SCALAR(range_size);
@@ -411,14 +405,6 @@ PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
fatal("Memory range size has changed! Saw %lld, expected %lld\n", fatal("Memory range size has changed! Saw %lld, expected %lld\n",
range_size, range.size()); range_size, range.size());
pmem = (uint8_t*) mmap(NULL, range.size(), PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0);
if (pmem == (void*) MAP_FAILED) {
perror("mmap");
fatal("Could not mmap physical memory!\n");
}
uint64_t curr_size = 0; uint64_t curr_size = 0;
long* temp_page = new long[chunk_size]; long* temp_page = new long[chunk_size];
long* pmem_current; long* pmem_current;