Mem: Separate the host and guest views of memory backing store

This patch moves all the memory backing store operations from the
independent memory controllers to the global physical memory. The main
reason for this patch is to allow address striping in a future set of
patches, but at this point it already provides some useful
functionality in that it is now possible to change the number of
memory controllers and their address mapping in combination with
checkpointing. Thus, the host and guest views of the memory backing
store are now completely separate.

With this patch, the individual memory controllers are far simpler as
all responsibility for serializing/unserializing is moved to the
physical memory. Currently, the functionality is more or less moved
from AbstractMemory to PhysicalMemory without any major
changes. However, in a future patch the physical memory will also
resolve any ranges that are interleaved and properly assign the
backing store to the memory controllers, and keep the host memory as a
single contiguous chunk per address range.
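
A minimal standalone sketch of the idea (plain POSIX, not gem5 code; the
Controller struct and the sizes are invented for illustration, mirroring
what createBackingStore() and setBackingStore() do in this patch): one
mmap per contiguous guest range, with the simulated controllers only
handed pointers into it.

#include <sys/mman.h>

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct Controller {
    uint64_t size;    // guest-visible size handled by this controller
    uint8_t* backing; // host pointer assigned by the "physical memory"
};

int main()
{
    // two controllers splitting one contiguous 32 MiB guest range
    std::vector<Controller> ctrls(2);
    ctrls[0].size = 16 * 1024 * 1024;
    ctrls[1].size = 16 * 1024 * 1024;

    uint64_t total = 0;
    for (size_t i = 0; i < ctrls.size(); ++i)
        total += ctrls[i].size;

    // one host allocation backs the whole range, as in createBackingStore()
    uint8_t* pmem = (uint8_t*)mmap(NULL, total, PROT_READ | PROT_WRITE,
                                   MAP_ANON | MAP_PRIVATE, -1, 0);
    assert(pmem != (uint8_t*)MAP_FAILED);

    // carve it up between the controllers, as in setBackingStore();
    // changing the controller split does not change the host allocation
    uint8_t* cursor = pmem;
    for (size_t i = 0; i < ctrls.size(); ++i) {
        ctrls[i].backing = cursor;
        cursor += ctrls[i].size;
    }

    // zero one controller's slice, analogous to the zero parameter
    memset(ctrls[0].backing, 0, ctrls[0].size);

    munmap(pmem, total);
    return 0;
}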

Functionality for future extensions that involve CPU virtualization also
enables the host to get pointers to the backing store.
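
A hedged sketch of that host-side access, using only the
getBackingStore() interface added in this patch: the mapGuestMemory()
helper below is hypothetical, and a real virtualization-based CPU model
would hand each (guest range, host pointer) pair to the hypervisor
instead of printing it.

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

#include "mem/physical.hh"

// Hypothetical helper, not part of this patch: walk the backing store
// that PhysicalMemory now exposes and report the host pointer backing
// each guest address range.
void
mapGuestMemory(const PhysicalMemory& physmem)
{
    // one (guest range, host pointer) pair per contiguous store; null
    // memories have no entry, and memories outside the global address
    // map may overlap with mapped ones
    std::vector<std::pair<AddrRange, uint8_t*> > store =
        physmem.getBackingStore();

    for (size_t i = 0; i < store.size(); ++i) {
        const AddrRange& range = store[i].first;
        uint8_t* host_ptr = store[i].second;

        // a virtualization-based CPU model would register (guest
        // physical address, size, host pointer) with the hypervisor
        // here, e.g. through KVM_SET_USER_MEMORY_REGION
        std::cout << "guest " << std::hex << range.start << ":"
                  << range.end << " -> host " << (void*)host_ptr
                  << std::dec << std::endl;
    }
}
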
Andreas Hansson 2012-10-15 08:12:32 -04:00
parent d7ad8dc608
commit 9baa35ba80
8 changed files with 550 additions and 230 deletions

src/mem/abstract_mem.cc

@@ -42,19 +42,6 @@
* Andreas Hansson
*/
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>
#include <cerrno>
#include <cstdio>
#include <climits>
#include <iostream>
#include <string>
#include "arch/registers.hh"
#include "config/the_isa.hh"
#include "debug/LLSC.hh"
@@ -72,29 +59,12 @@ AbstractMemory::AbstractMemory(const Params *p) :
{
if (size() % TheISA::PageBytes != 0)
panic("Memory Size not divisible by page size\n");
if (params()->null)
return;
int map_flags = MAP_ANON | MAP_PRIVATE;
pmemAddr = (uint8_t *)mmap(NULL, size(),
PROT_READ | PROT_WRITE, map_flags, -1, 0);
if (pmemAddr == (void *)MAP_FAILED) {
perror("mmap");
fatal("Could not mmap!\n");
}
//If requested, initialize all the memory to 0
if (p->zero)
memset(pmemAddr, 0, size());
}
AbstractMemory::~AbstractMemory()
void
AbstractMemory::setBackingStore(uint8_t* pmem_addr)
{
if (pmemAddr)
munmap((char*)pmemAddr, size());
pmemAddr = pmem_addr;
}
void
@@ -443,146 +413,3 @@ AbstractMemory::functionalAccess(PacketPtr pkt)
pkt->cmdString());
}
}
void
AbstractMemory::serialize(ostream &os)
{
if (!pmemAddr)
return;
gzFile compressedMem;
string filename = name() + ".physmem";
long _size = range.size();
SERIALIZE_SCALAR(filename);
SERIALIZE_SCALAR(_size);
// write memory file
string thefile = Checkpoint::dir() + "/" + filename.c_str();
int fd = creat(thefile.c_str(), 0664);
if (fd < 0) {
perror("creat");
fatal("Can't open physical memory checkpoint file '%s'\n", filename);
}
compressedMem = gzdopen(fd, "wb");
if (compressedMem == NULL)
fatal("Insufficient memory to allocate compression state for %s\n",
filename);
uint64_t pass_size = 0;
// gzwrite fails if (int)len < 0 (gzwrite returns int)
for (uint64_t written = 0; written < size(); written += pass_size) {
pass_size = (uint64_t)INT_MAX < (size() - written) ?
(uint64_t)INT_MAX : (size() - written);
if (gzwrite(compressedMem, pmemAddr + written,
(unsigned int) pass_size) != (int)pass_size) {
fatal("Write failed on physical memory checkpoint file '%s'\n",
filename);
}
}
if (gzclose(compressedMem))
fatal("Close failed on physical memory checkpoint file '%s'\n",
filename);
list<LockedAddr>::iterator i = lockedAddrList.begin();
vector<Addr> lal_addr;
vector<int> lal_cid;
while (i != lockedAddrList.end()) {
lal_addr.push_back(i->addr);
lal_cid.push_back(i->contextId);
i++;
}
arrayParamOut(os, "lal_addr", lal_addr);
arrayParamOut(os, "lal_cid", lal_cid);
}
void
AbstractMemory::unserialize(Checkpoint *cp, const string &section)
{
if (!pmemAddr)
return;
gzFile compressedMem;
long *tempPage;
long *pmem_current;
uint64_t curSize;
uint32_t bytesRead;
const uint32_t chunkSize = 16384;
string filename;
UNSERIALIZE_SCALAR(filename);
filename = cp->cptDir + "/" + filename;
// mmap memoryfile
int fd = open(filename.c_str(), O_RDONLY);
if (fd < 0) {
perror("open");
fatal("Can't open physical memory checkpoint file '%s'", filename);
}
compressedMem = gzdopen(fd, "rb");
if (compressedMem == NULL)
fatal("Insufficient memory to allocate compression state for %s\n",
filename);
// unmap file that was mmapped in the constructor
// This is done here to make sure that gzip and open don't muck with our
// nice large space of memory before we reallocate it
munmap((char*)pmemAddr, size());
long _size;
UNSERIALIZE_SCALAR(_size);
if (_size > params()->range.size())
fatal("Memory size has changed! size %lld, param size %lld\n",
_size, params()->range.size());
pmemAddr = (uint8_t *)mmap(NULL, size(),
PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
if (pmemAddr == (void *)MAP_FAILED) {
perror("mmap");
fatal("Could not mmap physical memory!\n");
}
curSize = 0;
tempPage = (long*)malloc(chunkSize);
if (tempPage == NULL)
fatal("Unable to malloc memory to read file %s\n", filename);
/* Only copy bytes that are non-zero, so we don't give the VM system hell */
while (curSize < size()) {
bytesRead = gzread(compressedMem, tempPage, chunkSize);
if (bytesRead == 0)
break;
assert(bytesRead % sizeof(long) == 0);
for (uint32_t x = 0; x < bytesRead / sizeof(long); x++)
{
if (*(tempPage+x) != 0) {
pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
*pmem_current = *(tempPage+x);
}
}
curSize += bytesRead;
}
free(tempPage);
if (gzclose(compressedMem))
fatal("Close failed on physical memory checkpoint file '%s'\n",
filename);
vector<Addr> lal_addr;
vector<int> lal_cid;
arrayParamIn(cp, section, "lal_addr", lal_addr);
arrayParamIn(cp, section, "lal_cid", lal_cid);
for(int i = 0; i < lal_addr.size(); i++)
lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
}

src/mem/abstract_mem.hh

@@ -56,6 +56,43 @@
class System;
/**
* Locked address class that represents a physical address and a
* context id.
*/
class LockedAddr {
private:
// on alpha, minimum LL/SC granularity is 16 bytes, so lower
// bits need to masked off.
static const Addr Addr_Mask = 0xf;
public:
// locked address
Addr addr;
// locking hw context
const int contextId;
static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
// check for matching execution context
bool matchesContext(Request *req) const
{
return (contextId == req->contextId());
}
LockedAddr(Request *req) : addr(mask(req->getPaddr())),
contextId(req->contextId())
{}
// constructor for unserialization use
LockedAddr(Addr _addr, int _cid) : addr(_addr), contextId(_cid)
{}
};
/**
* An abstract memory represents a contiguous block of physical
* memory, with an associated address range, and also provides basic
@@ -79,34 +116,6 @@ class AbstractMemory : public MemObject
// Should the memory appear in the global address map
bool inAddrMap;
class LockedAddr {
public:
// on alpha, minimum LL/SC granularity is 16 bytes, so lower
// bits need to masked off.
static const Addr Addr_Mask = 0xf;
static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
Addr addr; // locked address
int contextId; // locking hw context
// check for matching execution context
bool matchesContext(Request *req)
{
return (contextId == req->contextId());
}
LockedAddr(Request *req) : addr(mask(req->getPaddr())),
contextId(req->contextId())
{
}
// constructor for unserialization use
LockedAddr(Addr _addr, int _cid) : addr(_addr), contextId(_cid)
{
}
};
std::list<LockedAddr> lockedAddrList;
// helper function for checkLockedAddrs(): we really want to
@@ -183,7 +192,41 @@ class AbstractMemory : public MemObject
typedef AbstractMemoryParams Params;
AbstractMemory(const Params* p);
virtual ~AbstractMemory();
virtual ~AbstractMemory() {}
/**
* See if this is a null memory that should never store data and
* always return zero.
*
* @return true if null
*/
bool isNull() const { return params()->null; }
/**
* See if this memory should be initialized to zero or not.
*
* @return true if zero
*/
bool initToZero() const { return params()->zero; }
/**
* Set the host memory backing store to be used by this memory
* controller.
*
* @param pmem_addr Pointer to a segment of host memory
*/
void setBackingStore(uint8_t* pmem_addr);
/**
* Get the list of locked addresses to allow checkpointing.
*/
const std::list<LockedAddr>& getLockedAddrList() const
{ return lockedAddrList; }
/**
* Add a locked address to allow for checkpointing.
*/
void addLockedAddr(LockedAddr addr) { lockedAddrList.push_back(addr); }
/** read the system pointer
* Implemented for completeness with the setter
@@ -265,9 +308,6 @@ class AbstractMemory : public MemObject
*/
virtual void regStats();
virtual void serialize(std::ostream &os);
virtual void unserialize(Checkpoint *cp, const std::string &section);
};
#endif //__ABSTRACT_MEMORY_HH__

src/mem/physical.cc

@@ -37,14 +37,32 @@
* Authors: Andreas Hansson
*/
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/user.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>
#include <cerrno>
#include <climits>
#include <cstdio>
#include <iostream>
#include <string>
#include "debug/BusAddrRanges.hh"
#include "debug/Checkpoint.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"
using namespace std;
PhysicalMemory::PhysicalMemory(const vector<AbstractMemory*>& _memories) :
size(0)
PhysicalMemory::PhysicalMemory(const string& _name,
const vector<AbstractMemory*>& _memories) :
_name(_name), size(0)
{
// add the memories from the system to the address map as
// appropriate
for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
m != _memories.end(); ++m) {
// only add the memory if it is part of the global address map
@@ -59,11 +77,123 @@ PhysicalMemory::PhysicalMemory(const vector<AbstractMemory*>& _memories) :
if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
fatal("Memory address range for %s is overlapping\n",
(*m)->name());
} else {
DPRINTF(BusAddrRanges,
"Skipping memory %s that is not in global address map\n",
(*m)->name());
// this type of memory is used e.g. as reference memory by
// Ruby, and it also needs a backing store, but should
// not be part of the global address map
// simply do it independently, also note that this kind
// of memory is allowed to overlap in the logical address
// map
vector<AbstractMemory*> unmapped_mems;
unmapped_mems.push_back(*m);
createBackingStore((*m)->getAddrRange(), unmapped_mems);
}
DPRINTF(BusAddrRanges,
"Skipping memory %s that is not in global address map\n",
(*m)->name());
}
// iterate over the increasing addresses and create as large
// chunks as possible of contiguous space to be mapped to backing
// store, also remember what memories constitute the range so we
// can go and find out if we have to init their parts to zero
AddrRange curr_range;
vector<AbstractMemory*> curr_memories;
for (AddrRangeMap<AbstractMemory*>::const_iterator r = addrMap.begin();
r != addrMap.end(); ++r) {
// simply skip past all memories that are null and hence do
// not need any backing store
if (!r->second->isNull()) {
// if the current range is valid, decide if we split or
// not
if (curr_range.valid()) {
// if the ranges are neighbours, then append, this
// will eventually be extended to include support for
// address striping and merge the interleaved ranges
if (curr_range.end + 1 == r->first.start) {
DPRINTF(BusAddrRanges,
"Merging neighbouring ranges %x:%x and %x:%x\n",
curr_range.start, curr_range.end, r->first.start,
r->first.end);
// update the end of the range and add the current
// memory to the list of memories
curr_range.end = r->first.end;
curr_memories.push_back(r->second);
} else {
// what we already have is valid, and this is not
// contiguous, so create the backing store and
// then start over
createBackingStore(curr_range, curr_memories);
// remember the current range and reset the current
// set of memories to contain this one
curr_range = r->first;
curr_memories.clear();
curr_memories.push_back(r->second);
}
} else {
// we haven't seen any valid ranges yet, so remember
// the current range and reset the current set of
// memories to contain this one
curr_range = r->first;
curr_memories.clear();
curr_memories.push_back(r->second);
}
}
}
// if we have a valid range upon finishing the iteration, then
// create the backing store
if (curr_range.valid())
createBackingStore(curr_range, curr_memories);
}
void
PhysicalMemory::createBackingStore(AddrRange range,
const vector<AbstractMemory*>& _memories)
{
// perform the actual mmap
DPRINTF(BusAddrRanges, "Creating backing store for range %x:%x\n",
range.start, range.end);
int map_flags = MAP_ANON | MAP_PRIVATE;
uint8_t* pmem = (uint8_t*) mmap(NULL, range.size(),
PROT_READ | PROT_WRITE,
map_flags, -1, 0);
if (pmem == (uint8_t*) MAP_FAILED) {
perror("mmap");
fatal("Could not mmap %d bytes for range %x:%x!\n", range.size(),
range.start, range.end);
}
// remember this backing store so we can checkpoint it and unmap
// it appropriately
backingStore.push_back(make_pair(range, pmem));
// point the memories to their backing store, and if requested,
// initialize the memory range to 0
for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
m != _memories.end(); ++m) {
DPRINTF(BusAddrRanges, "Mapping memory %s to backing store\n",
(*m)->name());
(*m)->setBackingStore(pmem);
// if it should be zero, then go and make it so
if ((*m)->initToZero())
memset(pmem, 0, (*m)->size());
// advance the pointer for the next memory in line
pmem += (*m)->size();
}
}
PhysicalMemory::~PhysicalMemory()
{
// unmap the backing store
for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
s != backingStore.end(); ++s)
munmap((char*)s->second, s->first.size());
}
bool
@@ -122,3 +252,192 @@ PhysicalMemory::functionalAccess(PacketPtr pkt)
assert(m != addrMap.end());
m->second->functionalAccess(pkt);
}
void
PhysicalMemory::serialize(ostream& os)
{
// serialize all the locked addresses and their context ids
vector<Addr> lal_addr;
vector<int> lal_cid;
for (vector<AbstractMemory*>::iterator m = memories.begin();
m != memories.end(); ++m) {
const list<LockedAddr>& locked_addrs = (*m)->getLockedAddrList();
for (list<LockedAddr>::const_iterator l = locked_addrs.begin();
l != locked_addrs.end(); ++l) {
lal_addr.push_back(l->addr);
lal_cid.push_back(l->contextId);
}
}
arrayParamOut(os, "lal_addr", lal_addr);
arrayParamOut(os, "lal_cid", lal_cid);
// serialize the backing stores
unsigned int nbr_of_stores = backingStore.size();
SERIALIZE_SCALAR(nbr_of_stores);
unsigned int store_id = 0;
// store each backing store memory segment in a file
for (vector<pair<AddrRange, uint8_t*> >::iterator s = backingStore.begin();
s != backingStore.end(); ++s) {
nameOut(os, csprintf("%s.store%d", name(), store_id));
serializeStore(os, store_id++, s->first, s->second);
}
}
void
PhysicalMemory::serializeStore(ostream& os, unsigned int store_id,
AddrRange range, uint8_t* pmem)
{
// we cannot use the address range for the name as the
// memories that are not part of the address map can overlap
string filename = "store" + to_string(store_id) + ".pmem";
long range_size = range.size();
DPRINTF(Checkpoint, "Serializing physical memory %s with size %d\n",
filename, range_size);
SERIALIZE_SCALAR(store_id);
SERIALIZE_SCALAR(filename);
SERIALIZE_SCALAR(range_size);
// write memory file
string filepath = Checkpoint::dir() + "/" + filename.c_str();
int fd = creat(filepath.c_str(), 0664);
if (fd < 0) {
perror("creat");
fatal("Can't open physical memory checkpoint file '%s'\n",
filename);
}
gzFile compressed_mem = gzdopen(fd, "wb");
if (compressed_mem == NULL)
fatal("Insufficient memory to allocate compression state for %s\n",
filename);
uint64_t pass_size = 0;
// gzwrite fails if (int)len < 0 (gzwrite returns int)
for (uint64_t written = 0; written < range.size();
written += pass_size) {
pass_size = (uint64_t)INT_MAX < (range.size() - written) ?
(uint64_t)INT_MAX : (range.size() - written);
if (gzwrite(compressed_mem, pmem + written,
(unsigned int) pass_size) != (int) pass_size) {
fatal("Write failed on physical memory checkpoint file '%s'\n",
filename);
}
}
// close the compressed stream and check that the exit status
// is zero
if (gzclose(compressed_mem))
fatal("Close failed on physical memory checkpoint file '%s'\n",
filename);
}
void
PhysicalMemory::unserialize(Checkpoint* cp, const string& section)
{
// unserialize the locked addresses and map them to the
// appropriate memory controller
vector<Addr> lal_addr;
vector<int> lal_cid;
arrayParamIn(cp, section, "lal_addr", lal_addr);
arrayParamIn(cp, section, "lal_cid", lal_cid);
for(size_t i = 0; i < lal_addr.size(); ++i) {
AddrRangeMap<AbstractMemory*>::iterator m = addrMap.find(lal_addr[i]);
m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
}
// unserialize the backing stores
unsigned int nbr_of_stores;
UNSERIALIZE_SCALAR(nbr_of_stores);
for (unsigned int i = 0; i < nbr_of_stores; ++i) {
unserializeStore(cp, csprintf("%s.store%d", section, i));
}
}
void
PhysicalMemory::unserializeStore(Checkpoint* cp, const string& section)
{
const uint32_t chunk_size = 16384;
unsigned int store_id;
UNSERIALIZE_SCALAR(store_id);
string filename;
UNSERIALIZE_SCALAR(filename);
string filepath = cp->cptDir + "/" + filename;
// mmap memoryfile
int fd = open(filepath.c_str(), O_RDONLY);
if (fd < 0) {
perror("open");
fatal("Can't open physical memory checkpoint file '%s'", filename);
}
gzFile compressed_mem = gzdopen(fd, "rb");
if (compressed_mem == NULL)
fatal("Insufficient memory to allocate compression state for %s\n",
filename);
uint8_t* pmem = backingStore[store_id].second;
AddrRange range = backingStore[store_id].first;
// unmap file that was mmapped in the constructor, this is
// done here to make sure that gzip and open don't muck with
// our nice large space of memory before we reallocate it
munmap((char*) pmem, range.size());
long range_size;
UNSERIALIZE_SCALAR(range_size);
DPRINTF(Checkpoint, "Unserializing physical memory %s with size %d\n",
filename, range_size);
if (range_size != range.size())
fatal("Memory range size has changed! Saw %lld, expected %lld\n",
range_size, range.size());
pmem = (uint8_t*) mmap(NULL, range.size(), PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0);
if (pmem == (void*) MAP_FAILED) {
perror("mmap");
fatal("Could not mmap physical memory!\n");
}
uint64_t curr_size = 0;
long* temp_page = new long[chunk_size];
long* pmem_current;
uint32_t bytes_read;
while (curr_size < range.size()) {
bytes_read = gzread(compressed_mem, temp_page, chunk_size);
if (bytes_read == 0)
break;
assert(bytes_read % sizeof(long) == 0);
for (uint32_t x = 0; x < bytes_read / sizeof(long); x++) {
// Only copy bytes that are non-zero, so we don't give
// the VM system hell
if (*(temp_page + x) != 0) {
pmem_current = (long*)(pmem + curr_size + x * sizeof(long));
*pmem_current = *(temp_page + x);
}
}
curr_size += bytes_read;
}
delete[] temp_page;
if (gzclose(compressed_mem))
fatal("Close failed on physical memory checkpoint file '%s'\n",
filename);
}

src/mem/physical.hh

@@ -41,19 +41,37 @@
#define __PHYSICAL_MEMORY_HH__
#include "base/addr_range_map.hh"
#include "mem/abstract_mem.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
/**
* Forward declaration to avoid header dependencies.
*/
class AbstractMemory;
/**
* The physical memory encapsulates all memories in the system and
* provides basic functionality for accessing those memories without
* going through the memory system and interconnect.
*
* The physical memory is also responsible for providing the host
* system backing store used by the memories in the simulated guest
* system. When the system is created, the physical memory allocates
* the backing store based on the address ranges that are populated in
* the system, and does so independently of how those map to actual
* memory controllers. Thus, the physical memory completely abstracts
* the mapping of the backing store of the host system and the address
* mapping in the guest system. This enables us to arbitrarily change
* the number of memory controllers, and their address mapping, as
* long as the ranges stay the same.
*/
class PhysicalMemory
class PhysicalMemory : public Serializable
{
private:
// Name for debugging
std::string _name;
// Global address map
AddrRangeMap<AbstractMemory*> addrMap;
@@ -66,23 +84,45 @@ class PhysicalMemory
// The total memory size
uint64_t size;
// The physical memory used to provide the memory in the simulated
// system
std::vector<std::pair<AddrRange, uint8_t*> > backingStore;
// Prevent copying
PhysicalMemory(const PhysicalMemory&);
// Prevent assignment
PhysicalMemory& operator=(const PhysicalMemory&);
/**
* Create the memory region providing the backing store for a
* given address range that corresponds to a set of memories in
* the simulated system.
*
* @param range The address range covered
* @param memories The memories this range maps to
*/
void createBackingStore(AddrRange range,
const std::vector<AbstractMemory*>& _memories);
public:
/**
* Create a physical memory object, wrapping a number of memories.
*/
PhysicalMemory(const std::vector<AbstractMemory*>& _memories);
PhysicalMemory(const std::string& _name,
const std::vector<AbstractMemory*>& _memories);
/**
* Nothing to destruct.
* Unmap all the backing store we have used.
*/
~PhysicalMemory() { }
~PhysicalMemory();
/**
* Return the name for debugging and for creation of sections for
* checkpointing.
*/
const std::string name() const { return _name; }
/**
* Check if a physical address is within a range of a memory that
@@ -108,14 +148,72 @@ class PhysicalMemory
*/
uint64_t totalSize() const { return size; }
/**
/**
* Get the pointers to the backing store for external host
* access. Note that memory in the guest should be accessed using
* access() or functionalAccess(). This interface is primarily
* intended for CPU models using hardware virtualization. Note
* that memories that are null are not present, and that the
* backing store may also contain memories that are not part of
* the OS-visible global address map and thus are allowed to
* overlap.
*
* @return Pointers to the memory backing store
*/
std::vector<std::pair<AddrRange, uint8_t*> > getBackingStore() const
{ return backingStore; }
/**
* Perform an untimed memory access and update all the state
* (e.g. locked addresses) and statistics accordingly. The packet
* is turned into a response if required.
*
* @param pkt Packet performing the access
*/
void access(PacketPtr pkt);
/**
* Perform an untimed memory read or write without changing
* anything but the memory itself. No stats are affected by this
* access. In addition to normal accesses this also facilitates
* print requests.
*
* @param pkt Packet performing the access
*/
void functionalAccess(PacketPtr pkt);
/**
* Serialize all the memories in the system. This is independent
* of the logical memory layout, and the serialization only sees
* the contiguous backing store, independent of how this maps to
* logical memories in the guest system.
*
* @param os stream to serialize to
*/
void serialize(std::ostream& os);
/**
* Serialize a specific store.
*
* @param store_id Unique identifier of this backing store
* @param range The address range of this backing store
* @param pmem The host pointer to this backing store
*/
void serializeStore(std::ostream& os, unsigned int store_id,
AddrRange range, uint8_t* pmem);
/**
* Unserialize the memories in the system. As with the
* serialization, this action is independent of how the address
* ranges are mapped to logical memories in the guest system.
*/
void unserialize(Checkpoint* cp, const std::string& section);
/**
* Unserialize a specific backing store, identified by a section.
*/
void unserializeStore(Checkpoint* cp, const std::string& section);
};
#endif //__PHYSICAL_MEMORY_HH__

src/sim/root.cc

@@ -146,7 +146,7 @@ Root::unserialize(Checkpoint *cp, const std::string &section)
warn("!!!! Checkpoint ver %#x is older than current ver %#x !!!!\n",
cpt_ver, gem5CheckpointVersion);
warn("You might experience some issues when restoring and should run "
"the checkpoint upgrader (util/cpt_upgrade.py) on your "
"the checkpoint upgrader (util/cpt_upgrader.py) on your "
"checkpoint\n");
warn("**********************************************************\n");
} else if (cpt_ver > gem5CheckpointVersion) {

src/sim/serialize.hh

@@ -57,7 +57,7 @@ class SimObject;
* SimObject shouldn't cause the version number to increase, only changes to
* existing objects such as serializing/unserializing more state, changing sizes
* of serialized arrays, etc. */
static const uint64_t gem5CheckpointVersion = 0x0000000000000001;
static const uint64_t gem5CheckpointVersion = 0x0000000000000002;
template <class T>
void paramOut(std::ostream &os, const std::string &name, const T &param);

src/sim/system.cc

@@ -58,6 +58,7 @@
#include "debug/Loader.hh"
#include "debug/WorkItems.hh"
#include "kern/kernel_stats.hh"
#include "mem/abstract_mem.hh"
#include "mem/physical.hh"
#include "params/System.hh"
#include "sim/byteswap.hh"
@@ -81,7 +82,7 @@ System::System(Params *p)
virtProxy(_systemPort),
loadAddrMask(p->load_addr_mask),
nextPID(0),
physmem(p->memories),
physmem(name() + ".physmem", p->memories),
memoryMode(p->mem_mode),
workItemsBegin(0),
workItemsEnd(0),
@@ -342,6 +343,10 @@ System::serialize(ostream &os)
SERIALIZE_SCALAR(pagePtr);
SERIALIZE_SCALAR(nextPID);
serializeSymtab(os);
// also serialize the memories in the system
nameOut(os, csprintf("%s.physmem", name()));
physmem.serialize(os);
}
@@ -353,6 +358,9 @@ System::unserialize(Checkpoint *cp, const string &section)
UNSERIALIZE_SCALAR(pagePtr);
UNSERIALIZE_SCALAR(nextPID);
unserializeSymtab(cp, section);
// also unserialize the memories in the system
physmem.unserialize(cp, csprintf("%s.physmem", name()));
}
void

util/cpt_upgrader.py

@@ -61,11 +61,8 @@ import ConfigParser
import sys, os
import os.path as osp
def from_0(cpt):
pass
# An example of a translator
def from_1(cpt):
def from_0(cpt):
if cpt.get('root','isa') == 'arm':
for sec in cpt.sections():
import re
@@ -77,6 +74,37 @@ def from_1(cpt):
#mr.insert(26,0)
cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr))
# The backing store supporting the memories in the system has changed
# in that it is now stored globally per address range. As a result the
# actual storage is separate from the memory controllers themselves.
def from_1(cpt):
for sec in cpt.sections():
import re
# Search for a physical memory
if re.search('.*sys.*\.physmem$', sec):
# Add the number of stores attribute to the global physmem
cpt.set(sec, 'nbr_of_stores', '1')
# Get the filename and size as this is moving to the
# specific backing store
mem_filename = cpt.get(sec, 'filename')
mem_size = cpt.get(sec, '_size')
cpt.remove_option(sec, 'filename')
cpt.remove_option(sec, '_size')
# Get the name so that we can create the new section
system_name = str(sec).split('.')[0]
section_name = system_name + '.physmem.store0'
cpt.add_section(section_name)
cpt.set(section_name, 'store_id', '0')
cpt.set(section_name, 'range_size', mem_size)
cpt.set(section_name, 'filename', mem_filename)
elif re.search('.*sys.*\.\w*mem$', sec):
# Due to the lack of information about a start address,
# this migration only works if there is a single memory in
# the system, thus starting at 0
raise ValueError("more than one memory detected (" + sec + ")")
migrations = []
migrations.append(from_0)
migrations.append(from_1)