mem: More descriptive address-mapping scheme names

This patch adds the row bits to the names of the address mapping
schemes to make it clearer that all the current schemes place the
row bits as the most significant bits.
Author: Andreas Hansson
Date:   2014-03-23 11:11:53 -04:00
parent  a3d582f8e6
commit  7e7b67472a
3 changed files with 27 additions and 26 deletions
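For reference, every scheme keeps the row bits as the most significant bits; the new names simply spell that out. A small illustrative sketch (plain Python, not gem5 code) of the renaming and the field order, MSB to LSB, that each new name encodes:

# Illustrative only: old name -> new name; every scheme keeps the
# row (Ro) bits on top.
renamed = {
    'RaBaChCo': 'RoRaBaChCo',  # row, rank, bank, channel, column
    'RaBaCoCh': 'RoRaBaCoCh',  # row, rank, bank, column, channel
    'CoRaBaCh': 'RoCoRaBaCh',  # row, column, rank, bank, channel
}
for old, new in renamed.items():
    print('%s -> %s' % (old, new))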


@@ -168,7 +168,7 @@ def config_mem(options, system):
                 # If the channel bits are appearing after the column
                 # bits, we need to add the appropriate number of bits
                 # for the row buffer size
-                if ctrl.addr_mapping.value == 'RaBaChCo':
+                if ctrl.addr_mapping.value == 'RoRaBaChCo':
                     # This computation only really needs to happen
                     # once, but as we rely on having an instance we
                     # end up having to repeat it for each and every

@@ -46,12 +46,13 @@ from AbstractMemory import *
 # First-Served and a First-Row Hit then First-Come First-Served
 class MemSched(Enum): vals = ['fcfs', 'frfcfs']
 
-# Enum for the address mapping. With Ra, Co, Ba and Ch denoting rank,
-# column, bank and channel, respectively, and going from MSB to LSB.
-# Available are RaBaChCo and RaBaCoCh, that are suitable for an
-# open-page policy, optimising for sequential accesses hitting in the
-# open row. For a closed-page policy, CoRaBaCh maximises parallelism.
-class AddrMap(Enum): vals = ['RaBaChCo', 'RaBaCoCh', 'CoRaBaCh']
+# Enum for the address mapping. With Ch, Ra, Ba, Ro and Co denoting
+# channel, rank, bank, row and column, respectively, and going from
+# MSB to LSB. Available are RoRaBaChCo and RoRaBaCoCh, that are
+# suitable for an open-page policy, optimising for sequential accesses
+# hitting in the open row. For a closed-page policy, RoCoRaBaCh
+# maximises parallelism.
+class AddrMap(Enum): vals = ['RoRaBaChCo', 'RoRaBaCoCh', 'RoCoRaBaCh']
 
 # Enum for the page policy, either open, open_adaptive or close.
 class PageManage(Enum): vals = ['open', 'open_adaptive', 'close']
@@ -84,7 +85,7 @@ class SimpleDRAM(AbstractMemory):
 
     # scheduler, address map and page policy
     mem_sched_policy = Param.MemSched('frfcfs', "Memory scheduling policy")
-    addr_mapping = Param.AddrMap('RaBaChCo', "Address mapping policy")
+    addr_mapping = Param.AddrMap('RoRaBaChCo', "Address mapping policy")
     page_policy = Param.PageManage('open', "Page closure management policy")
 
     # pipeline latency of the controller and PHY, split into a
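With the new default in place, a configuration script selects a mapping by name exactly as before, just with the row bits spelled out. A minimal usage sketch, assuming a gem5 build in which SimpleDRAM is exposed through m5.objects (illustrative only, runs inside gem5's Python):

from m5.objects import SimpleDRAM

ctrl = SimpleDRAM()
# open-page friendly mapping: sequential lines hit in the same open row
ctrl.addr_mapping = 'RoRaBaChCo'
# for a closed-page policy, the parallelism-maximising alternative is:
# ctrl.addr_mapping = 'RoCoRaBaCh'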


@@ -128,20 +128,20 @@ SimpleDRAM::init()
             panic("%s has %d interleaved address stripes but %d channel(s)\n",
                   name(), range.stripes(), channels);
 
-        if (addrMapping == Enums::RaBaChCo) {
+        if (addrMapping == Enums::RoRaBaChCo) {
             if (rowBufferSize != range.granularity()) {
-                panic("Interleaving of %s doesn't match RaBaChCo address map\n",
-                      name());
+                panic("Interleaving of %s doesn't match RoRaBaChCo "
+                      "address map\n", name());
             }
-        } else if (addrMapping == Enums::RaBaCoCh) {
-            if (burstSize != range.granularity()) {
-                panic("Interleaving of %s doesn't match RaBaCoCh address map\n",
-                      name());
+        } else if (addrMapping == Enums::RoRaBaCoCh) {
+            if (system()->cacheLineSize() != range.granularity()) {
+                panic("Interleaving of %s doesn't match RoRaBaCoCh "
+                      "address map\n", name());
             }
-        } else if (addrMapping == Enums::CoRaBaCh) {
-            if (burstSize != range.granularity())
-                panic("Interleaving of %s doesn't match CoRaBaCh address map\n",
-                      name());
+        } else if (addrMapping == Enums::RoCoRaBaCh) {
+            if (system()->cacheLineSize() != range.granularity())
+                panic("Interleaving of %s doesn't match RoCoRaBaCh "
+                      "address map\n", name());
         }
     }
 }
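These checks tie the interleaving granularity to the mapping: with the channel bits placed above the column bits (RoRaBaChCo), the interleaving stripe must cover a whole row buffer, whereas the other two mappings interleave at cache-line granularity. A toy sketch with made-up sizes (2 kB row buffer, 64 B cache lines), just to make the constraint concrete:

# Made-up sizes, for illustration only
row_buffer_size = 2048   # bytes per row buffer
cache_line_size = 64     # bytes per cache line

def required_granularity(addr_mapping):
    # mirrors the sanity checks in SimpleDRAM::init() above
    if addr_mapping == 'RoRaBaChCo':
        return row_buffer_size
    return cache_line_size

for m in ('RoRaBaChCo', 'RoRaBaCoCh', 'RoCoRaBaCh'):
    print('%s needs %d byte interleaving' % (m, required_granularity(m)))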
@@ -196,8 +196,8 @@ SimpleDRAM::DRAMPacket*
 SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRead)
 {
     // decode the address based on the address mapping scheme, with
-    // Ra, Co, Ba and Ch denoting rank, column, bank and channel,
-    // respectively
+    // Ro, Ra, Co, Ba and Ch denoting row, rank, column, bank and
+    // channel, respectively
     uint8_t rank;
     uint8_t bank;
     uint16_t row;
@@ -207,7 +207,7 @@ SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRead)
     // we have removed the lowest order address bits that denote the
     // position within the column
-    if (addrMapping == Enums::RaBaChCo) {
+    if (addrMapping == Enums::RoRaBaChCo) {
         // the lowest order bits denote the column to ensure that
         // sequential cache lines occupy the same row
         addr = addr / columnsPerRowBuffer;
@@ -228,7 +228,7 @@ SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRead)
         // lastly, get the row bits
         row = addr % rowsPerBank;
         addr = addr / rowsPerBank;
-    } else if (addrMapping == Enums::RaBaCoCh) {
+    } else if (addrMapping == Enums::RoRaBaCoCh) {
         // take out the channel part of the address
         addr = addr / channels;
@@ -248,7 +248,7 @@ SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRead)
         // lastly, get the row bits
         row = addr % rowsPerBank;
         addr = addr / rowsPerBank;
-    } else if (addrMapping == Enums::CoRaBaCh) {
+    } else if (addrMapping == Enums::RoCoRaBaCh) {
         // optimise for closed page mode and utilise maximum
         // parallelism of the DRAM (at the cost of power)
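The hunks above only show fragments of each decode branch. For orientation, here is a standalone sketch (toy geometry, plain Python, not gem5 code) of the RoRaBaChCo chain, following the divide-and-modulo pattern of decodeAddr: the column bits are peeled off first, then the channel, bank and rank bits, leaving the row bits as the most significant bits.

# Toy geometry, for illustration only
columns_per_row_buffer = 32
channels = 2
banks_per_rank = 8
ranks_per_channel = 2
rows_per_bank = 16384

def decode_ro_ra_ba_ch_co(addr):
    # addr is assumed to already have the bits denoting the position
    # within a burst stripped, as noted in decodeAddr above
    addr //= columns_per_row_buffer   # drop the column bits
    addr //= channels                 # take out the channel bits
    bank = addr % banks_per_rank      # then the bank bits
    addr //= banks_per_rank
    rank = addr % ranks_per_channel   # then the rank bits
    addr //= ranks_per_channel
    row = addr % rows_per_bank        # the row bits are the MSBs
    return row, rank, bank

print(decode_ro_ra_ba_ch_co(0x1234567))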
@@ -591,8 +591,8 @@ SimpleDRAM::printParams() const
             rowBufferSize * rowsPerBank * banksPerRank * ranksPerChannel);
     string scheduler = memSchedPolicy == Enums::fcfs ? "FCFS" : "FR-FCFS";
-    string address_mapping = addrMapping == Enums::RaBaChCo ? "RaBaChCo" :
-        (addrMapping == Enums::RaBaCoCh ? "RaBaCoCh" : "CoRaBaCh");
+    string address_mapping = addrMapping == Enums::RoRaBaChCo ? "RoRaBaChCo" :
+        (addrMapping == Enums::RoRaBaCoCh ? "RoRaBaCoCh" : "RoCoRaBaCh");
     string page_policy = pageMgmt == Enums::open ? "OPEN" :
         (pageMgmt == Enums::open_adaptive ? "OPEN (adaptive)" : "CLOSE");