mem: Limit the accesses to a page before forcing a precharge
This patch adds a basic starvation-prevention mechanism where a DRAM page is forced to close after a certain number of accesses. The limit is combined with the open and open-adaptive page policies and, if reached, causes an auto-precharge.
This commit is contained in:
parent
6557741311
commit
116985d661
3 changed files with 42 additions and 16 deletions
|
@ -90,6 +90,10 @@ class SimpleDRAM(AbstractMemory):
|
|||
addr_mapping = Param.AddrMap('RoRaBaChCo', "Address mapping policy")
|
||||
page_policy = Param.PageManage('open', "Page closure management policy")
|
||||
|
||||
# enforce a limit on the number of accesses per row
|
||||
max_accesses_per_row = Param.Unsigned(16, "Max accesses per row before "
|
||||
"closing");
|
||||
|
||||
# pipeline latency of the controller and PHY, split into a
|
||||
# frontend part and a backend part, with reads and writes serviced
|
||||
# by the queues only seeing the frontend contribution, and reads
|
||||
|
|
|
@ -77,6 +77,7 @@ SimpleDRAM::SimpleDRAM(const SimpleDRAMParams* p) :
|
|||
tXAW(p->tXAW), activationLimit(p->activation_limit),
|
||||
memSchedPolicy(p->mem_sched_policy), addrMapping(p->addr_mapping),
|
||||
pageMgmt(p->page_policy),
|
||||
maxAccessesPerRow(p->max_accesses_per_row),
|
||||
frontendLatency(p->static_frontend_latency),
|
||||
backendLatency(p->static_backend_latency),
|
||||
busBusyUntil(0), writeStartTime(0),
|
||||
|
@ -1067,7 +1068,6 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
|
|||
if (pageMgmt == Enums::open || pageMgmt == Enums::open_adaptive) {
|
||||
bank.openRow = dram_pkt->row;
|
||||
bank.freeAt = curTick() + addDelay + accessLat;
|
||||
bank.bytesAccessed += burstSize;
|
||||
|
||||
// If you activated a new row due to this access, the next access
|
||||
// will have to respect tRAS for this bank.
|
||||
|
@ -1081,9 +1081,19 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
|
|||
// we are now closing this row
|
||||
bytesPerActivate.sample(bank.bytesAccessed);
|
||||
bank.bytesAccessed = 0;
|
||||
bank.rowAccesses = 0;
|
||||
}
|
||||
|
||||
if (pageMgmt == Enums::open_adaptive) {
|
||||
// increment the bytes accessed and the accesses per row
|
||||
bank.bytesAccessed += burstSize;
|
||||
++bank.rowAccesses;
|
||||
|
||||
// if we reached the max, then issue with an auto-precharge
|
||||
bool auto_precharge = bank.rowAccesses == maxAccessesPerRow;
|
||||
|
||||
// if we did not hit the limit, we might still want to
|
||||
// auto-precharge
|
||||
if (!auto_precharge && pageMgmt == Enums::open_adaptive) {
|
||||
// a twist on the open page policy is to not blindly keep the
|
||||
// page open, but close it if there are no row hits, and there
|
||||
// are bank conflicts in the queue
|
||||
|
@ -1110,19 +1120,24 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
|
|||
++p;
|
||||
}
|
||||
|
||||
// auto pre-charge
|
||||
if (!got_more_hits && got_bank_conflict) {
|
||||
bank.openRow = -1;
|
||||
bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP;
|
||||
--numBanksActive;
|
||||
if (numBanksActive == 0) {
|
||||
startTickPrechargeAll = std::max(startTickPrechargeAll,
|
||||
bank.freeAt);
|
||||
DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
|
||||
startTickPrechargeAll);
|
||||
}
|
||||
DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
|
||||
// auto pre-charge if we have not got any more hits, and
|
||||
// have a bank conflict
|
||||
auto_precharge = !got_more_hits && got_bank_conflict;
|
||||
}
|
||||
|
||||
// if this access should use auto-precharge, then we are
|
||||
// closing the row
|
||||
if (auto_precharge) {
|
||||
bank.openRow = -1;
|
||||
bank.freeAt = std::max(bank.freeAt, bank.tRASDoneAt) + tRP;
|
||||
--numBanksActive;
|
||||
if (numBanksActive == 0) {
|
||||
startTickPrechargeAll = std::max(startTickPrechargeAll,
|
||||
bank.freeAt);
|
||||
DPRINTF(DRAM, "All banks precharged at tick: %ld\n",
|
||||
startTickPrechargeAll);
|
||||
}
|
||||
DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
|
||||
}
|
||||
|
||||
DPRINTF(DRAM, "doDRAMAccess::bank.freeAt is %lld\n", bank.freeAt);
|
||||
|
@ -1480,7 +1495,7 @@ SimpleDRAM::regStats()
|
|||
.desc("What write queue length does an incoming req see");
|
||||
|
||||
bytesPerActivate
|
||||
.init(rowBufferSize)
|
||||
.init(maxAccessesPerRow)
|
||||
.name(name() + ".bytesPerActivate")
|
||||
.desc("Bytes accessed per row activation")
|
||||
.flags(nozero);
|
||||
|
|
|
@ -155,11 +155,12 @@ class SimpleDRAM : public AbstractMemory
|
|||
Tick tRASDoneAt;
|
||||
Tick actAllowedAt;
|
||||
|
||||
uint32_t rowAccesses;
|
||||
uint32_t bytesAccessed;
|
||||
|
||||
Bank() :
|
||||
openRow(INVALID_ROW), freeAt(0), tRASDoneAt(0), actAllowedAt(0),
|
||||
bytesAccessed(0)
|
||||
rowAccesses(0), bytesAccessed(0)
|
||||
{ }
|
||||
};
|
||||
|
||||
|
@ -507,6 +508,12 @@ class SimpleDRAM : public AbstractMemory
|
|||
Enums::AddrMap addrMapping;
|
||||
Enums::PageManage pageMgmt;
|
||||
|
||||
/**
|
||||
* Max column accesses (read and write) per row, before forcefully
|
||||
* closing it.
|
||||
*/
|
||||
const uint32_t maxAccessesPerRow;
|
||||
|
||||
/**
|
||||
* Pipeline latency of the controller frontend. The frontend
|
||||
* contribution is added to writes (that complete when they are in
|
||||
|
|
Loading…
Reference in a new issue