mem: Create a separate class for the cache write buffer

This patch breaks out the cache write buffer into a separate class,
without affecting any stats. The goal of the patch is to avoid
encumbering the much simpler write queue with the complex MSHR
handling. In a follow-on patch, this simplification allows us to
implement write combining.

The write buffer gets its own class, WriteQueue, which shares a common
ancestor, the generic Queue, with the MSHRQueue.
Andreas Hansson, 2016-03-17 09:51:18 -04:00
commit 041ea8107e (parent f5d1dd75e5)
15 changed files with 1284 additions and 548 deletions
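To make the restructuring easier to follow, here is a minimal, hypothetical C++ sketch of the class relationships this patch establishes. Class and member names are taken from the diffs below; everything else is simplified and abbreviated, and this is not the actual gem5 implementation:

#include <cstdint>
#include <list>
#include <string>
#include <vector>

typedef uint64_t Addr;
typedef uint64_t Tick;
typedef int64_t Counter;

// Shared base for MSHRs and write-queue entries (cf. queue_entry.hh below).
class QueueEntry
{
  public:
    Tick readyTime = 0;     // when the entry is ready to issue
    bool inService = false; // sent downstream
    Counter order = 0;      // disambiguates writes and misses
    Addr blkAddr = 0;       // block-aligned address
    bool isSecure = false;  // targets the secure memory space
    virtual ~QueueEntry() {}
};

// Generic queue shared by the MSHR queue and the write buffer (cf. queue.hh).
template <class Entry>
class Queue
{
  protected:
    const std::string label;      // for functional print requests
    const int numEntries;         // requested entries plus reserve - 1
    const int numReserve;         // temporary overflow space
    std::vector<Entry> entries;   // actual storage
    std::list<Entry *> freeList;  // unallocated entries
    int allocated = 0;            // currently allocated entries

  public:
    Queue(const std::string &_label, int num_entries, int reserve)
        : label(_label), numEntries(num_entries + reserve - 1),
          numReserve(reserve), entries(numEntries)
    {
        for (auto &entry : entries)
            freeList.push_back(&entry);
    }

    bool isEmpty() const { return allocated == 0; }
    bool isFull() const { return allocated > numEntries - numReserve; }
};

// Entry types: an MSHR keeps (deferred) target lists, a write-queue entry
// holds a single eviction or uncacheable write.
class MSHR : public QueueEntry { /* targets, deferredTargets, ... */ };
class WriteQueueEntry : public QueueEntry { /* single target, ... */ };

// The two concrete queues specialise the generic Queue; MSHRQueue adds the
// demand reserve, WriteQueue adds its own allocate/markInService logic.
class MSHRQueue : public Queue<MSHR>
{
  public:
    using Queue<MSHR>::Queue;
};

class WriteQueue : public Queue<WriteQueueEntry>
{
  public:
    using Queue<WriteQueueEntry>::Queue;
};

In the actual patch, BaseCache then holds an MSHRQueue mshrQueue and a WriteQueue writeBuffer, as the base.hh hunk below shows.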

src/mem/cache/SConscript

@ -37,6 +37,8 @@ Source('cache.cc')
Source('blk.cc')
Source('mshr.cc')
Source('mshr_queue.cc')
Source('write_queue.cc')
Source('write_queue_entry.cc')
DebugFlag('Cache')
DebugFlag('CachePort')

src/mem/cache/base.cc

@ -68,9 +68,8 @@ BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
: MemObject(p),
cpuSidePort(nullptr), memSidePort(nullptr),
mshrQueue("MSHRs", p->mshrs, 4, p->demand_mshr_reserve, MSHRQueue_MSHRs),
writeBuffer("write buffer", p->write_buffers, p->mshrs+1000, 0,
MSHRQueue_WriteBuffer),
mshrQueue("MSHRs", p->mshrs, 4, p->demand_mshr_reserve),
writeBuffer("write buffer", p->write_buffers, p->mshrs+1000),
blkSize(blk_size),
lookupLatency(p->hit_latency),
forwardLatency(p->hit_latency),

src/mem/cache/base.hh

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2013, 2015 ARM Limited
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@ -62,6 +62,7 @@
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "mem/cache/mshr_queue.hh"
#include "mem/cache/write_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/qport.hh"
@ -72,12 +73,12 @@
#include "sim/sim_exit.hh"
#include "sim/system.hh"
class MSHR;
/**
* A basic cache interface. Implements some common functions for speed.
*/
class BaseCache : public MemObject
{
protected:
/**
* Indexes to enumerate the MSHR queues.
*/
@ -190,47 +191,29 @@ class BaseCache : public MemObject
MSHRQueue mshrQueue;
/** Write/writeback buffer */
MSHRQueue writeBuffer;
WriteQueue writeBuffer;
/**
* Allocate a buffer, passing the time indicating when schedule an
* event to the queued port to go and ask the MSHR and write queue
* if they have packets to send.
*
* allocateBufferInternal() function is called in:
* - MSHR allocateWriteBuffer (uncached write forwarded to WriteBuffer);
* - MSHR allocateMissBuffer (miss in MSHR queue);
* Mark a request as in service (sent downstream in the memory
* system), effectively making this MSHR the ordering point.
*/
MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
PacketPtr pkt, Tick time,
bool sched_send)
void markInService(MSHR *mshr, bool pending_modified_resp)
{
// check that the address is block aligned since we rely on
// this in a number of places when checking for matches and
// overlap
assert(addr == blockAlign(addr));
bool wasFull = mshrQueue.isFull();
mshrQueue.markInService(mshr, pending_modified_resp);
MSHR *mshr = mq->allocate(addr, size, pkt, time, order++,
allocOnFill(pkt->cmd));
if (mq->isFull()) {
setBlocked((BlockedCause)mq->index);
if (wasFull && !mshrQueue.isFull()) {
clearBlocked(Blocked_NoMSHRs);
}
if (sched_send)
// schedule the send
schedMemSideSendEvent(time);
return mshr;
}
void markInServiceInternal(MSHR *mshr, bool pending_modified_resp)
void markInService(WriteQueueEntry *entry)
{
MSHRQueue *mq = mshr->queue;
bool wasFull = mq->isFull();
mq->markInService(mshr, pending_modified_resp);
if (wasFull && !mq->isFull()) {
clearBlocked((BlockedCause)mq->index);
bool wasFull = writeBuffer.isFull();
writeBuffer.markInService(entry);
if (wasFull && !writeBuffer.isFull()) {
clearBlocked(Blocked_NoWBBuffers);
}
}
@ -511,19 +494,44 @@ class BaseCache : public MemObject
MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
{
return allocateBufferInternal(&mshrQueue,
blockAlign(pkt->getAddr()), blkSize,
pkt, time, sched_send);
MSHR *mshr = mshrQueue.allocate(blockAlign(pkt->getAddr()), blkSize,
pkt, time, order++,
allocOnFill(pkt->cmd));
if (mshrQueue.isFull()) {
setBlocked((BlockedCause)MSHRQueue_MSHRs);
}
if (sched_send) {
// schedule the send
schedMemSideSendEvent(time);
}
return mshr;
}
MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time)
void allocateWriteBuffer(PacketPtr pkt, Tick time)
{
// should only see writes or clean evicts here
assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
return allocateBufferInternal(&writeBuffer,
blockAlign(pkt->getAddr()), blkSize,
pkt, time, true);
Addr blk_addr = blockAlign(pkt->getAddr());
WriteQueueEntry *wq_entry =
writeBuffer.findMatch(blk_addr, pkt->isSecure());
if (wq_entry && !wq_entry->inService) {
DPRINTF(Cache, "Potential to merge writeback %s to %#llx",
pkt->cmdString(), pkt->getAddr());
}
writeBuffer.allocate(blk_addr, blkSize, pkt, time, order++);
if (writeBuffer.isFull()) {
setBlocked((BlockedCause)MSHRQueue_WriteBuffer);
}
// schedule the send
schedMemSideSendEvent(time);
}
/**

src/mem/cache/cache.cc

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010-2015 ARM Limited
* Copyright (c) 2010-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@ -286,20 +286,6 @@ Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
}
}
/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////
void
Cache::markInService(MSHR *mshr, bool pending_modified_resp)
{
markInServiceInternal(mshr, pending_modified_resp);
}
/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
@ -363,11 +349,9 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
// generating CleanEvict and Writeback or simply CleanEvict and
// CleanEvict almost simultaneously will be caught by snoops sent out
// by crossbar.
std::vector<MSHR *> outgoing;
if (writeBuffer.findMatches(pkt->getAddr(), pkt->isSecure(),
outgoing)) {
assert(outgoing.size() == 1);
MSHR *wb_entry = outgoing[0];
WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
pkt->isSecure());
if (wb_entry) {
assert(wb_entry->getNumTargets() == 1);
PacketPtr wbPkt = wb_entry->getTarget()->pkt;
assert(wbPkt->isWriteback());
@ -388,7 +372,7 @@ Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
// Dirty writeback from above trumps our clean
// writeback... discard here
// Note: markInService will remove entry from writeback buffer.
markInService(wb_entry, false);
markInService(wb_entry);
delete wbPkt;
}
}
@ -1238,6 +1222,50 @@ Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
/////////////////////////////////////////////////////
void
Cache::handleUncacheableWriteResp(PacketPtr pkt)
{
WriteQueueEntry *wq_entry =
dynamic_cast<WriteQueueEntry*>(pkt->senderState);
assert(wq_entry);
WriteQueueEntry::Target *target = wq_entry->getTarget();
Packet *tgt_pkt = target->pkt;
// we send out invalidation reqs and get invalidation
// responses for write-line requests
assert(tgt_pkt->cmd != MemCmd::WriteLineReq);
int stats_cmd_idx = tgt_pkt->cmdToIndex();
Tick miss_latency = curTick() - target->recvTime;
assert(pkt->req->masterId() < system->maxMasters());
mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
miss_latency;
tgt_pkt->makeTimingResponse();
// if this packet is an error copy that to the new packet
if (pkt->isError())
tgt_pkt->copyError(pkt);
// Reset the bus additional time as it is now accounted for
tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
Tick completion_time = clockEdge(responseLatency) +
pkt->headerDelay + pkt->payloadDelay;
cpuSidePort->schedTimingResp(tgt_pkt, completion_time, true);
wq_entry->popTarget();
assert(!wq_entry->hasTargets());
bool wasFull = writeBuffer.isFull();
writeBuffer.deallocate(wq_entry);
if (wasFull && !writeBuffer.isFull()) {
clearBlocked(Blocked_NoWBBuffers);
}
delete pkt;
}
void
Cache::recvTimingResp(PacketPtr pkt)
{
@ -1248,11 +1276,8 @@ Cache::recvTimingResp(PacketPtr pkt)
panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
"%s saw a non-zero packet delay\n", name());
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
bool is_error = pkt->isError();
assert(mshr);
if (is_error) {
DPRINTF(Cache, "Cache received packet with error for addr %#llx (%s), "
"cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
@ -1263,8 +1288,18 @@ Cache::recvTimingResp(PacketPtr pkt)
pkt->cmdString(), pkt->getAddr(), pkt->getSize(),
pkt->isSecure() ? "s" : "ns");
MSHRQueue *mq = mshr->queue;
bool wasFull = mq->isFull();
// if this is a write, we should be looking at an uncacheable
// write
if (pkt->isWrite()) {
assert(pkt->req->isUncacheable());
handleUncacheableWriteResp(pkt);
return;
}
// we have dealt with any (uncacheable) writes above, from here on
// we know we are dealing with an MSHR due to a miss or a prefetch
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
assert(mshr);
if (mshr == noTargetMSHR) {
// we always clear at least one target
@ -1276,14 +1311,6 @@ Cache::recvTimingResp(PacketPtr pkt)
MSHR::Target *initial_tgt = mshr->getTarget();
int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
Tick miss_latency = curTick() - initial_tgt->recvTime;
PacketList writebacks;
// We need forward_time here because we have a call of
// allocateWriteBuffer() that need this parameter to specify the
// time to request the bus. In this case we use forward latency
// because there is a writeback. We pay also here for headerDelay
// that is charged of bus latencies if the packet comes from the
// bus.
Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
if (pkt->req->isUncacheable()) {
assert(pkt->req->masterId() < system->maxMasters());
@ -1295,6 +1322,12 @@ Cache::recvTimingResp(PacketPtr pkt)
miss_latency;
}
bool wasFull = mshrQueue.isFull();
PacketList writebacks;
Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
// upgrade deferred targets if the response has no sharers, and is
// thus passing writable
if (!pkt->hasSharers()) {
@ -1470,18 +1503,17 @@ Cache::recvTimingResp(PacketPtr pkt)
if (blk) {
blk->status &= ~BlkReadable;
}
mq = mshr->queue;
mq->markPending(mshr);
mshrQueue.markPending(mshr);
schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
} else {
mq->deallocate(mshr);
if (wasFull && !mq->isFull()) {
clearBlocked((BlockedCause)mq->index);
mshrQueue.deallocate(mshr);
if (wasFull && !mshrQueue.isFull()) {
clearBlocked(Blocked_NoMSHRs);
}
// Request the bus for a prefetch if this deallocation freed enough
// MSHRs for a prefetch to take place
if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
if (prefetcher && mshrQueue.canPrefetch()) {
Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
clockEdge());
if (next_pf_time != MaxTick)
@ -1715,11 +1747,9 @@ Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
CacheBlk::State old_state = blk ? blk->status : 0;
#endif
// When handling a fill, discard any CleanEvicts for the
// same address in write buffer.
Addr M5_VAR_USED blk_addr = blockAlign(pkt->getAddr());
std::vector<MSHR *> M5_VAR_USED wbs;
assert (!writeBuffer.findMatches(blk_addr, is_secure, wbs));
// When handling a fill, we should have no writes to this line.
assert(addr == blockAlign(addr));
assert(!writeBuffer.findMatch(addr, is_secure));
if (blk == NULL) {
// better have read new data...
@ -2107,15 +2137,10 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
}
//We also need to check the writeback buffers and handle those
std::vector<MSHR *> writebacks;
if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
if (wb_entry) {
DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
pkt->getAddr(), is_secure ? "s" : "ns");
// Look through writebacks for any cachable writes.
// We should only ever find a single match
assert(writebacks.size() == 1);
MSHR *wb_entry = writebacks[0];
// Expect to see only Writebacks and/or CleanEvicts here, both of
// which should not be generated for uncacheable data.
assert(!wb_entry->isUncacheable());
@ -2166,7 +2191,7 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
if (invalidate) {
// Invalidation trumps our writeback... discard here
// Note: markInService will remove entry from writeback buffer.
markInService(wb_entry, false);
markInService(wb_entry);
delete wb_pkt;
}
}
@ -2209,26 +2234,28 @@ Cache::recvAtomicSnoop(PacketPtr pkt)
}
MSHR *
Cache::getNextMSHR()
QueueEntry*
Cache::getNextQueueEntry()
{
// Check both MSHR queue and write buffer for potential requests,
// note that null does not mean there is no request, it could
// simply be that it is not ready
MSHR *miss_mshr = mshrQueue.getNextMSHR();
MSHR *write_mshr = writeBuffer.getNextMSHR();
MSHR *miss_mshr = mshrQueue.getNext();
WriteQueueEntry *wq_entry = writeBuffer.getNext();
// If we got a write buffer request ready, first priority is a
// full write buffer, otherwise we favour the miss requests
if (write_mshr &&
((writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) ||
// full write buffer (but only if we have no uncacheable write
// responses outstanding, possibly revisit this last part),
// otherwise we favour the miss requests
if (wq_entry &&
((writeBuffer.isFull() && writeBuffer.numInService() == 0) ||
!miss_mshr)) {
// need to search MSHR queue for conflicting earlier miss.
MSHR *conflict_mshr =
mshrQueue.findPending(write_mshr->blkAddr,
write_mshr->isSecure);
mshrQueue.findPending(wq_entry->blkAddr,
wq_entry->isSecure);
if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
// Service misses in order until conflict is cleared.
return conflict_mshr;
@ -2236,10 +2263,10 @@ Cache::getNextMSHR()
}
// No conflicts; issue write
return write_mshr;
return wq_entry;
} else if (miss_mshr) {
// need to check for conflicting earlier writeback
MSHR *conflict_mshr =
WriteQueueEntry *conflict_mshr =
writeBuffer.findPending(miss_mshr->blkAddr,
miss_mshr->isSecure);
if (conflict_mshr) {
@ -2252,7 +2279,7 @@ Cache::getNextMSHR()
// We need to make sure to perform the writeback first
// To preserve the dirty data, then we can issue the write
// should we return write_mshr here instead? I.e. do we
// should we return wq_entry here instead? I.e. do we
// have to flush writes in order? I don't think so... not
// for Alpha anyway. Maybe for x86?
return conflict_mshr;
@ -2265,7 +2292,7 @@ Cache::getNextMSHR()
}
// fall through... no pending requests. Try a prefetch.
assert(!miss_mshr && !write_mshr);
assert(!miss_mshr && !wq_entry);
if (prefetcher && mshrQueue.canPrefetch()) {
// If we have a miss queue slot, we can try a prefetch
PacketPtr pkt = prefetcher->getPacket();
@ -2291,7 +2318,7 @@ Cache::getNextMSHR()
}
}
return NULL;
return nullptr;
}
bool
@ -2322,25 +2349,41 @@ Cache::isCachedAbove(PacketPtr pkt, bool is_timing) const
}
}
PacketPtr
Cache::getTimingPacket()
Tick
Cache::nextQueueReadyTime() const
{
MSHR *mshr = getNextMSHR();
Tick nextReady = std::min(mshrQueue.nextReadyTime(),
writeBuffer.nextReadyTime());
if (mshr == NULL) {
return NULL;
// Don't signal prefetch ready time if no MSHRs available
// Will signal once enough MSHRs are deallocated
if (prefetcher && mshrQueue.canPrefetch()) {
nextReady = std::min(nextReady,
prefetcher->nextPrefetchReadyTime());
}
return nextReady;
}
bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
assert(mshr);
// use request from 1st target
PacketPtr tgt_pkt = mshr->getTarget()->pkt;
PacketPtr pkt = NULL;
DPRINTF(CachePort, "%s %s for addr %#llx size %d\n", __func__,
tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());
DPRINTF(Cache, "%s MSHR %s for addr %#llx size %d\n", __func__,
tgt_pkt->cmdString(), tgt_pkt->getAddr(),
tgt_pkt->getSize());
CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
// we should never have hardware prefetches to allocated
// blocks
assert(blk == NULL);
// We need to check the caches above us to verify that
// they don't have a copy of this block in the dirty state
// at the moment. Without this check we could get a stale
@ -2379,65 +2422,119 @@ Cache::getTimingPacket()
DPRINTF(Cache, "Upward snoop of prefetch for addr"
" %#x (%s) hit\n",
tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
return NULL;
return false;
}
if (snoop_pkt.isBlockCached() || blk != NULL) {
if (snoop_pkt.isBlockCached()) {
DPRINTF(Cache, "Block present, prefetch squashed by cache. "
"Deallocating mshr target %#x.\n",
mshr->blkAddr);
// Deallocate the mshr target
if (mshr->queue->forceDeallocateTarget(mshr)) {
if (mshrQueue.forceDeallocateTarget(mshr)) {
// Clear blocked status if this deallocation freed an MSHR
// when all had previously been utilized
clearBlocked((BlockedCause)(mshr->queue->index));
clearBlocked(Blocked_NoMSHRs);
}
return NULL;
return false;
}
}
if (mshr->isForwardNoResponse()) {
// no response expected, just forward packet as it is
assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
pkt = tgt_pkt;
} else {
pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
// either a prefetch that is not present upstream, or a normal
// MSHR request, proceed to get the packet to send downstream
PacketPtr pkt = getBusPacket(tgt_pkt, blk, mshr->needsWritable());
mshr->isForward = (pkt == NULL);
mshr->isForward = (pkt == NULL);
if (mshr->isForward) {
// not a cache block request, but a response is expected
// make copy of current packet to forward, keep current
// copy for response handling
pkt = new Packet(tgt_pkt, false, true);
if (pkt->isWrite()) {
pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
}
}
if (mshr->isForward) {
// not a cache block request, but a response is expected
// make copy of current packet to forward, keep current
// copy for response handling
pkt = new Packet(tgt_pkt, false, true);
assert(!pkt->isWrite());
}
assert(pkt != NULL);
// play it safe and append (rather than set) the sender state, as
// forwarded packets may already have existing state
// play it safe and append (rather than set) the sender state,
// as forwarded packets may already have existing state
pkt->pushSenderState(mshr);
return pkt;
if (!memSidePort->sendTimingReq(pkt)) {
// we are awaiting a retry, but we
// delete the packet and will be creating a new packet
// when we get the opportunity
delete pkt;
// note that we have now masked any requestBus and
// schedSendEvent (we will wait for a retry before
// doing anything), and this is so even if we do not
// care about this packet and might override it before
// it gets retried
return true;
} else {
// As part of the call to sendTimingReq the packet is
// forwarded to all neighbouring caches (and any caches
// above them) as a snoop. Thus at this point we know if
// any of the neighbouring caches are responding, and if
// so, we know it is dirty, and we can determine if it is
// being passed as Modified, making our MSHR the ordering
// point
bool pending_modified_resp = !pkt->hasSharers() &&
pkt->cacheResponding();
markInService(mshr, pending_modified_resp);
return false;
}
}
Tick
Cache::nextMSHRReadyTime() const
bool
Cache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
writeBuffer.nextMSHRReadyTime());
assert(wq_entry);
// Don't signal prefetch ready time if no MSHRs available
// Will signal once enough MSHRs are deallocated
if (prefetcher && mshrQueue.canPrefetch()) {
nextReady = std::min(nextReady,
prefetcher->nextPrefetchReadyTime());
// always a single target for write queue entries
PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;
DPRINTF(Cache, "%s write %s for addr %#llx size %d\n", __func__,
tgt_pkt->cmdString(), tgt_pkt->getAddr(),
tgt_pkt->getSize());
PacketPtr pkt = nullptr;
bool delete_pkt = false;
if (tgt_pkt->isEviction()) {
assert(!wq_entry->isUncacheable());
// no response expected, just forward packet as it is
pkt = tgt_pkt;
} else {
// the only thing we deal with besides eviction commands
// are uncacheable writes
assert(tgt_pkt->req->isUncacheable() && tgt_pkt->isWrite());
// not a cache block request, but a response is expected
// make copy of current packet to forward, keep current
// copy for response handling
pkt = new Packet(tgt_pkt, false, true);
pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
delete_pkt = true;
}
return nextReady;
pkt->pushSenderState(wq_entry);
if (!memSidePort->sendTimingReq(pkt)) {
if (delete_pkt) {
// we are awaiting a retry, but we
// delete the packet and will be creating a new packet
// when we get the opportunity
delete pkt;
}
// note that we have now masked any requestBus and
// schedSendEvent (we will wait for a retry before
// doing anything), and this is so even if we do not
// care about this packet and might override it before
// it gets retried
return true;
} else {
markInService(wq_entry);
return false;
}
}
void
@ -2586,71 +2683,27 @@ Cache::CacheReqPacketQueue::sendDeferredPacket()
assert(deferredPacketReadyTime() == MaxTick);
// check for request packets (requests & writebacks)
PacketPtr pkt = cache.getTimingPacket();
if (pkt == NULL) {
QueueEntry* entry = cache.getNextQueueEntry();
if (!entry) {
// can happen if e.g. we attempt a writeback and fail, but
// before the retry, the writeback is eliminated because
// we snoop another cache's ReadEx.
} else {
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
// in most cases getTimingPacket allocates a new packet, and
// we must delete it unless it is successfully sent
bool delete_pkt = !mshr->isForwardNoResponse();
// let our snoop responses go first if there are responses to
// the same addresses we are about to writeback, note that
// this creates a dependency between requests and snoop
// responses, but that should not be a problem since there is
// a chain already and the key is that the snoop responses can
// sink unconditionally
if (snoopRespQueue.hasAddr(pkt->getAddr())) {
DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
Tick when = snoopRespQueue.deferredPacketReadyTime();
schedSendEvent(when);
if (delete_pkt)
delete pkt;
// the same addresses
if (checkConflictingSnoop(entry->blkAddr)) {
return;
}
waitingOnRetry = !masterPort.sendTimingReq(pkt);
if (waitingOnRetry) {
DPRINTF(CachePort, "now waiting on a retry\n");
if (delete_pkt) {
// we are awaiting a retry, but we
// delete the packet and will be creating a new packet
// when we get the opportunity
delete pkt;
}
// note that we have now masked any requestBus and
// schedSendEvent (we will wait for a retry before
// doing anything), and this is so even if we do not
// care about this packet and might override it before
// it gets retried
} else {
// As part of the call to sendTimingReq the packet is
// forwarded to all neighbouring caches (and any caches
// above them) as a snoop. Thus at this point we know if
// any of the neighbouring caches are responding, and if
// so, we know it is dirty, and we can determine if it is
// being passed as Modified, making our MSHR the ordering
// point
bool pending_modified_resp = !pkt->hasSharers() &&
pkt->cacheResponding();
cache.markInService(mshr, pending_modified_resp);
}
waitingOnRetry = entry->sendPacket(cache);
}
// if we succeeded and are not waiting for a retry, schedule the
// next send considering when the next MSHR is ready, note that
// next send considering when the next queue is ready, note that
// snoop responses have their own packet queue and thus schedule
// their own events
if (!waitingOnRetry) {
schedSendEvent(cache.nextMSHRReadyTime());
schedSendEvent(cache.nextQueueReadyTime());
}
}

src/mem/cache/cache.hh

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2015 ARM Limited
* Copyright (c) 2012-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@ -137,6 +137,23 @@ class Cache : public BaseCache
*/
virtual void sendDeferredPacket();
/**
* Check if there is a conflicting snoop response about to be
* sent out, and if so simply stall any requests, and schedule
* a send event at the same time as the next snoop response is
* being sent out.
*/
bool checkConflictingSnoop(Addr addr)
{
if (snoopRespQueue.hasAddr(addr)) {
DPRINTF(CachePort, "Waiting for snoop response to be "
"sent\n");
Tick when = snoopRespQueue.deferredPacketReadyTime();
schedSendEvent(when);
return true;
}
return false;
}
};
/**
@ -338,6 +355,12 @@ class Cache : public BaseCache
*/
void doWritebacksAtomic(PacketList& writebacks);
/**
* Handling the special case of uncacheable write responses to
* make recvTimingResp less cluttered.
*/
void handleUncacheableWriteResp(PacketPtr pkt);
/**
* Handles a response (cache line fill/write ack) from the bus.
* @param pkt The response packet
@ -451,12 +474,12 @@ class Cache : public BaseCache
bool needsExclusive) const;
/**
* Return the next MSHR to service, either a pending miss from the
* mshrQueue, a buffered write from the write buffer, or something
* from the prefetcher. This function is responsible for
* prioritizing among those sources on the fly.
* Return the next queue entry to service, either a pending miss
* from the MSHR queue, a buffered write from the write buffer, or
* something from the prefetcher. This function is responsible
* for prioritizing among those sources on the fly.
*/
MSHR *getNextMSHR();
QueueEntry* getNextQueueEntry();
/**
* Send up a snoop request and find cached copies. If cached copies are
@ -464,29 +487,12 @@ class Cache : public BaseCache
*/
bool isCachedAbove(PacketPtr pkt, bool is_timing = true) const;
/**
* Selects an outstanding request to service. Called when the
* cache gets granted the downstream bus in timing mode.
* @return The request to service, NULL if none found.
*/
PacketPtr getTimingPacket();
/**
* Marks a request as in service (sent downstream in the memory
* system). This can have side effect since storage for no
* response commands is deallocated once they are successfully
* sent. Also remember if we are expecting a Modified (dirty and
* writable) response from another cache, effectively making this
* MSHR the ordering point.
*/
void markInService(MSHR *mshr, bool pending_modified_resp);
/**
* Return whether there are any outstanding misses.
*/
bool outstandingMisses() const
{
return mshrQueue.allocated != 0;
return !mshrQueue.isEmpty();
}
CacheBlk *findBlock(Addr addr, bool is_secure) const {
@ -504,7 +510,7 @@ class Cache : public BaseCache
/**
* Find next request ready time from among possible sources.
*/
Tick nextMSHRReadyTime() const;
Tick nextQueueReadyTime() const;
public:
/** Instantiates a basic cache object. */
@ -515,6 +521,26 @@ class Cache : public BaseCache
void regStats() override;
/**
* Take an MSHR, turn it into a suitable downstream packet, and
* send it out. This construct allows a queue entry to choose a suitable
* approach based on its type.
*
* @param mshr The MSHR to turn into a packet and send
* @return True if the port is waiting for a retry
*/
bool sendMSHRQueuePacket(MSHR* mshr);
/**
* Similar to sendMSHR, but for a write-queue entry
* instead. Create the packet, and send it, and if successful also
* mark the entry in service.
*
* @param wq_entry The write-queue entry to turn into a packet and send
* @return True if the port is waiting for a retry
*/
bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
/** serialize the state of the caches
* We currently don't support checkpointing cache state, so this panics.
*/

src/mem/cache/mshr.cc

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2013, 2015 ARM Limited
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@ -61,17 +61,13 @@
using namespace std;
MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
MSHR::MSHR() : downstreamPending(false),
pendingModified(false),
postInvalidate(false), postDowngrade(false),
queue(NULL), order(0), blkAddr(0),
blkSize(0), isSecure(false), inService(false),
isForward(false), allocOnFill(false),
data(NULL)
isForward(false), allocOnFill(false)
{
}
MSHR::TargetList::TargetList()
: needsWritable(false), hasUpgrade(false)
{}
@ -239,7 +235,6 @@ MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
Target::FromPrefetcher : Target::FromCPU;
targets.add(target, when_ready, _order, source, true);
assert(deferredTargets.isReset());
data = NULL;
}
@ -253,17 +248,10 @@ MSHR::clearDownstreamPending()
targets.clearDownstreamPending();
}
bool
void
MSHR::markInService(bool pending_modified_resp)
{
assert(!inService);
if (isForwardNoResponse()) {
// we just forwarded the request packet & don't expect a
// response, so get rid of it
assert(getNumTargets() == 1);
popTarget();
return true;
}
inService = true;
pendingModified = targets.needsWritable || pending_modified_resp;
@ -274,7 +262,6 @@ MSHR::markInService(bool pending_modified_resp)
// level where it's going to get a response
targets.clearDownstreamPending();
}
return false;
}
@ -512,6 +499,11 @@ MSHR::checkFunctional(PacketPtr pkt)
}
}
bool
MSHR::sendPacket(Cache &cache)
{
return cache.sendMSHRQueuePacket(this);
}
void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
@ -521,7 +513,6 @@ MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
isSecure ? "s" : "ns",
isForward ? "Forward" : "",
allocOnFill ? "AllocOnFill" : "",
isForwardNoResponse() ? "ForwNoResp" : "",
needsWritable() ? "Wrtbl" : "",
_isUncacheable ? "Unc" : "",
inService ? "InSvc" : "",

src/mem/cache/mshr.hh

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2013, 2015 ARM Limited
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@ -51,32 +51,27 @@
#include <list>
#include "base/printable.hh"
#include "mem/packet.hh"
#include "mem/cache/queue_entry.hh"
class CacheBlk;
class MSHRQueue;
class Cache;
/**
* Miss Status and handling Register. This class keeps all the information
* needed to handle a cache miss including a list of target requests.
* @sa \ref gem5MemorySystem "gem5 Memory System"
*/
class MSHR : public Packet::SenderState, public Printable
class MSHR : public QueueEntry, public Printable
{
/**
* Consider the MSHRQueue a friend to avoid making everything public
* Consider the queues friends to avoid making everything public.
*/
template<typename Entry>
friend class Queue;
friend class MSHRQueue;
private:
/** Cycle when ready to issue */
Tick readyTime;
/** True if the request is uncacheable */
bool _isUncacheable;
/** Flag set by downstream caches */
bool downstreamPending;
@ -114,6 +109,9 @@ class MSHR : public Packet::SenderState, public Printable
public:
/** True if the entry is just a simple forward from an upper level */
bool isForward;
class Target {
public:
@ -166,29 +164,6 @@ class MSHR : public Packet::SenderState, public Printable
typedef std::list<MSHR *> List;
/** MSHR list iterator. */
typedef List::iterator Iterator;
/** MSHR list const_iterator. */
typedef List::const_iterator ConstIterator;
/** Pointer to queue containing this MSHR. */
MSHRQueue *queue;
/** Order number assigned by the miss queue. */
Counter order;
/** Block aligned address of the MSHR. */
Addr blkAddr;
/** Block size of the cache. */
unsigned blkSize;
/** True if the request targets the secure memory space. */
bool isSecure;
/** True if the request has been sent to the bus. */
bool inService;
/** True if the request is just a simple forward from an upper level */
bool isForward;
/** Keep track of whether we should allocate on fill or not */
bool allocOnFill;
@ -213,11 +188,9 @@ class MSHR : public Packet::SenderState, public Printable
assert(inService); return postDowngrade;
}
private:
bool sendPacket(Cache &cache);
/** Data buffer (if needed). Currently used only for pending
* upgrade handling. */
uint8_t *data;
private:
/**
* Pointer to this MSHR on the ready list.
@ -238,8 +211,6 @@ class MSHR : public Packet::SenderState, public Printable
public:
bool isUncacheable() const { return _isUncacheable; }
/**
* Allocate a miss to this MSHR.
* @param blk_addr The address of the block.
@ -252,7 +223,7 @@ class MSHR : public Packet::SenderState, public Printable
void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter _order, bool alloc_on_fill);
bool markInService(bool pending_modified_resp);
void markInService(bool pending_modified_resp);
void clearDownstreamPending();
@ -303,14 +274,6 @@ class MSHR : public Packet::SenderState, public Printable
targets.pop_front();
}
bool isForwardNoResponse() const
{
if (getNumTargets() != 1)
return false;
const Target *tgt = &targets.front();
return tgt->source == Target::FromCPU && !tgt->pkt->needsResponse();
}
bool promoteDeferredTargets();
void promoteWritable();

src/mem/cache/mshr_queue.cc

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2013, 2015 ARM Limited
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@ -45,104 +45,15 @@
* Definition of MSHRQueue class functions.
*/
#include "base/trace.hh"
#include "mem/cache/mshr_queue.hh"
#include "debug/Drain.hh"
using namespace std;
MSHRQueue::MSHRQueue(const std::string &_label,
int num_entries, int reserve, int demand_reserve,
int _index)
: label(_label), numEntries(num_entries + reserve - 1),
numReserve(reserve), demandReserve(demand_reserve),
registers(numEntries), allocated(0),
inServiceEntries(0), index(_index)
{
for (int i = 0; i < numEntries; ++i) {
registers[i].queue = this;
freeList.push_back(&registers[i]);
}
}
MSHR *
MSHRQueue::findMatch(Addr blk_addr, bool is_secure) const
{
for (const auto& mshr : allocatedList) {
// we ignore any MSHRs allocated for uncacheable accesses and
// simply ignore them when matching, in the cache we never
// check for matches when adding new uncacheable entries, and
// we do not want normal cacheable accesses being added to an
// MSHR serving an uncacheable access
if (!mshr->isUncacheable() && mshr->blkAddr == blk_addr &&
mshr->isSecure == is_secure) {
return mshr;
}
}
return NULL;
}
bool
MSHRQueue::findMatches(Addr blk_addr, bool is_secure,
vector<MSHR*>& matches) const
{
// Need an empty vector
assert(matches.empty());
bool retval = false;
for (const auto& mshr : allocatedList) {
if (!mshr->isUncacheable() && mshr->blkAddr == blk_addr &&
mshr->isSecure == is_secure) {
retval = true;
matches.push_back(mshr);
}
}
return retval;
}
bool
MSHRQueue::checkFunctional(PacketPtr pkt, Addr blk_addr)
{
pkt->pushLabel(label);
for (const auto& mshr : allocatedList) {
if (mshr->blkAddr == blk_addr && mshr->checkFunctional(pkt)) {
pkt->popLabel();
return true;
}
}
pkt->popLabel();
return false;
}
MSHR *
MSHRQueue::findPending(Addr blk_addr, bool is_secure) const
{
for (const auto& mshr : readyList) {
if (mshr->blkAddr == blk_addr && mshr->isSecure == is_secure) {
return mshr;
}
}
return NULL;
}
MSHR::Iterator
MSHRQueue::addToReadyList(MSHR *mshr)
{
if (readyList.empty() || readyList.back()->readyTime <= mshr->readyTime) {
return readyList.insert(readyList.end(), mshr);
}
for (auto i = readyList.begin(); i != readyList.end(); ++i) {
if ((*i)->readyTime > mshr->readyTime) {
return readyList.insert(i, mshr);
}
}
assert(false);
return readyList.end(); // keep stupid compilers happy
}
int num_entries, int reserve, int demand_reserve)
: Queue<MSHR>(_label, num_entries, reserve),
demandReserve(demand_reserve)
{}
MSHR *
MSHRQueue::allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
@ -161,34 +72,6 @@ MSHRQueue::allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
return mshr;
}
void
MSHRQueue::deallocate(MSHR *mshr)
{
deallocateOne(mshr);
}
MSHR::Iterator
MSHRQueue::deallocateOne(MSHR *mshr)
{
MSHR::Iterator retval = allocatedList.erase(mshr->allocIter);
freeList.push_front(mshr);
allocated--;
if (mshr->inService) {
inServiceEntries--;
} else {
readyList.erase(mshr->readyIter);
}
mshr->deallocate();
if (drainState() == DrainState::Draining && allocated == 0) {
// Notify the drain manager that we have completed draining if
// there are no other outstanding requests in this MSHR queue.
DPRINTF(Drain, "MSHRQueue now empty, signalling drained\n");
signalDrainDone();
}
return retval;
}
void
MSHRQueue::moveToFront(MSHR *mshr)
{
@ -202,12 +85,9 @@ MSHRQueue::moveToFront(MSHR *mshr)
void
MSHRQueue::markInService(MSHR *mshr, bool pending_modified_resp)
{
if (mshr->markInService(pending_modified_resp)) {
deallocate(mshr);
} else {
readyList.erase(mshr->readyIter);
inServiceEntries += 1;
}
mshr->markInService(pending_modified_resp);
readyList.erase(mshr->readyIter);
_numInService += 1;
}
void
@ -215,7 +95,7 @@ MSHRQueue::markPending(MSHR *mshr)
{
assert(mshr->inService);
mshr->inService = false;
--inServiceEntries;
--_numInService;
/**
* @ todo might want to add rerequests to front of pending list for
* performance.
@ -232,15 +112,9 @@ MSHRQueue::forceDeallocateTarget(MSHR *mshr)
mshr->popTarget();
// Delete mshr if no remaining targets
if (!mshr->hasTargets() && !mshr->promoteDeferredTargets()) {
deallocateOne(mshr);
deallocate(mshr);
}
// Notify if MSHR queue no longer full
return was_full && !isFull();
}
DrainState
MSHRQueue::drain()
{
return allocated == 0 ? DrainState::Drained : DrainState::Draining;
}

src/mem/cache/mshr_queue.hh

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012-2013, 2015 ARM Limited
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
@ -51,31 +51,14 @@
#include <vector>
#include "mem/cache/mshr.hh"
#include "mem/packet.hh"
#include "sim/drain.hh"
#include "mem/cache/queue.hh"
/**
* A Class for maintaining a list of pending and allocated memory requests.
*/
class MSHRQueue : public Drainable
class MSHRQueue : public Queue<MSHR>
{
private:
/** Local label (for functional print requests) */
const std::string label;
// Parameters
/**
* The total number of entries in this queue. This number is set as the
* number of entries requested plus (numReserve - 1). This allows for
* the same number of effective entries while still maintaining the reserve.
*/
const int numEntries;
/**
* The number of entries to hold in reserve. This is needed because copy
* operations can allocate upto 4 entries at one time.
*/
const int numReserve;
/**
* The number of entries to reserve for future demand accesses.
@ -83,26 +66,7 @@ class MSHRQueue : public Drainable
*/
const int demandReserve;
/** MSHR storage. */
std::vector<MSHR> registers;
/** Holds pointers to all allocated entries. */
MSHR::List allocatedList;
/** Holds pointers to entries that haven't been sent to the bus. */
MSHR::List readyList;
/** Holds non allocated entries. */
MSHR::List freeList;
MSHR::Iterator addToReadyList(MSHR *mshr);
public:
/** The number of allocated entries. */
int allocated;
/** The number of entries that have been forwarded to the bus. */
int inServiceEntries;
/** The index of this queue within the cache (MSHR queue vs. write
* buffer). */
const int index;
/**
* Create a queue with a given number of entries.
@ -113,35 +77,7 @@ class MSHRQueue : public Drainable
* demand accesses.
*/
MSHRQueue(const std::string &_label, int num_entries, int reserve,
int demand_reserve, int index);
/**
* Find the first MSHR that matches the provided address.
* @param blk_addr The block address to find.
* @param is_secure True if the target memory space is secure.
* @return Pointer to the matching MSHR, null if not found.
*/
MSHR *findMatch(Addr blk_addr, bool is_secure) const;
/**
* Find and return all the matching entries in the provided vector.
* @param blk_addr The block address to find.
* @param is_secure True if the target memory space is secure.
* @param matches The vector to return pointers to the matching entries.
* @return True if any matches are found, false otherwise.
*/
bool findMatches(Addr blk_addr, bool is_secure,
std::vector<MSHR*>& matches) const;
/**
* Find any pending requests that overlap the given request.
* @param blk_addr Block address.
* @param is_secure True if the target memory space is secure.
* @return A pointer to the earliest matching MSHR.
*/
MSHR *findPending(Addr blk_addr, bool is_secure) const;
bool checkFunctional(PacketPtr pkt, Addr blk_addr);
int demand_reserve);
/**
* Allocates a new MSHR for the request and size. This places the request
@ -161,21 +97,6 @@ class MSHRQueue : public Drainable
MSHR *allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter order, bool alloc_on_fill);
/**
* Removes the given MSHR from the queue. This places the MSHR on the
* free list.
* @param mshr
*/
void deallocate(MSHR *mshr);
/**
* Remove a MSHR from the queue. Returns an iterator into the
* allocatedList.
* @param mshr The MSHR to remove.
* @return An iterator to the next entry in the allocatedList.
*/
MSHR::Iterator deallocateOne(MSHR *mshr);
/**
* Moves the MSHR to the front of the pending list if it is not
* in service.
@ -214,15 +135,6 @@ class MSHRQueue : public Drainable
return !readyList.empty();
}
/**
* Returns true if there are no free entries.
* @return True if this queue is full.
*/
bool isFull() const
{
return (allocated > numEntries - numReserve);
}
/**
* Returns true if sufficient mshrs for prefetch.
* @return True if sufficient mshrs for prefetch.
@ -231,25 +143,6 @@ class MSHRQueue : public Drainable
{
return (allocated < numEntries - (numReserve + demandReserve));
}
/**
* Returns the MSHR at the head of the readyList.
* @return The next request to service.
*/
MSHR *getNextMSHR() const
{
if (readyList.empty() || readyList.front()->readyTime > curTick()) {
return NULL;
}
return readyList.front();
}
Tick nextMSHRReadyTime() const
{
return readyList.empty() ? MaxTick : readyList.front()->readyTime;
}
DrainState drain() override;
};
#endif //__MEM_CACHE_MSHR_QUEUE_HH__

src/mem/cache/queue.hh (new file)

@ -0,0 +1,251 @@
/*
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Andreas Sandberg
* Andreas Hansson
*/
/** @file
* Declaration of a high-level queue structure
*/
#ifndef __MEM_CACHE_QUEUE_HH__
#define __MEM_CACHE_QUEUE_HH__
#include <cassert>
#include "base/trace.hh"
#include "debug/Drain.hh"
#include "mem/cache/queue_entry.hh"
#include "sim/drain.hh"
/**
* A high-level queue interface, to be used by both the MSHR queue and
* the write buffer.
*/
template<class Entry>
class Queue : public Drainable
{
protected:
/** Local label (for functional print requests) */
const std::string label;
/**
* The total number of entries in this queue. This number is set
* as the number of entries requested plus (numReserve - 1). This
* allows for the same number of effective entries while still
* maintaining an overflow reserve.
*/
const int numEntries;
/**
* The number of entries to hold as a temporary overflow
* space. This is used to allow temporary overflow of the number
* of entries as we only check the full condition under certain
* conditions.
*/
const int numReserve;
/** Actual storage. */
std::vector<Entry> entries;
/** Holds pointers to all allocated entries. */
typename Entry::List allocatedList;
/** Holds pointers to entries that haven't been sent downstream. */
typename Entry::List readyList;
/** Holds non allocated entries. */
typename Entry::List freeList;
typename Entry::Iterator addToReadyList(Entry* entry)
{
if (readyList.empty() ||
readyList.back()->readyTime <= entry->readyTime) {
return readyList.insert(readyList.end(), entry);
}
for (auto i = readyList.begin(); i != readyList.end(); ++i) {
if ((*i)->readyTime > entry->readyTime) {
return readyList.insert(i, entry);
}
}
assert(false);
return readyList.end(); // keep stupid compilers happy
}
/** The number of entries that are in service. */
int _numInService;
/** The number of currently allocated entries. */
int allocated;
public:
/**
* Create a queue with a given number of entries.
*
* @param num_entries The number of entries in this queue.
* @param num_overflow The extra overflow entries needed.
*/
Queue(const std::string &_label, int num_entries, int reserve) :
label(_label), numEntries(num_entries + reserve - 1),
numReserve(reserve), entries(numEntries), _numInService(0),
allocated(0)
{
for (int i = 0; i < numEntries; ++i) {
freeList.push_back(&entries[i]);
}
}
bool isEmpty() const
{
return allocated == 0;
}
bool isFull() const
{
return (allocated > numEntries - numReserve);
}
int numInService() const
{
return _numInService;
}
/**
* Find the first WriteQueueEntry that matches the provided address.
* @param blk_addr The block address to find.
* @param is_secure True if the target memory space is secure.
* @return Pointer to the matching WriteQueueEntry, null if not found.
*/
Entry* findMatch(Addr blk_addr, bool is_secure) const
{
for (const auto& entry : allocatedList) {
// we ignore any entries allocated for uncacheable
// accesses and simply ignore them when matching, in the
// cache we never check for matches when adding new
// uncacheable entries, and we do not want normal
// cacheable accesses being added to a WriteQueueEntry
// serving an uncacheable access
if (!entry->isUncacheable() && entry->blkAddr == blk_addr &&
entry->isSecure == is_secure) {
return entry;
}
}
return nullptr;
}
bool checkFunctional(PacketPtr pkt, Addr blk_addr)
{
pkt->pushLabel(label);
for (const auto& entry : allocatedList) {
if (entry->blkAddr == blk_addr && entry->checkFunctional(pkt)) {
pkt->popLabel();
return true;
}
}
pkt->popLabel();
return false;
}
/**
* Find any pending requests that overlap the given request.
* @param blk_addr Block address.
* @param is_secure True if the target memory space is secure.
* @return A pointer to the earliest matching WriteQueueEntry.
*/
Entry* findPending(Addr blk_addr, bool is_secure) const
{
for (const auto& entry : readyList) {
if (entry->blkAddr == blk_addr && entry->isSecure == is_secure) {
return entry;
}
}
return nullptr;
}
/**
* Returns the WriteQueueEntry at the head of the readyList.
* @return The next request to service.
*/
Entry* getNext() const
{
if (readyList.empty() || readyList.front()->readyTime > curTick()) {
return NULL;
}
return readyList.front();
}
Tick nextReadyTime() const
{
return readyList.empty() ? MaxTick : readyList.front()->readyTime;
}
/**
* Removes the given entry from the queue. This places the entry
* on the free list.
*
* @param entry
*/
void deallocate(Entry *entry)
{
allocatedList.erase(entry->allocIter);
freeList.push_front(entry);
allocated--;
if (entry->inService) {
_numInService--;
} else {
readyList.erase(entry->readyIter);
}
entry->deallocate();
if (drainState() == DrainState::Draining && allocated == 0) {
// Notify the drain manager that we have completed
// draining if there are no other outstanding requests in
// this queue.
DPRINTF(Drain, "Queue now empty, signalling drained\n");
signalDrainDone();
}
}
DrainState drain() override
{
return allocated == 0 ? DrainState::Drained : DrainState::Draining;
}
};
#endif //__MEM_CACHE_QUEUE_HH__

src/mem/cache/queue_entry.hh (new file)

@ -0,0 +1,109 @@
/*
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Andreas Hansson
*/
/**
* @file
* Generic queue entry
*/
#ifndef __MEM_CACHE_QUEUE_ENTRY_HH__
#define __MEM_CACHE_QUEUE_ENTRY_HH__
#include "mem/packet.hh"
class Cache;
/**
* A queue entry base class, to be used by both the MSHRs and
* write-queue entries.
*/
class QueueEntry : public Packet::SenderState
{
/**
* Consider the Queue a friend to avoid making everything public
*/
template <class Entry>
friend class Queue;
protected:
/** Tick when ready to issue */
Tick readyTime;
/** True if the entry is uncacheable */
bool _isUncacheable;
public:
/** True if the entry has been sent downstream. */
bool inService;
/** Order number assigned to disambiguate writes and misses. */
Counter order;
/** Block aligned address. */
Addr blkAddr;
/** Block size of the cache. */
unsigned blkSize;
/** True if the entry targets the secure memory space. */
bool isSecure;
QueueEntry() : readyTime(0), _isUncacheable(false),
inService(false), order(0), blkAddr(0), blkSize(0),
isSecure(false)
{}
bool isUncacheable() const { return _isUncacheable; }
/**
* Send this queue entry as a downstream packet, with the exact
* behaviour depending on the specific entry type.
*/
virtual bool sendPacket(Cache &cache) = 0;
};
#endif // __MEM_CACHE_QUEUE_ENTRY_HH__

src/mem/cache/write_queue.cc (new file)

@ -0,0 +1,91 @@
/*
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Andreas Sandberg
* Andreas Hansson
*/
/** @file
* Definition of WriteQueue class functions.
*/
#include "mem/cache/write_queue.hh"
using namespace std;
WriteQueue::WriteQueue(const std::string &_label,
int num_entries, int reserve)
: Queue<WriteQueueEntry>(_label, num_entries, reserve)
{}
WriteQueueEntry *
WriteQueue::allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter order)
{
assert(!freeList.empty());
WriteQueueEntry *entry = freeList.front();
assert(entry->getNumTargets() == 0);
freeList.pop_front();
entry->allocate(blk_addr, blk_size, pkt, when_ready, order);
entry->allocIter = allocatedList.insert(allocatedList.end(), entry);
entry->readyIter = addToReadyList(entry);
allocated += 1;
return entry;
}
void
WriteQueue::markInService(WriteQueueEntry *entry)
{
if (!entry->isUncacheable()) {
// a normal eviction, such as a writeback or a clean evict, no
// more to do as we are done from the perspective of this
// cache
entry->popTarget();
deallocate(entry);
} else {
// uncacheable write, and we will eventually receive a
// response
entry->markInService();
readyList.erase(entry->readyIter);
_numInService += 1;
}
}

src/mem/cache/write_queue.hh (new file)

@ -0,0 +1,97 @@
/*
* Copyright (c) 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Andreas Hansson
*/
/**
* @file Declaration of a queue structure to manage uncacheable writes
* and writebacks.
*/
#ifndef __MEM_CACHE_WRITE_QUEUE_HH__
#define __MEM_CACHE_WRITE_QUEUE_HH__
#include <vector>
#include "mem/cache/queue.hh"
#include "mem/cache/write_queue_entry.hh"
/**
* A write queue for all eviction packets, i.e. writebacks and clean
* evictions, as well as uncacheable writes.
*/
class WriteQueue : public Queue<WriteQueueEntry>
{
public:
/**
* Create a write queue with a given number of entries.
* @param num_entries The number of entries in this queue.
* @param reserve The maximum number of entries needed to satisfy
* any access.
*/
WriteQueue(const std::string &_label, int num_entries, int reserve);
/**
* Allocates a new WriteQueueEntry for the request and size. This
* places the request as the first target in the WriteQueueEntry.
*
* @param blk_addr The address of the block.
* @param blk_size The number of bytes to request.
* @param pkt The original write.
* @param when_ready When the WriteQueueEntry should be ready to act upon.
* @param order The logical order of this WriteQueueEntry.
*
* @return A pointer to the allocated WriteQueueEntry.
*
* @pre There are free entries.
*/
WriteQueueEntry *allocate(Addr blk_addr, unsigned blk_size,
PacketPtr pkt, Tick when_ready, Counter order);
/**
* Mark the given entry as in service. This removes the entry from
* the readyList or deallocates the entry if it does not expect a
* response (writeback/eviction rather than an uncacheable write).
*
* @param entry The entry to mark in service.
*/
void markInService(WriteQueueEntry *entry);
};
#endif //__MEM_CACHE_WRITE_QUEUE_HH__

185
src/mem/cache/write_queue_entry.cc vendored Normal file
View file

@ -0,0 +1,185 @@
/*
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* Copyright (c) 2010 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Dave Greene
* Andreas Hansson
*/
/**
* @file
* Write queue entry (WriteQueueEntry) definitions.
*/
#include "mem/cache/write_queue_entry.hh"
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>
#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache.hh"
#include "sim/core.hh"
using namespace std;
inline void
WriteQueueEntry::TargetList::add(PacketPtr pkt, Tick readyTime,
Counter order)
{
emplace_back(pkt, readyTime, order);
}
bool
WriteQueueEntry::TargetList::checkFunctional(PacketPtr pkt)
{
for (auto& t : *this) {
if (pkt->checkFunctional(t.pkt)) {
return true;
}
}
return false;
}
void
WriteQueueEntry::TargetList::print(std::ostream &os, int verbosity,
const std::string &prefix) const
{
for (auto& t : *this) {
ccprintf(os, "%sFromCPU: ", prefix);
t.pkt->print(os, verbosity, "");
}
}
void
WriteQueueEntry::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
Tick when_ready, Counter _order)
{
blkAddr = blk_addr;
blkSize = blk_size;
isSecure = target->isSecure();
readyTime = when_ready;
order = _order;
assert(target);
_isUncacheable = target->req->isUncacheable();
inService = false;
// we should never have more than a single target for cacheable
// writes (writebacks and clean evictions)
panic_if(!_isUncacheable && !targets.empty(),
"Write queue entry %#llx should never have more than one "
"cacheable target", blkAddr);
panic_if(!((target->isWrite() && _isUncacheable) ||
(target->isEviction() && !_isUncacheable)),
"Write queue entry %#llx should either be an uncacheable write or "
"a cacheable eviction", blkAddr);
targets.add(target, when_ready, _order);
}
bool
WriteQueueEntry::markInService()
{
assert(!inService);
if (!isUncacheable()) {
// we just forwarded the request packet & don't expect a
// response, so get rid of it
assert(getNumTargets() == 1);
popTarget();
return true;
}
inService = true;
return false;
}
void
WriteQueueEntry::deallocate()
{
assert(targets.empty());
inService = false;
}
bool
WriteQueueEntry::checkFunctional(PacketPtr pkt)
{
// For printing, we treat the WriteQueueEntry as a whole as a single
// entity. For other requests, we iterate over the individual
// targets since that's where the actual data lies.
if (pkt->isPrint()) {
pkt->checkFunctional(this, blkAddr, isSecure, blkSize, NULL);
return false;
} else {
return targets.checkFunctional(pkt);
}
}
bool
WriteQueueEntry::sendPacket(Cache &cache)
{
return cache.sendWriteQueuePacket(this);
}
void
WriteQueueEntry::print(std::ostream &os, int verbosity,
const std::string &prefix) const
{
ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s\n",
prefix, blkAddr, blkAddr + blkSize - 1,
isSecure ? "s" : "ns",
_isUncacheable ? "Unc" : "",
inService ? "InSvc" : "");
ccprintf(os, "%s Targets:\n", prefix);
targets.print(os, verbosity, prefix + " ");
}
std::string
WriteQueueEntry::print() const
{
ostringstream str;
print(str);
return str.str();
}

194
src/mem/cache/write_queue_entry.hh vendored Normal file
View file

@ -0,0 +1,194 @@
/*
* Copyright (c) 2012-2013, 2015-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Andreas Hansson
*/
/**
* @file
* Write queue entry
*/
#ifndef __MEM_CACHE_WRITE_QUEUE_ENTRY_HH__
#define __MEM_CACHE_WRITE_QUEUE_ENTRY_HH__
#include <list>
#include "base/printable.hh"
#include "mem/cache/queue_entry.hh"
class Cache;
/**
* Write queue entry
*/
class WriteQueueEntry : public QueueEntry, public Printable
{
/**
* Consider the queues friends to avoid making everything public.
*/
template<typename Entry>
friend class Queue;
friend class WriteQueue;
public:
class Target {
public:
const Tick recvTime; //!< Time when request was received (for stats)
const Tick readyTime; //!< Time when request is ready to be serviced
const Counter order; //!< Global order (for memory consistency mgmt)
const PacketPtr pkt; //!< Pending request packet.
Target(PacketPtr _pkt, Tick _readyTime, Counter _order)
: recvTime(curTick()), readyTime(_readyTime), order(_order),
pkt(_pkt)
{}
};
class TargetList : public std::list<Target> {
public:
TargetList() {}
void add(PacketPtr pkt, Tick readyTime, Counter order);
bool checkFunctional(PacketPtr pkt);
void print(std::ostream &os, int verbosity,
const std::string &prefix) const;
};
/** A list of write queue entries. */
typedef std::list<WriteQueueEntry *> List;
/** WriteQueueEntry list iterator. */
typedef List::iterator Iterator;
bool sendPacket(Cache &cache);
private:
/**
* Pointer to this entry on the ready list.
* @sa WriteQueue::readyList
*/
Iterator readyIter;
/**
* Pointer to this entry on the allocated list.
* @sa WriteQueue::allocatedList
*/
Iterator allocIter;
/** List of all requests that match the address */
TargetList targets;
public:
/** A simple constructor. */
WriteQueueEntry() {}
/**
* Allocate a write or writeback to this entry.
* @param blk_addr The address of the block.
* @param blk_size The number of bytes to request.
* @param pkt The original write.
* @param when_ready When the write should be sent out.
* @param _order The logical order of this write.
*/
void allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
Tick when_ready, Counter _order);
bool markInService();
/**
* Mark this entry as free.
*/
void deallocate();
/**
* Returns the current number of allocated targets.
* @return The current number of allocated targets.
*/
int getNumTargets() const
{ return targets.size(); }
/**
* Returns true if there are targets left.
* @return true if there are targets
*/
bool hasTargets() const { return !targets.empty(); }
/**
* Returns a pointer to the first target.
* @return A pointer to the first target.
*/
Target *getTarget()
{
assert(hasTargets());
return &targets.front();
}
/**
* Pop first target.
*/
void popTarget()
{
targets.pop_front();
}
bool checkFunctional(PacketPtr pkt);
/**
* Prints the contents of this write queue entry for debugging.
*/
void print(std::ostream &os,
int verbosity = 0,
const std::string &prefix = "") const;
/**
* A no-args wrapper of print(std::ostream...) meant to be
* invoked from DPRINTFs avoiding string overheads in fast mode
*
* @return string with write queue entry fields
*/
std::string print() const;
};
#endif // __MEM_CACHE_WRITE_QUEUE_ENTRY_HH__
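For illustration only (not part of this patch): a minimal sketch of how a response to an uncacheable write could drain the single target held by an entry, using the accessors declared above. The helper name and the deallocate() call on the owning queue (assumed to be inherited from the generic Queue ancestor) are assumptions for the sketch.

// Hypothetical response handler -- illustrates Target access only.
void
handleUncacheableWriteResp(WriteQueue &writeBuffer, WriteQueueEntry *entry)
{
    assert(entry->isUncacheable() && entry->hasTargets());
    // The original uncacheable write is the only target of this entry.
    PacketPtr pkt = entry->getTarget()->pkt;
    entry->popTarget();
    // ... turn pkt into a response and send it back through the
    // CPU-side port here ...
    // Return the entry to the free list of the owning write buffer.
    writeBuffer.deallocate(entry);
}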