/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Cache definitions.
 */

#include "base/fast_alloc.hh"
#include "base/misc.hh"
#include "base/range.hh"
#include "base/types.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"

template<class TagStore>
Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
    : BaseCache(p),
      tags(tags),
      prefetcher(pf),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access)
{
    tempBlock = new BlkType();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
                                  "MemSidePort");
    cpuSidePort->setOtherPort(memSidePort);
    memSidePort->setOtherPort(cpuSidePort);

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}

template<class TagStore>
void
Cache<TagStore>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    if (prefetcher)
        prefetcher->regStats(name());
}

template<class TagStore>
Port *
Cache<TagStore>::getPort(const std::string &if_name, int idx)
{
    if (if_name == "" || if_name == "cpu_side") {
        return cpuSidePort;
    } else if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "functional") {
        CpuSidePort *funcPort =
            new CpuSidePort(name() + "-cpu_side_funcport", this,
                            "CpuSideFuncPort");
        funcPort->setOtherPort(memSidePort);
        return funcPort;
    } else {
        panic("Port name %s unrecognized\n", if_name);
    }
}

template<class TagStore>
void
Cache<TagStore>::deletePortRefs(Port *p)
{
    if (cpuSidePort == p || memSidePort == p)
        panic("Can only delete functional ports\n");

    delete p;
}


template<class TagStore>
void
Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
{
    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
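
// Note on the swap flow above: overwrite_val holds the value the
// packet wants to write, and pkt->setData() hands the old block
// contents back to the requester first; for a conditional swap
// (isCondSwap()) the store only happens when the current contents
// match the 32- or 64-bit condition value carried in the request's
// extra data.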

template<class TagStore>
void
Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
{
    assert(blk);
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
            blk->status |= BlkDirty;
        }
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        if (pkt->getSize() == blkSize) {
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsExclusive()) {
                // on ReadExReq we give up our copy
                tags->invalidateBlk(blk);
            } else {
                // on ReadReq we create shareable copies here and in
                // the requester
                pkt->assertShared();
                blk->status &= ~BlkWritable;
            }
        }
    } else {
        // Not a read or write... must be an upgrade.  it's OK
        // to just ack those as long as we have an exclusive
        // copy at this level.
        assert(pkt->isUpgrade());
        tags->invalidateBlk(blk);
    }
}
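
// In short: SwapReq is treated as a read-modify-write via
// cmpAndSwap(), plain writes update the block and mark it dirty,
// reads copy data out (whole-block reads from upper-level caches also
// adjust coherence state), and upgrades are acked by invalidating our
// copy.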

/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////


template<class TagStore>
void
Cache<TagStore>::markInService(MSHR *mshr)
{
    markInServiceInternal(mshr);
#if 0
    if (mshr->originalCmd == MemCmd::HardPFReq) {
        DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
                name());
        //Also clear pending if need be
        if (!prefetcher->havePending())
        {
            deassertMemSideBusRequest(Request_PF);
        }
    }
#endif
}


template<class TagStore>
void
Cache<TagStore>::squash(int threadNum)
{
    bool unblock = false;
    BlockedCause cause = NUM_BLOCKED_CAUSES;

    if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
        noTargetMSHR = NULL;
        unblock = true;
        cause = Blocked_NoTargets;
    }
    if (mshrQueue.isFull()) {
        unblock = true;
        cause = Blocked_NoMSHRs;
    }
    mshrQueue.squash(threadNum);
    if (unblock && !mshrQueue.isFull()) {
        clearBlocked(cause);
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

template<class TagStore>
bool
Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
                        int &lat, PacketList &writebacks)
{
    int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
    blk = tags->accessBlock(pkt->getAddr(), lat, id);

    if (pkt->req->isUncacheable()) {
        if (blk != NULL) {
            tags->invalidateBlk(blk);
        }

        blk = NULL;
        lat = hitLatency;
        return false;
    }

    DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), (blk) ? "hit" : "miss");

    if (blk != NULL) {
        if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
            // OK to satisfy access
            incHitCount(pkt, id);
            satisfyCpuSideRequest(pkt, blk);
            return true;
        }
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need exclusive & only have shared.

    // Writeback handling is a special case.  We can write the block
    // into the cache without having a writeable copy (or any copy at
    // all).
    if (pkt->cmd == MemCmd::Writeback) {
        assert(blkSize == pkt->getSize());
        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), writebacks);
            if (blk == NULL) {
                // no replaceable block available, give up.
                // writeback will be forwarded to next level.
                incMissCount(pkt, id);
                return false;
            }
            int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
            tags->insertBlock(pkt->getAddr(), blk, id);
            blk->status = BlkValid | BlkReadable;
        }
        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
        blk->status |= BlkDirty;
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        incHitCount(pkt, id);
        return true;
    }

    incMissCount(pkt, id);

    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}
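
// Contract, as relied on by timingAccess() and atomicAccess() below:
// access() returns true iff the request was satisfied here, leaving
// the matching block (if any) in blk, the lookup latency in lat, and
// any evicted dirty blocks in writebacks for the caller to issue.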

class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
{
    Packet::SenderState *prevSenderState;
    int prevSrc;
#ifndef NDEBUG
    BaseCache *cache;
#endif
  public:
    ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
        : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
#ifndef NDEBUG
          , cache(_cache)
#endif
    {}
    void restore(Packet *pkt, BaseCache *_cache)
    {
        assert(_cache == cache);
        pkt->senderState = prevSenderState;
        pkt->setDest(prevSrc);
    }
};
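
// ForwardResponseRecord saves the senderState and source of a snoop
// we forward upward so that, when the express-snoop response comes
// back down through timingAccess(), restore() can re-route it to the
// original requester below us.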

template<class TagStore>
bool
Cache<TagStore>::timingAccess(PacketPtr pkt)
{
    //@todo Add back in MemDebug Calls
    // MemDebug::cacheAccess(pkt);

    // we charge hitLatency for doing just about anything here
    Tick time = curTick + hitLatency;

    if (pkt->isResponse()) {
        // must be cache-to-cache response from upper to lower level
        ForwardResponseRecord *rec =
            dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
        assert(rec != NULL);
        rec->restore(pkt, this);
        delete rec;
        memSidePort->respond(pkt, time);
        return true;
    }

    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        assert(!pkt->req->isUncacheable());
        // Special tweak for multilevel coherence: snoop downward here
        // on invalidates since there may be other caches below here
        // that have shared copies.  Not necessary if we know that
        // supplier had exclusive copy to begin with.
        if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
            Packet *snoopPkt = new Packet(pkt, true);  // clear flags
            snoopPkt->setExpressSnoop();
            snoopPkt->assertMemInhibit();
            memSidePort->sendTiming(snoopPkt);
            // main memory will delete snoopPkt
        }
        // since we're the official target but we aren't responding,
        // delete the packet now.
        delete pkt;
        return true;
    }

    if (pkt->req->isUncacheable()) {
        int lat = hitLatency;
        int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
        BlkType *blk = tags->accessBlock(pkt->getAddr(), lat, id);
        if (blk != NULL) {
            tags->invalidateBlk(blk);
        }

        // writes go in write buffer, reads use MSHR
        if (pkt->isWrite() && !pkt->isRead()) {
            allocateWriteBuffer(pkt, time, true);
        } else {
            allocateUncachedReadBuffer(pkt, time, true);
        }
        assert(pkt->needsResponse()); // else we should delete it here??
        return true;
    }

    int lat = hitLatency;
    BlkType *blk = NULL;
    PacketList writebacks;

    bool satisfied = access(pkt, blk, lat, writebacks);

#if 0
    /** @todo make the fast write alloc (wh64) work with coherence. */

    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == MemCmd::WriteReq
         || pkt->cmd == MemCmd::WriteInvalidateReq)) {
        // not outstanding misses, can do this
        MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
        if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fastallocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
                             writebacks);
            ++fastWrites;
        }
    }
#endif

    // track time of availability of next prefetch, if any
    Tick next_pf_time = 0;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        if (needsResponse) {
            pkt->makeTimingResponse();
            cpuSidePort->respond(pkt, curTick+lat);
        } else {
            delete pkt;
        }

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;
            next_pf_time = prefetcher->notify(pkt, time);
        }
    } else {
        // miss

        Addr blk_addr = blockAlign(pkt->getAddr());
        MSHR *mshr = mshrQueue.findMatch(blk_addr);

        if (mshr) {
            // MSHR hit
            //@todo remove hw_pf here
            mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
            if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
                mshr->threadNum = -1;
            }
            mshr->allocateTarget(pkt, time, order++);
            if (mshr->getNumTargets() == numTarget) {
                noTargetMSHR = mshr;
                setBlocked(Blocked_NoTargets);
                // need to be careful with this... if this mshr isn't
                // ready yet (i.e. time > curTick), we don't want to
                // move it ahead of mshrs that are ready
                // mshrQueue.moveToFront(mshr);
            }
        } else {
            // no MSHR
            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
            // always mark as cache fill for now... if we implement
            // no-write-allocate or bypass accesses this will have to
            // be changed.
            if (pkt->cmd == MemCmd::Writeback) {
                allocateWriteBuffer(pkt, time, true);
            } else {
                if (blk && blk->isValid()) {
                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable.  Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet.  Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsExclusive() && !blk->isWritable());
                    blk->status &= ~BlkReadable;
                }

                allocateMissBuffer(pkt, time, true);
            }

            if (prefetcher) {
                next_pf_time = prefetcher->notify(pkt, time);
            }
        }
    }

    if (next_pf_time != 0)
        requestMemSideBus(Request_PF, std::max(time, next_pf_time));

    // copy writebacks to write buffer
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        allocateWriteBuffer(wbPkt, time, true);
        writebacks.pop_front();
    }

    return true;
}
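
// Rough flow of timingAccess(): forwarded snoop responses and
// mem-inhibited requests are handled first, uncacheable requests
// invalidate any cached copy and go straight to the write buffer or
// an uncached read buffer, and everything else goes through access();
// hits respond after hitLatency, while misses either coalesce into a
// matching MSHR or allocate a new miss or write-buffer entry.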

// See comment in cache.hh.
template<class TagStore>
PacketPtr
Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
                              bool needsExclusive)
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        //assert(blk == NULL);
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is shared
        // (read-only) and we need exclusive
        assert(needsExclusive && !blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else {
        // block is invalid
        cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);

    pkt->allocate();
    return pkt;
}
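
// Usage sketch (see atomicAccess() below): a NULL return means
// "forward the original packet unchanged":
//
//     PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
//     bool is_forward = (bus_pkt == NULL);
//     if (is_forward)
//         bus_pkt = pkt; // no local cache operation involved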

template<class TagStore>
Tick
Cache<TagStore>::atomicAccess(PacketPtr pkt)
{
    int lat = hitLatency;

    // @TODO: make this a parameter
    bool last_level_cache = false;

    if (pkt->memInhibitAsserted()) {
        assert(!pkt->req->isUncacheable());
        // have to invalidate ourselves and any lower caches even if
        // upper cache will be responding
        if (pkt->isInvalidate()) {
            BlkType *blk = tags->findBlock(pkt->getAddr());
            if (blk && blk->isValid()) {
                tags->invalidateBlk(blk);
                DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
                        pkt->cmdString(), pkt->getAddr());
            }
            if (!last_level_cache) {
                DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
                        pkt->cmdString(), pkt->getAddr());
                lat += memSidePort->sendAtomic(pkt);
            }
        } else {
            DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat;
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    BlkType *blk = NULL;
    PacketList writebacks;

    if (!access(pkt, blk, lat, writebacks)) {
        // MISS
        PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());

        bool is_forward = (bus_pkt == NULL);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %x\n",
                bus_pkt->cmdString(), bus_pkt->getAddr());

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += memSidePort->sendAtomic(bus_pkt);

        DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);

        assert(!bus_pkt->wasNacked());

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything.  Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks);
                    satisfyCpuSideRequest(pkt, blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention.  If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // Handle writebacks if needed
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        memSidePort->sendAtomic(wbPkt);
        writebacks.pop_front();
        delete wbPkt;
    }

    // We now have the block one way or another (hit or completed miss)

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat;
}
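
// Note that in atomic mode the miss path is synchronous: the bus
// packet built by getBusPacket() is sent with sendAtomic(), any fill
// happens via handleFill() before we return, and the accumulated
// latency is returned directly rather than scheduled as an event.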

template<class TagStore>
void
Cache<TagStore>::functionalAccess(PacketPtr pkt,
                                  CachePort *incomingPort,
                                  CachePort *otherSidePort)
{
    Addr blk_addr = blockAlign(pkt->getAddr());
    BlkType *blk = tags->findBlock(pkt->getAddr());

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);
    bool done =
        (blk && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data))
        || incomingPort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || otherSidePort->checkFunctional(pkt);

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (!done) {
        otherSidePort->sendFunctional(pkt);
    }
}
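
// A functional access is satisfied by the first structure that can
// service it, checked in order: the tagged block, the incoming port's
// queued packets, the MSHRs, the write buffer, and the other-side
// port; only if none of them completes the packet is it forwarded
// onward.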

/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


template<class TagStore>
void
Cache<TagStore>::handleResponse(PacketPtr pkt)
{
    Tick time = curTick + hitLatency;
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
    bool is_error = pkt->isError();

    assert(mshr);

    if (pkt->wasNacked()) {
        //pkt->reinitFromRequest();
        warn("NACKs from devices not connected to the same bus "
             "not implemented\n");
        return;
    }
    if (is_error) {
        DPRINTF(Cache, "Cache received packet with error for address %x, "
                "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
    }

    DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());

    MSHRQueue *mq = mshr->queue;
    bool wasFull = mq->isFull();

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = NULL;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    BlkType *blk = tags->findBlock(pkt->getAddr());
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick - initial_tgt->recvTime;
    PacketList writebacks;

    if (pkt->req->isUncacheable()) {
        mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
            miss_latency;
    } else {
        mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
            miss_latency;
    }

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
                pkt->getAddr());

        // give mshr a chance to do some dirty work
        mshr->handleFill(pkt, blk);

        blk = handleFill(pkt, blk, writebacks);
        assert(blk != NULL);
    }

    // First offset for critical word first calculations
    int initial_offset = 0;

    if (mshr->hasTargets()) {
        initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
    }

    while (mshr->hasTargets()) {
        MSHR::Target *target = mshr->getTarget();

        switch (target->source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            if (is_fill) {
                satisfyCpuSideRequest(target->pkt, blk);
                // How many bytes past the first request is this one
                int transfer_offset =
                    target->pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If critical word (no offset) return first word time
                completion_time = tags->getHitLatency() +
                    (transfer_offset ? pkt->finishTime : pkt->firstWordTime);

                assert(!target->pkt->req->isUncacheable());
                missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
                    completion_time - target->recvTime;
            } else if (target->pkt->cmd == MemCmd::StoreCondReq &&
                       pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                completion_time = tags->getHitLatency() + pkt->finishTime;
                target->pkt->req->setExtraData(0);
            } else {
                // not a cache fill, just forwarding response
                completion_time = tags->getHitLatency() + pkt->finishTime;
                if (pkt->isRead() && !is_error) {
                    target->pkt->setData(pkt->getPtr<uint8_t>());
                }
            }
            target->pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                target->pkt->copyError(pkt);
            if (pkt->isInvalidate()) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that.  Response should not have
                // isInvalidate() set otherwise.
                assert(target->pkt->cmd == MemCmd::ReadResp);
                assert(pkt->cmd == MemCmd::ReadRespWithInvalidate);
                target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
            }
            cpuSidePort->respond(target->pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(target->pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete target->pkt->req;
            delete target->pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            handleSnoop(target->pkt, blk, true, true,
                        mshr->pendingInvalidate || pkt->isInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target->source);
        }

        mshr->popTarget();
    }

    if (pkt->isInvalidate()) {
        tags->invalidateBlk(blk);
    }

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding... see comment in timingAccess()
        blk->status &= ~BlkReadable;
        MSHRQueue *mq = mshr->queue;
        mq->markPending(mshr);
        requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
    } else {
        mq->deallocate(mshr);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    // copy writebacks to write buffer
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        allocateWriteBuffer(wbPkt, time, true);
        writebacks.pop_front();
    }
    // if we used temp block, clear it out
    if (blk == tempBlock) {
        if (blk->isDirty()) {
            allocateWriteBuffer(writebackBlk(blk), time, true);
        }
        tags->invalidateBlk(blk);
    }

    delete pkt;
}
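
// Summary: handleResponse() optionally fills the block, then walks
// the MSHR's target list, servicing CPU targets (with
// critical-word-first completion times), prefetcher targets, and
// deferred snoops; it then either re-arms the MSHR for promoted
// deferred targets or deallocates it, and finally flushes any
// writebacks generated by the fill.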

template<class TagStore>
PacketPtr
Cache<TagStore>::writebackBlk(BlkType *blk)
{
    assert(blk && blk->isValid() && blk->isDirty());

    writebacks[0/*pkt->req->threadId()*/]++;

    Request *writebackReq =
        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
    PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
    writeback->allocate();
    std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);

    blk->status &= ~BlkDirty;
    return writeback;
}
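
// The returned Writeback packet carries a copy of the block's data;
// the block itself stays valid but is marked clean, so the caller is
// responsible for actually queueing or sending the packet (see
// allocateBlock() below and the tempBlock handling in
// handleResponse() above).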

template<class TagStore>
typename Cache<TagStore>::BlkType*
Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
{
    BlkType *blk = tags->findVictim(addr, writebacks);

    if (blk->isValid()) {
        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
        if (repl_mshr) {
            // must be an outstanding upgrade request on block
            // we're about to replace...
            assert(!blk->isWritable());
            assert(repl_mshr->needsExclusive());
            // too hard to replace block with transient state
            // allocation failed, block not inserted
            return NULL;
        } else {
            DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
                    repl_addr, addr,
                    blk->isDirty() ? "writeback" : "clean");

            if (blk->isDirty()) {
                // Save writeback packet for handling by caller
                writebacks.push_back(writebackBlk(blk));
            }
        }
    }

    return blk;
}
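
// allocateBlock() fails (returns NULL) only when the chosen victim
// has an outstanding upgrade MSHR; callers cope by forwarding the
// writeback to the next level (access()) or by falling back to
// tempBlock (handleFill() below).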

// Note that the reason we return a list of writebacks rather than
// inserting them directly in the write buffer is that this function
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
template<class TagStore>
typename Cache<TagStore>::BlkType*
Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
                            PacketList &writebacks)
{
    Addr addr = pkt->getAddr();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    if (blk == NULL) {
        // better have read new data...
        assert(pkt->hasData());
        // need to do a replacement
        blk = allocateBlock(addr, writebacks);
        if (blk == NULL) {
            // No replaceable block... just use temporary storage to
            // complete the current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->set = tags->extractSet(addr);
            tempBlock->tag = tags->extractTag(addr);
            DPRINTF(Cache, "using temp block for %x\n", addr);
        } else {
            int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
            tags->insertBlock(pkt->getAddr(), blk, id);
        }
    } else {
        // existing block... probably an upgrade
        assert(blk->tag == tags->extractTag(addr));
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
    }

    blk->status = BlkValid | BlkReadable;

    if (!pkt->sharedAsserted()) {
        blk->status |= BlkWritable;
        // If we got this via cache-to-cache transfer (i.e., from a
        // cache that was an owner) and took away that owner's copy,
        // then we need to write it back.  Normally this happens
        // anyway as a side effect of getting a copy to write it, but
        // there are cases (such as failed store conditionals or
        // compare-and-swaps) where we'll demand an exclusive copy but
        // end up not writing it.
        if (pkt->memInhibitAsserted())
            blk->status |= BlkDirty;
    }

    DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
            addr, old_state, blk->status);

    // if we got new data, copy it in
    if (pkt->isRead()) {
        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
    }

    blk->whenReady = pkt->finishTime;

    return blk;
}
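
// State summary for a fill: the block always becomes valid and
// readable; it additionally becomes writable when no other cache
// asserted the shared line, and dirty when an owning cache supplied
// the data via a cache-to-cache transfer (memInhibitAsserted()).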

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

template<class TagStore>
void
Cache<TagStore>::
doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
                       bool already_copied, bool pending_inval)
{
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
    assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
    pkt->allocate();
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us.  We'll assert MemInhibit in both cases, but in
        // the latter case MemInhibit will keep the invalidation from
        // reaching cache A.  This special response tells cache A that
        // it gets the block to satisfy its read, but must immediately
        // invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    memSidePort->respond(pkt, curTick + hitLatency);
}

template<class TagStore>
void
Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
                             bool is_timing, bool is_deferred,
                             bool pending_inval)
{
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it.  save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        bool alreadyResponded = pkt->memInhibitAsserted();
        if (is_timing) {
            Packet *snoopPkt = new Packet(pkt, true);  // clear flags
            snoopPkt->setExpressSnoop();
            snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
            cpuSidePort->sendTiming(snoopPkt);
            if (snoopPkt->memInhibitAsserted()) {
                // cache-to-cache response from some upper cache
                assert(!alreadyResponded);
                pkt->assertMemInhibit();
            } else {
                delete snoopPkt->senderState;
            }
            if (snoopPkt->sharedAsserted()) {
                pkt->assertShared();
            }
            delete snoopPkt;
        } else {
            int origSrc = pkt->getSrc();
            cpuSidePort->sendAtomic(pkt);
            if (!alreadyResponded && pkt->memInhibitAsserted()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
            pkt->setSrc(origSrc);
        }
    }

    if (!blk || !blk->isValid()) {
        return;
    }

    // we may end up modifying both the block state and the packet (if
    // we respond in atomic mode), so just figure out what to do now
    // and then do it later
    bool respond = blk->isDirty() && pkt->needsResponse();
    bool have_exclusive = blk->isWritable();
    bool invalidate = pkt->isInvalidate();

    if (pkt->isRead() && !pkt->isInvalidate()) {
        assert(!pkt->needsExclusive());
        pkt->assertShared();
        int bits_to_clear = BlkWritable;
        const bool haveOwnershipState = true; // for now
        if (!haveOwnershipState) {
            // if we don't support pure ownership (dirty && !writable),
            // have to clear dirty bit here, assume memory snarfs data
            // on cache-to-cache xfer
            bits_to_clear |= BlkDirty;
        }
        blk->status &= ~bits_to_clear;
    }

    DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
            pkt->cmdString(), blockAlign(pkt->getAddr()),
            respond ? "responding, " : "", invalidate ? 0 : blk->status);

    if (respond) {
        assert(!pkt->memInhibitAsserted());
        pkt->assertMemInhibit();
        if (have_exclusive) {
            pkt->setSupplyExclusive();
        }
        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            pkt->setDataFromBlock(blk->data, blkSize);
        }
    } else if (is_timing && is_deferred) {
        // if it's a deferred timing snoop then we've made a copy of
        // the packet, and so if we're not using that copy to respond
        // then we need to delete it here.
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (invalidate) {
        tags->invalidateBlk(blk);
    }
}
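
// Snoop summary: the snoop is first forwarded to upper levels (using
// express-snoop copies in timing mode), then applied locally: a dirty
// block supplies the data (exclusively if it is also writable),
// non-invalidating reads downgrade us to shared and non-writable, and
// invalidating requests drop our copy last.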

template<class TagStore>
void
Cache<TagStore>::snoopTiming(PacketPtr pkt)
{
    // Note that some deferred snoops don't have requests, since the
    // original access may have already completed
    if ((pkt->req && pkt->req->isUncacheable()) ||
        pkt->cmd == MemCmd::Writeback) {
        //Can't get a hit on an uncacheable address
        //Revisit this for multi level coherence
        return;
    }

    BlkType *blk = tags->findBlock(pkt->getAddr());

    Addr blk_addr = blockAlign(pkt->getAddr());
    MSHR *mshr = mshrQueue.findMatch(blk_addr);

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
                blk_addr);
        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    //We also need to check the writeback buffers and handle those
    std::vector<MSHR *> writebacks;
    if (writeBuffer.findMatches(blk_addr, writebacks)) {
        DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
                pkt->getAddr());

        //Look through writebacks for any non-uncacheable writes, use that
        for (int i = 0; i < writebacks.size(); i++) {
            mshr = writebacks[i];
            assert(!mshr->isUncacheable());
            assert(mshr->getNumTargets() == 1);
            PacketPtr wb_pkt = mshr->getTarget()->pkt;
            assert(wb_pkt->cmd == MemCmd::Writeback);

            assert(!pkt->memInhibitAsserted());
            pkt->assertMemInhibit();
            if (!pkt->needsExclusive()) {
                pkt->assertShared();
            } else {
                // if we're not asserting the shared line, we need to
                // invalidate our copy.  we'll do that below as long as
                // the packet's invalidate flag is set...
                assert(pkt->isInvalidate());
            }
            doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
                                   false, false);

            if (pkt->isInvalidate()) {
                // Invalidation trumps our writeback... discard here
                markInService(mshr);
                delete wb_pkt;
            }

            // If this was a shared writeback, there may still be
            // other shared copies above that require invalidation.
            // We could be more selective and return here if the
            // request is non-exclusive or if the writeback is
            // exclusive.
            break;
        }
    }

    handleSnoop(pkt, blk, true, false, false);
}

template<class TagStore>
Tick
Cache<TagStore>::snoopAtomic(PacketPtr pkt)
{
    if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
        // Can't get a hit on an uncacheable address
        // Revisit this for multi-level coherence
        return hitLatency;
    }

    BlkType *blk = tags->findBlock(pkt->getAddr());
    handleSnoop(pkt, blk, false, false, false);
    return hitLatency;
}
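
// getNextMSHR() below arbitrates between the miss queue and the write
// buffer: whichever is non-empty wins by default; with both non-empty,
// misses are favored unless the write buffer is full with no entry in
// service; in either case an older conflicting entry for the same
// address in the other queue is serviced first; and with both empty,
// the prefetcher is consulted.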

template<class TagStore>
MSHR *
Cache<TagStore>::getNextMSHR()
{
    // Check both MSHR queue and write buffer for potential requests
    MSHR *miss_mshr  = mshrQueue.getNextMSHR();
    MSHR *write_mshr = writeBuffer.getNextMSHR();

    // Now figure out which one to send... some cases are easy
    if (miss_mshr && !write_mshr) {
        return miss_mshr;
    }
    if (write_mshr && !miss_mshr) {
        return write_mshr;
    }

    if (miss_mshr && write_mshr) {
        // We have one of each... normally we favor the miss request
        // unless the write buffer is full
        if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
            // Write buffer is full, so we'd like to issue a write;
            // need to search MSHR queue for conflicting earlier miss.
            MSHR *conflict_mshr =
                mshrQueue.findPending(write_mshr->addr, write_mshr->size);

            if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
                // Service misses in order until conflict is cleared.
                return conflict_mshr;
            }

            // No conflicts; issue write
            return write_mshr;
        }

        // Write buffer isn't full, but need to check it for
        // conflicting earlier writeback
        MSHR *conflict_mshr =
            writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are doing a write and
            // we didn't have permissions, then subsequently saw a
            // writeback (the owned copy got evicted).  We need to
            // perform the writeback first to preserve the dirty data,
            // then we can issue the write.

            // should we return write_mshr here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !write_mshr);
    if (prefetcher && !mshrQueue.isFull()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = blockAlign(pkt->getAddr());
            if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
                // Don't request bus, since we already have it
                return allocateMissBuffer(pkt, curTick, false);
            }
        }
    }

    return NULL;
}
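
// getTimingPacket() below turns the winning MSHR into an actual bus
// packet.  Note the special cases: a failed SC upgrade is completed
// locally, without ever reaching the bus, by synthesizing an
// UpgradeFailResp; and forward-no-response entries (such as
// writebacks) are sent on unmodified.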

template<class TagStore>
PacketPtr
Cache<TagStore>::getTimingPacket()
{
    MSHR *mshr = getNextMSHR();

    if (mshr == NULL) {
        return NULL;
    }

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
    PacketPtr pkt = NULL;

    if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq) {
        // SCUpgradeReq saw invalidation while queued in MSHR, so now
        // that we are getting around to processing it, just treat it
        // as if we got a failure response
        pkt = new Packet(tgt_pkt);
        pkt->cmd = MemCmd::UpgradeFailResp;
        pkt->senderState = mshr;
        pkt->firstWordTime = pkt->finishTime = curTick;
        handleResponse(pkt);
        return NULL;
    } else if (mshr->isForwardNoResponse()) {
        // no response expected, just forward packet as it is
        assert(tags->findBlock(mshr->addr) == NULL);
        pkt = tgt_pkt;
    } else {
        BlkType *blk = tags->findBlock(mshr->addr);
        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());

        mshr->isForward = (pkt == NULL);

        if (mshr->isForward) {
            // not a cache block request, but a response is expected
            // make copy of current packet to forward, keep current
            // copy for response handling
            pkt = new Packet(tgt_pkt);
            pkt->allocate();
            if (pkt->isWrite()) {
                pkt->setData(tgt_pkt->getPtr<uint8_t>());
            }
        }
    }

    assert(pkt != NULL);
    pkt->senderState = mshr;
    return pkt;
}
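
// A minimal sketch of the expected calling pattern (this mirrors what
// MemSidePort::sendPacket() below actually does): the MSHR travels in
// senderState, and only a successfully sent packet is marked in
// service:
//
//     PacketPtr pkt = myCache()->getTimingPacket();
//     if (pkt != NULL) {
//         MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
//         if (sendTiming(pkt))
//             myCache()->markInService(mshr);
//     }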

template<class TagStore>
Tick
Cache<TagStore>::nextMSHRReadyTime()
{
    Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
                              writeBuffer.nextMSHRReadyTime());

    if (prefetcher) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}
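
// Callers treat MaxTick from this function as "nothing pending";
// sendPacket() below only reschedules its send event when a finite
// ready time is returned.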

///////////////
//
// CpuSidePort
//
///////////////

template<class TagStore>
void
Cache<TagStore>::CpuSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    // CPU side port doesn't snoop; it's a target only.  It can
    // potentially respond to any address.
    snoop = false;
    resp.push_back(myCache()->getAddrRange());
}
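
// Together with MemSidePort::getDeviceAddressRanges() below, this
// fixes the cache's bus roles: a non-snooping target on the CPU side,
// a snooping non-target on the memory side.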

template<class TagStore>
bool
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
{
    // illegal to block responses... can lead to deadlock
    if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
        DPRINTF(Cache, "Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }

    myCache()->timingAccess(pkt);
    return true;
}
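
// The memInhibitAsserted() exemption above means a blocked cache still
// accepts requests that another cache has already committed to
// answering, presumably because such packets only need to be observed
// here, not serviced, so refusing them gains nothing.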

template<class TagStore>
Tick
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    return myCache()->atomicAccess(pkt);
}

template<class TagStore>
void
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    myCache()->functionalAccess(pkt, this, otherPort);
}

template<class TagStore>
Cache<TagStore>::
CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
                         const std::string &_label)
    : BaseCache::CachePort(_name, _cache, _label)
{
}

///////////////
//
// MemSidePort
//
///////////////

template<class TagStore>
void
Cache<TagStore>::MemSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    // Memory-side port always snoops, but never passes requests
    // through to targets on the cpu side (so we don't add anything to
    // the address range list).
    snoop = true;
}

template<class TagStore>
bool
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
{
    // This needs to be fixed so that the cache updates the MSHR and
    // sends the packet back out on the link; but that probably won't
    // happen, so until it gets fixed, just panic when it does.
    if (pkt->wasNacked())
        panic("Need to implement cache resending nacked packets!\n");

    if (pkt->isRequest() && blocked) {
        DPRINTF(Cache, "Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }

    if (pkt->isResponse()) {
        myCache()->handleResponse(pkt);
    } else {
        myCache()->snoopTiming(pkt);
    }
    return true;
}
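
// On the memory side, anything that is not a response must be a snoop
// of someone else's request, hence the two-way demux above into
// handleResponse() vs. snoopTiming().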

template<class TagStore>
Tick
Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
{
    // in atomic mode, responses go back to the sender via the
    // function return from sendAtomic(), not via a separate
    // sendAtomic() from the responder.  Thus we should never see a
    // response packet in recvAtomic() (anywhere, not just here).
    assert(!pkt->isResponse());
    return myCache()->snoopAtomic(pkt);
}

template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
{
    myCache()->functionalAccess(pkt, this, otherPort);
}

template<class TagStore>
void
Cache<TagStore>::MemSidePort::sendPacket()
{
    // if we have responses that are ready, they take precedence
    if (deferredPacketReady()) {
        bool success = sendTiming(transmitList.front().pkt);

        if (success) {
            // send successful, remove packet
            transmitList.pop_front();
        }

        waitingOnRetry = !success;
    } else {
        // check for non-response packets (requests & writebacks)
        PacketPtr pkt = myCache()->getTimingPacket();
        if (pkt == NULL) {
            // can happen if e.g. we attempt a writeback and fail, but
            // before the retry, the writeback is eliminated because
            // we snoop another cache's ReadEx.
            waitingOnRetry = false;
        } else {
            MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);

            bool success = sendTiming(pkt);

            waitingOnRetry = !success;
            if (waitingOnRetry) {
                DPRINTF(CachePort, "now waiting on a retry\n");
                if (!mshr->isForwardNoResponse()) {
                    delete pkt;
                }
            } else {
                myCache()->markInService(mshr);
            }
        }
    }

    // tried to send packet... if it was successful (no retry), see if
    // we need to rerequest bus or not
    if (!waitingOnRetry) {
        Tick nextReady = std::min(deferredPacketReadyTime(),
                                  myCache()->nextMSHRReadyTime());
        // @TODO: need to factor in prefetch requests here somehow
        if (nextReady != MaxTick) {
            DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
            schedule(sendEvent, std::max(nextReady, curTick + 1));
        } else {
            // no more to send right now: if we're draining, we may be done
            if (drainEvent && !sendEvent->scheduled()) {
                drainEvent->process();
                drainEvent = NULL;
            }
        }
    }
}
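
// sendPacket() leaves waitingOnRetry set whenever sendTiming() is
// refused; the bus then calls recvRetry() below, which simply tries
// again.  processSendEvent() is the scheduled-time entry point and
// asserts the opposite invariant.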

template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvRetry()
{
    assert(waitingOnRetry);
    sendPacket();
}

template<class TagStore>
void
Cache<TagStore>::MemSidePort::processSendEvent()
{
    assert(!waitingOnRetry);
    sendPacket();
}

template<class TagStore>
Cache<TagStore>::
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
                         const std::string &_label)
    : BaseCache::CachePort(_name, _cache, _label)
{
    // override default send event from SimpleTimingPort
    delete sendEvent;
    sendEvent = new SendEvent(this);
}
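
// The default SimpleTimingPort send event only drains transmitList;
// this port substitutes its own SendEvent (which calls
// processSendEvent() above) so that scheduled sends also pull new
// requests from the cache via getTimingPacket().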