/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Ron Dreslinski
 */

/**
 * @file
 * Miss and writeback queue definitions.
 */

#include "cpu/smt.hh" //for maxThreadsPerCPU
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"

using namespace std;
|
// simple constructor
|
|
|
|
/**
|
|
|
|
* @todo Remove the +16 from the write buffer constructor once we handle
|
|
|
|
* stalling on writebacks do to compression writes.
|
|
|
|
*/
|
|
|
|
MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
|
|
|
|
bool write_allocate, bool prefetch_miss)
|
2006-12-04 18:10:53 +01:00
|
|
|
: MissBuffer(write_allocate),
|
|
|
|
mq(numMSHRs, 4), wb(write_buffers,numMSHRs+1000), numMSHR(numMSHRs),
|
2006-06-28 17:02:14 +02:00
|
|
|
numTarget(numTargets), writeBuffers(write_buffers),
|
2006-12-04 18:10:53 +01:00
|
|
|
order(0), prefetchMiss(prefetch_miss)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
|
|
|
noTargetMSHR = NULL;
|
|
|
|
}
|
|
|
|
|
2006-12-04 18:10:53 +01:00
|
|
|
|
|
|
|
// Nothing to tear down explicitly; the member queues clean up themselves.
MissQueue::~MissQueue()
{
}
|
2006-06-28 17:02:14 +02:00
|
|
|
void
|
|
|
|
MissQueue::regStats(const string &name)
|
|
|
|
{
|
2006-12-04 18:10:53 +01:00
|
|
|
MissBuffer::regStats(name);
|
|
|
|
|
2006-06-28 17:02:14 +02:00
|
|
|
using namespace Stats;
|
|
|
|
|
|
|
|
// MSHR hit statistics
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
mshr_hits[access_idx]
|
|
|
|
.init(maxThreadsPerCPU)
|
|
|
|
.name(name + "." + cstr + "_mshr_hits")
|
|
|
|
.desc("number of " + cstr + " MSHR hits")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
|
|
|
demandMshrHits
|
|
|
|
.name(name + ".demand_mshr_hits")
|
|
|
|
.desc("number of demand (read+write) MSHR hits")
|
|
|
|
.flags(total)
|
|
|
|
;
|
2007-02-07 19:53:37 +01:00
|
|
|
demandMshrHits = mshr_hits[MemCmd::ReadReq] + mshr_hits[MemCmd::WriteReq];
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
overallMshrHits
|
|
|
|
.name(name + ".overall_mshr_hits")
|
|
|
|
.desc("number of overall MSHR hits")
|
|
|
|
.flags(total)
|
|
|
|
;
|
2007-02-07 19:53:37 +01:00
|
|
|
overallMshrHits = demandMshrHits + mshr_hits[MemCmd::SoftPFReq] +
|
|
|
|
mshr_hits[MemCmd::HardPFReq];
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
// MSHR miss statistics
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
mshr_misses[access_idx]
|
|
|
|
.init(maxThreadsPerCPU)
|
|
|
|
.name(name + "." + cstr + "_mshr_misses")
|
|
|
|
.desc("number of " + cstr + " MSHR misses")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
|
|
|
demandMshrMisses
|
|
|
|
.name(name + ".demand_mshr_misses")
|
|
|
|
.desc("number of demand (read+write) MSHR misses")
|
|
|
|
.flags(total)
|
|
|
|
;
|
2007-02-07 19:53:37 +01:00
|
|
|
demandMshrMisses = mshr_misses[MemCmd::ReadReq] + mshr_misses[MemCmd::WriteReq];
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
overallMshrMisses
|
|
|
|
.name(name + ".overall_mshr_misses")
|
|
|
|
.desc("number of overall MSHR misses")
|
|
|
|
.flags(total)
|
|
|
|
;
|
2007-02-07 19:53:37 +01:00
|
|
|
overallMshrMisses = demandMshrMisses + mshr_misses[MemCmd::SoftPFReq] +
|
|
|
|
mshr_misses[MemCmd::HardPFReq];
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
// MSHR miss latency statistics
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
mshr_miss_latency[access_idx]
|
|
|
|
.init(maxThreadsPerCPU)
|
|
|
|
.name(name + "." + cstr + "_mshr_miss_latency")
|
|
|
|
.desc("number of " + cstr + " MSHR miss cycles")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
|
|
|
demandMshrMissLatency
|
|
|
|
.name(name + ".demand_mshr_miss_latency")
|
|
|
|
.desc("number of demand (read+write) MSHR miss cycles")
|
|
|
|
.flags(total)
|
|
|
|
;
|
2007-02-07 19:53:37 +01:00
|
|
|
demandMshrMissLatency = mshr_miss_latency[MemCmd::ReadReq]
|
|
|
|
+ mshr_miss_latency[MemCmd::WriteReq];
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
overallMshrMissLatency
|
|
|
|
.name(name + ".overall_mshr_miss_latency")
|
|
|
|
.desc("number of overall MSHR miss cycles")
|
|
|
|
.flags(total)
|
|
|
|
;
|
|
|
|
overallMshrMissLatency = demandMshrMissLatency +
|
2007-02-07 19:53:37 +01:00
|
|
|
mshr_miss_latency[MemCmd::SoftPFReq] + mshr_miss_latency[MemCmd::HardPFReq];
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
// MSHR uncacheable statistics
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
mshr_uncacheable[access_idx]
|
|
|
|
.init(maxThreadsPerCPU)
|
|
|
|
.name(name + "." + cstr + "_mshr_uncacheable")
|
|
|
|
.desc("number of " + cstr + " MSHR uncacheable")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
|
|
|
overallMshrUncacheable
|
|
|
|
.name(name + ".overall_mshr_uncacheable_misses")
|
|
|
|
.desc("number of overall MSHR uncacheable misses")
|
|
|
|
.flags(total)
|
|
|
|
;
|
2007-02-07 19:53:37 +01:00
|
|
|
overallMshrUncacheable = mshr_uncacheable[MemCmd::ReadReq]
|
|
|
|
+ mshr_uncacheable[MemCmd::WriteReq] + mshr_uncacheable[MemCmd::SoftPFReq]
|
|
|
|
+ mshr_uncacheable[MemCmd::HardPFReq];
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
// MSHR miss latency statistics
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
mshr_uncacheable_lat[access_idx]
|
|
|
|
.init(maxThreadsPerCPU)
|
|
|
|
.name(name + "." + cstr + "_mshr_uncacheable_latency")
|
|
|
|
.desc("number of " + cstr + " MSHR uncacheable cycles")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
|
|
|
overallMshrUncacheableLatency
|
|
|
|
.name(name + ".overall_mshr_uncacheable_latency")
|
|
|
|
.desc("number of overall MSHR uncacheable cycles")
|
|
|
|
.flags(total)
|
|
|
|
;
|
2007-02-07 19:53:37 +01:00
|
|
|
overallMshrUncacheableLatency = mshr_uncacheable_lat[MemCmd::ReadReq]
|
|
|
|
+ mshr_uncacheable_lat[MemCmd::WriteReq]
|
|
|
|
+ mshr_uncacheable_lat[MemCmd::SoftPFReq]
|
|
|
|
+ mshr_uncacheable_lat[MemCmd::HardPFReq];
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
#if 0
|
|
|
|
// MSHR access formulas
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
mshrAccesses[access_idx]
|
|
|
|
.name(name + "." + cstr + "_mshr_accesses")
|
|
|
|
.desc("number of " + cstr + " mshr accesses(hits+misses)")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
mshrAccesses[access_idx] =
|
|
|
|
mshr_hits[access_idx] + mshr_misses[access_idx]
|
|
|
|
+ mshr_uncacheable[access_idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
demandMshrAccesses
|
|
|
|
.name(name + ".demand_mshr_accesses")
|
|
|
|
.desc("number of demand (read+write) mshr accesses")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
demandMshrAccesses = demandMshrHits + demandMshrMisses;
|
|
|
|
|
|
|
|
overallMshrAccesses
|
|
|
|
.name(name + ".overall_mshr_accesses")
|
|
|
|
.desc("number of overall (read+write) mshr accesses")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
overallMshrAccesses = overallMshrHits + overallMshrMisses
|
|
|
|
+ overallMshrUncacheable;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// MSHR miss rate formulas
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
mshrMissRate[access_idx]
|
|
|
|
.name(name + "." + cstr + "_mshr_miss_rate")
|
|
|
|
.desc("mshr miss rate for " + cstr + " accesses")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
|
|
|
|
mshrMissRate[access_idx] =
|
|
|
|
mshr_misses[access_idx] / cache->accesses[access_idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
demandMshrMissRate
|
|
|
|
.name(name + ".demand_mshr_miss_rate")
|
|
|
|
.desc("mshr miss rate for demand accesses")
|
|
|
|
.flags(total)
|
|
|
|
;
|
|
|
|
demandMshrMissRate = demandMshrMisses / cache->demandAccesses;
|
|
|
|
|
|
|
|
overallMshrMissRate
|
|
|
|
.name(name + ".overall_mshr_miss_rate")
|
|
|
|
.desc("mshr miss rate for overall accesses")
|
|
|
|
.flags(total)
|
|
|
|
;
|
|
|
|
overallMshrMissRate = overallMshrMisses / cache->overallAccesses;
|
|
|
|
|
|
|
|
// mshrMiss latency formulas
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
avgMshrMissLatency[access_idx]
|
|
|
|
.name(name + "." + cstr + "_avg_mshr_miss_latency")
|
|
|
|
.desc("average " + cstr + " mshr miss latency")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
|
|
|
|
avgMshrMissLatency[access_idx] =
|
|
|
|
mshr_miss_latency[access_idx] / mshr_misses[access_idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
demandAvgMshrMissLatency
|
|
|
|
.name(name + ".demand_avg_mshr_miss_latency")
|
|
|
|
.desc("average overall mshr miss latency")
|
|
|
|
.flags(total)
|
|
|
|
;
|
|
|
|
demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
|
|
|
|
|
|
|
|
overallAvgMshrMissLatency
|
|
|
|
.name(name + ".overall_avg_mshr_miss_latency")
|
|
|
|
.desc("average overall mshr miss latency")
|
|
|
|
.flags(total)
|
|
|
|
;
|
|
|
|
overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
|
|
|
|
|
|
|
|
// mshrUncacheable latency formulas
|
2007-02-07 19:53:37 +01:00
|
|
|
for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
|
|
|
|
MemCmd cmd(access_idx);
|
|
|
|
const string &cstr = cmd.toString();
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
avgMshrUncacheableLatency[access_idx]
|
|
|
|
.name(name + "." + cstr + "_avg_mshr_uncacheable_latency")
|
|
|
|
.desc("average " + cstr + " mshr uncacheable latency")
|
|
|
|
.flags(total | nozero | nonan)
|
|
|
|
;
|
|
|
|
|
|
|
|
avgMshrUncacheableLatency[access_idx] =
|
|
|
|
mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
overallAvgMshrUncacheableLatency
|
|
|
|
.name(name + ".overall_avg_mshr_uncacheable_latency")
|
|
|
|
.desc("average overall mshr uncacheable latency")
|
|
|
|
.flags(total)
|
|
|
|
;
|
|
|
|
overallAvgMshrUncacheableLatency = overallMshrUncacheableLatency / overallMshrUncacheable;
|
|
|
|
|
|
|
|
mshr_cap_events
|
|
|
|
.init(maxThreadsPerCPU)
|
|
|
|
.name(name + ".mshr_cap_events")
|
|
|
|
.desc("number of times MSHR cap was activated")
|
|
|
|
.flags(total)
|
|
|
|
;
|
|
|
|
|
|
|
|
//software prefetching stats
|
|
|
|
soft_prefetch_mshr_full
|
|
|
|
.init(maxThreadsPerCPU)
|
|
|
|
.name(name + ".soft_prefetch_mshr_full")
|
|
|
|
.desc("number of mshr full events for SW prefetching instrutions")
|
|
|
|
.flags(total)
|
|
|
|
;
|
|
|
|
|
|
|
|
mshr_no_allocate_misses
|
|
|
|
.name(name +".no_allocate_misses")
|
|
|
|
.desc("Number of misses that were no-allocate")
|
|
|
|
;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Allocate an MSHR for a miss, stamp its service order, and (unless this is
 * a hardware prefetch) request the bus.
 *
 * @param pkt  The missing request packet.
 * @param size Number of bytes to fetch.
 * @param time Tick at which the bus should be requested.
 * @return The newly allocated MSHR.
 */
MSHR*
MissQueue::allocateMiss(PacketPtr &pkt, int size, Tick time)
{
    MSHR *entry = mq.allocate(pkt, size);
    entry->order = order++;

    // Cacheable misses become line fills. (A no-allocate check used to live
    // here as well but is currently disabled.)
    if (!pkt->req->isUncacheable()) {
        entry->pkt->flags |= CACHE_LINE_FILL;
    }

    // Consuming the last MSHR blocks the cache until one frees up.
    if (mq.isFull()) {
        cache->setBlocked(Blocked_NoMSHRs);
    }

    // Hardware prefetches wait; everything else asks for the bus now.
    if (pkt->cmd != MemCmd::HardPFReq) {
        cache->setMasterRequest(Request_MSHR, time);
    }

    return entry;
}
|
/**
 * Allocate a write-buffer entry for a write/writeback, copy the data into
 * it, and request the bus.
 *
 * @param pkt  The write packet.
 * @param size Number of bytes being written.
 * @param time Tick at which the bus should be requested.
 * @return The newly allocated write-buffer MSHR.
 */
MSHR*
MissQueue::allocateWrite(PacketPtr &pkt, int size, Tick time)
{
    MSHR *entry = wb.allocate(pkt,size);
    entry->order = order++;

    //REMOVING COMPRESSION FOR NOW
#if 0
    if (pkt->isCompressed()) {
        entry->pkt->deleteData();
        entry->pkt->actualSize = pkt->actualSize;
        entry->pkt->data = new uint8_t[pkt->actualSize];
        memcpy(entry->pkt->data, pkt->data, pkt->actualSize);
    } else {
#endif
    // Snapshot the write data into the buffered packet.
    memcpy(entry->pkt->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(), pkt->getSize());

    // Out of write buffers: block the cache until one drains.
    if (wb.isFull()) {
        cache->setBlocked(Blocked_NoWBBuffers);
    }

    cache->setMasterRequest(Request_WB, time);

    return entry;
}
|
/**
|
|
|
|
* @todo Remove SW prefetches on mshr hits.
|
|
|
|
*/
|
|
|
|
void
|
2006-10-20 09:10:12 +02:00
|
|
|
MissQueue::handleMiss(PacketPtr &pkt, int blkSize, Tick time)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
|
|
|
// if (!cache->isTopLevel())
|
|
|
|
if (prefetchMiss) prefetcher->handleMiss(pkt, time);
|
|
|
|
|
|
|
|
int size = blkSize;
|
2006-06-29 22:07:19 +02:00
|
|
|
Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize-1);
|
2006-06-28 17:02:14 +02:00
|
|
|
MSHR* mshr = NULL;
|
2006-06-28 20:35:00 +02:00
|
|
|
if (!pkt->req->isUncacheable()) {
|
2006-08-15 22:21:46 +02:00
|
|
|
mshr = mq.findMatch(blkAddr);
|
2006-06-28 17:02:14 +02:00
|
|
|
if (mshr) {
|
|
|
|
//@todo remove hw_pf here
|
2006-10-06 15:15:53 +02:00
|
|
|
mshr_hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
|
|
|
|
if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
|
2006-06-29 22:07:19 +02:00
|
|
|
mshr->threadNum = -1;
|
2006-06-28 17:02:14 +02:00
|
|
|
}
|
|
|
|
mq.allocateTarget(mshr, pkt);
|
|
|
|
if (mshr->pkt->isNoAllocate() && !pkt->isNoAllocate()) {
|
|
|
|
//We are adding an allocate after a no-allocate
|
|
|
|
mshr->pkt->flags &= ~NO_ALLOCATE;
|
|
|
|
}
|
|
|
|
if (mshr->getNumTargets() == numTarget) {
|
|
|
|
noTargetMSHR = mshr;
|
|
|
|
cache->setBlocked(Blocked_NoTargets);
|
|
|
|
mq.moveToFront(mshr);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (pkt->isNoAllocate()) {
|
|
|
|
//Count no-allocate requests differently
|
|
|
|
mshr_no_allocate_misses++;
|
|
|
|
}
|
|
|
|
else {
|
2006-10-06 15:15:53 +02:00
|
|
|
mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
|
2006-06-28 17:02:14 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
//Count uncacheable accesses
|
2006-10-06 15:15:53 +02:00
|
|
|
mshr_uncacheable[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
|
2006-06-29 22:07:19 +02:00
|
|
|
size = pkt->getSize();
|
2006-06-28 17:02:14 +02:00
|
|
|
}
|
2006-06-29 22:07:19 +02:00
|
|
|
if (pkt->isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
|
|
|
|
!pkt->needsResponse())) {
|
2006-06-28 17:02:14 +02:00
|
|
|
/**
|
|
|
|
* @todo Add write merging here.
|
|
|
|
*/
|
2006-10-09 22:37:02 +02:00
|
|
|
mshr = allocateWrite(pkt, pkt->getSize(), time);
|
2006-06-28 17:02:14 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2006-07-10 23:16:15 +02:00
|
|
|
mshr = allocateMiss(pkt, blkSize, time);
|
2006-06-28 17:02:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Fetch the block containing @p addr on behalf of @p target, allocating a
 * dedicated MSHR and requesting the bus immediately.
 *
 * @param addr     Any address within the block to fetch.
 * @param blk_size Block size used to align the fetch address.
 * @param time     Tick at which the bus should be requested.
 * @param target   The packet to be satisfied by this fetch.
 * @return The MSHR servicing the fetch.
 */
MSHR*
MissQueue::fetchBlock(Addr addr, int blk_size, Tick time,
                      PacketPtr &target)
{
    Addr blkAddr = addr & ~(Addr)(blk_size - 1);
    // The caller guarantees no outstanding miss already covers this address.
    assert(mq.findMatch(addr) == NULL);
    MSHR *entry = mq.allocateFetch(blkAddr, blk_size, target);
    entry->order = order++;
    entry->pkt->flags |= CACHE_LINE_FILL;
    if (mq.isFull()) {
        cache->setBlocked(Blocked_NoMSHRs);
    }
    cache->setMasterRequest(Request_MSHR, time);
    return entry;
}
|
/**
 * Select the next packet to send on the bus, arbitrating between the miss
 * queue, the write buffer, and (when there is spare MSHR capacity) the
 * prefetcher — while preserving miss/writeback ordering for the same block.
 *
 * @return The chosen packet, or NULL if nothing is ready.
 */
PacketPtr
MissQueue::getPacket()
{
    PacketPtr pkt = mq.getReq();

    // Drain the write buffer when it's full (and idle), when there is no
    // ready miss, or when the ready miss isn't due yet.
    if (((wb.isFull() && wb.inServiceMSHRs == 0) || !pkt ||
         pkt->time > curTick) && wb.havePending()) {
        pkt = wb.getReq();
        // Need to search for earlier miss.
        MSHR *conflict = mq.findPending(pkt);
        if (conflict && conflict->order < ((MSHR*)(pkt->senderState))->order) {
            // Service misses in order until conflict is cleared.
            return mq.getReq();
        }
    }

    if (pkt) {
        // A pending writeback to the same block must go first: a write that
        // lacked permissions may have been followed by a writeback of the
        // evicted (owned) block, and the dirty data must be preserved
        // before the write can be issued.
        MSHR *conflict = wb.findPending(pkt);
        if (conflict /*&& conflict->order < pkt->senderState->order*/) {
            return wb.getReq();
        }
    }
    else if (!mq.isFull()){
        // Spare MSHR capacity: give the prefetcher a chance.
        pkt = prefetcher->getPacket();
        if (pkt) {
            // Count issued prefetches as hwpf_mshr_misses.
            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            // This requests the bus for the future, but the request is
            // satisfied immediately below.
            allocateMiss(pkt, pkt->getSize(), curTick);
            pkt = mq.getReq();
            assert(pkt); //We should get back a req b/c we just put one in
        }
    }
    return pkt;
}
|
void
|
2007-02-07 19:53:37 +01:00
|
|
|
MissQueue::setBusCmd(PacketPtr &pkt, MemCmd cmd)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
|
|
|
assert(pkt->senderState != 0);
|
2006-06-29 22:07:19 +02:00
|
|
|
MSHR * mshr = (MSHR*)pkt->senderState;
|
2006-06-28 17:02:14 +02:00
|
|
|
mshr->originalCmd = pkt->cmd;
|
2007-02-07 19:53:37 +01:00
|
|
|
if (cmd == MemCmd::UpgradeReq || cmd == MemCmd::InvalidateReq) {
|
2006-10-10 07:32:18 +02:00
|
|
|
pkt->flags |= NO_ALLOCATE;
|
|
|
|
pkt->flags &= ~CACHE_LINE_FILL;
|
|
|
|
}
|
|
|
|
else if (!pkt->req->isUncacheable() && !pkt->isNoAllocate() &&
|
2007-02-07 19:53:37 +01:00
|
|
|
cmd.needsResponse()) {
|
2006-10-10 07:32:18 +02:00
|
|
|
pkt->flags |= CACHE_LINE_FILL;
|
|
|
|
}
|
2006-06-28 17:02:14 +02:00
|
|
|
if (pkt->isCacheFill() || pkt->isNoAllocate())
|
|
|
|
pkt->cmd = cmd;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2006-10-20 09:10:12 +02:00
|
|
|
MissQueue::restoreOrigCmd(PacketPtr &pkt)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
2006-06-29 22:07:19 +02:00
|
|
|
pkt->cmd = ((MSHR*)(pkt->senderState))->originalCmd;
|
2006-06-28 17:02:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2006-10-20 09:10:12 +02:00
|
|
|
MissQueue::markInService(PacketPtr &pkt, MSHR* mshr)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
|
|
|
bool unblock = false;
|
|
|
|
BlockedCause cause = NUM_BLOCKED_CAUSES;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @todo Should include MSHRQueue pointer in MSHR to select the correct
|
|
|
|
* one.
|
|
|
|
*/
|
2006-06-29 22:07:19 +02:00
|
|
|
if ((!pkt->isCacheFill() && pkt->isWrite())) {
|
2006-06-28 17:02:14 +02:00
|
|
|
// Forwarding a write/ writeback, don't need to change
|
|
|
|
// the command
|
|
|
|
unblock = wb.isFull();
|
2006-10-09 22:37:02 +02:00
|
|
|
wb.markInService(mshr);
|
2006-06-28 17:02:14 +02:00
|
|
|
if (!wb.havePending()){
|
|
|
|
cache->clearMasterRequest(Request_WB);
|
|
|
|
}
|
|
|
|
if (unblock) {
|
|
|
|
// Do we really unblock?
|
|
|
|
unblock = !wb.isFull();
|
|
|
|
cause = Blocked_NoWBBuffers;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
unblock = mq.isFull();
|
2006-10-09 22:37:02 +02:00
|
|
|
mq.markInService(mshr);
|
2006-06-28 17:02:14 +02:00
|
|
|
if (!mq.havePending()){
|
|
|
|
cache->clearMasterRequest(Request_MSHR);
|
|
|
|
}
|
2007-02-07 19:53:37 +01:00
|
|
|
if (mshr->originalCmd == MemCmd::HardPFReq) {
|
2006-06-28 17:02:14 +02:00
|
|
|
DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
|
|
|
|
cache->name());
|
|
|
|
//Also clear pending if need be
|
|
|
|
if (!prefetcher->havePending())
|
|
|
|
{
|
|
|
|
cache->clearMasterRequest(Request_PF);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (unblock) {
|
|
|
|
unblock = !mq.isFull();
|
|
|
|
cause = Blocked_NoMSHRs;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (unblock) {
|
|
|
|
cache->clearBlocked(cause);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
2006-10-20 09:10:12 +02:00
|
|
|
MissQueue::handleResponse(PacketPtr &pkt, Tick time)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
2006-06-29 22:07:19 +02:00
|
|
|
MSHR* mshr = (MSHR*)pkt->senderState;
|
2007-02-07 19:53:37 +01:00
|
|
|
if (((MSHR*)(pkt->senderState))->originalCmd == MemCmd::HardPFReq) {
|
2006-06-28 17:02:14 +02:00
|
|
|
DPRINTF(HWPrefetch, "%s:Handling the response to a HW_PF\n",
|
|
|
|
cache->name());
|
|
|
|
}
|
|
|
|
#ifndef NDEBUG
|
|
|
|
int num_targets = mshr->getNumTargets();
|
|
|
|
#endif
|
|
|
|
|
|
|
|
bool unblock = false;
|
|
|
|
bool unblock_target = false;
|
|
|
|
BlockedCause cause = NUM_BLOCKED_CAUSES;
|
|
|
|
|
|
|
|
if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
|
2007-02-07 19:53:37 +01:00
|
|
|
mshr_miss_latency[mshr->originalCmd.toInt()][0/*pkt->req->getThreadNum()*/] +=
|
2006-06-28 17:02:14 +02:00
|
|
|
curTick - pkt->time;
|
|
|
|
// targets were handled in the cache tags
|
|
|
|
if (mshr == noTargetMSHR) {
|
|
|
|
// we always clear at least one target
|
|
|
|
unblock_target = true;
|
|
|
|
cause = Blocked_NoTargets;
|
|
|
|
noTargetMSHR = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mshr->hasTargets()) {
|
|
|
|
// Didn't satisfy all the targets, need to resend
|
2007-02-07 19:53:37 +01:00
|
|
|
MemCmd cmd = mshr->getTarget()->cmd;
|
2006-11-14 03:34:25 +01:00
|
|
|
mshr->pkt->setDest(Packet::Broadcast);
|
|
|
|
mshr->pkt->result = Packet::Unknown;
|
2007-03-23 18:09:37 +01:00
|
|
|
mshr->pkt->req = mshr->getTarget()->req;
|
2006-06-28 17:02:14 +02:00
|
|
|
mq.markPending(mshr, cmd);
|
|
|
|
mshr->order = order++;
|
|
|
|
cache->setMasterRequest(Request_MSHR, time);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
unblock = mq.isFull();
|
|
|
|
mq.deallocate(mshr);
|
|
|
|
if (unblock) {
|
|
|
|
unblock = !mq.isFull();
|
|
|
|
cause = Blocked_NoMSHRs;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2006-06-28 20:35:00 +02:00
|
|
|
if (pkt->req->isUncacheable()) {
|
2007-02-07 19:53:37 +01:00
|
|
|
mshr_uncacheable_lat[pkt->cmd.toInt()][0/*pkt->req->getThreadNum()*/] +=
|
2006-06-28 17:02:14 +02:00
|
|
|
curTick - pkt->time;
|
|
|
|
}
|
2006-06-28 20:35:00 +02:00
|
|
|
if (mshr->hasTargets() && pkt->req->isUncacheable()) {
|
2006-06-28 17:02:14 +02:00
|
|
|
// Should only have 1 target if we had any
|
|
|
|
assert(num_targets == 1);
|
2006-10-20 09:10:12 +02:00
|
|
|
PacketPtr target = mshr->getTarget();
|
2006-06-28 17:02:14 +02:00
|
|
|
mshr->popTarget();
|
2006-06-29 22:07:19 +02:00
|
|
|
if (pkt->isRead()) {
|
|
|
|
memcpy(target->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(),
|
|
|
|
target->getSize());
|
2006-06-28 17:02:14 +02:00
|
|
|
}
|
|
|
|
cache->respond(target, time);
|
|
|
|
assert(!mshr->hasTargets());
|
|
|
|
}
|
|
|
|
else if (mshr->hasTargets()) {
|
|
|
|
//Must be a no_allocate with possibly more than one target
|
|
|
|
assert(mshr->pkt->isNoAllocate());
|
|
|
|
while (mshr->hasTargets()) {
|
2006-10-20 09:10:12 +02:00
|
|
|
PacketPtr target = mshr->getTarget();
|
2006-06-28 17:02:14 +02:00
|
|
|
mshr->popTarget();
|
2006-06-29 22:07:19 +02:00
|
|
|
if (pkt->isRead()) {
|
|
|
|
memcpy(target->getPtr<uint8_t>(), pkt->getPtr<uint8_t>(),
|
|
|
|
target->getSize());
|
2006-06-28 17:02:14 +02:00
|
|
|
}
|
|
|
|
cache->respond(target, time);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-06-29 22:07:19 +02:00
|
|
|
if (pkt->isWrite()) {
|
2006-06-28 17:02:14 +02:00
|
|
|
// If the wrtie buffer is full, we might unblock now
|
|
|
|
unblock = wb.isFull();
|
|
|
|
wb.deallocate(mshr);
|
|
|
|
if (unblock) {
|
|
|
|
// Did we really unblock?
|
|
|
|
unblock = !wb.isFull();
|
|
|
|
cause = Blocked_NoWBBuffers;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
unblock = mq.isFull();
|
|
|
|
mq.deallocate(mshr);
|
|
|
|
if (unblock) {
|
|
|
|
unblock = !mq.isFull();
|
|
|
|
cause = Blocked_NoMSHRs;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (unblock || unblock_target) {
|
|
|
|
cache->clearBlocked(cause);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2006-06-29 22:07:19 +02:00
|
|
|
MissQueue::squash(int threadNum)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
|
|
|
bool unblock = false;
|
|
|
|
BlockedCause cause = NUM_BLOCKED_CAUSES;
|
|
|
|
|
2006-06-29 22:07:19 +02:00
|
|
|
if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
|
2006-06-28 17:02:14 +02:00
|
|
|
noTargetMSHR = NULL;
|
|
|
|
unblock = true;
|
|
|
|
cause = Blocked_NoTargets;
|
|
|
|
}
|
|
|
|
if (mq.isFull()) {
|
|
|
|
unblock = true;
|
|
|
|
cause = Blocked_NoMSHRs;
|
|
|
|
}
|
2006-06-29 22:07:19 +02:00
|
|
|
mq.squash(threadNum);
|
2006-06-28 17:02:14 +02:00
|
|
|
if (!mq.havePending()) {
|
|
|
|
cache->clearMasterRequest(Request_MSHR);
|
|
|
|
}
|
|
|
|
if (unblock && !mq.isFull()) {
|
|
|
|
cache->clearBlocked(cause);
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Look up the outstanding miss (if any) covering @p addr.
 *
 * @param addr The address to search for.
 * @return The matching MSHR, or NULL if none.
 */
MSHR*
MissQueue::findMSHR(Addr addr)
{
    return mq.findMatch(addr);
}
|
bool
|
2006-12-04 18:10:53 +01:00
|
|
|
MissQueue::findWrites(Addr addr, vector<MSHR*> &writes)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
2006-08-15 22:21:46 +02:00
|
|
|
return wb.findMatches(addr,writes);
|
2006-06-28 17:02:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2006-08-15 22:21:46 +02:00
|
|
|
MissQueue::doWriteback(Addr addr,
|
2006-06-28 17:02:14 +02:00
|
|
|
int size, uint8_t *data, bool compressed)
|
|
|
|
{
|
|
|
|
// Generate request
|
2006-06-29 22:07:19 +02:00
|
|
|
Request * req = new Request(addr, size, 0);
|
2007-02-07 19:53:37 +01:00
|
|
|
PacketPtr pkt = new Packet(req, MemCmd::Writeback, -1);
|
2006-06-30 17:34:27 +02:00
|
|
|
pkt->allocate();
|
2006-06-29 22:07:19 +02:00
|
|
|
if (data) {
|
|
|
|
memcpy(pkt->getPtr<uint8_t>(), data, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (compressed) {
|
|
|
|
pkt->flags |= COMPRESSED;
|
|
|
|
}
|
2006-06-28 17:02:14 +02:00
|
|
|
|
2006-06-29 22:07:19 +02:00
|
|
|
///All writebacks charged to same thread @todo figure this out
|
2006-10-06 15:15:53 +02:00
|
|
|
writebacks[0/*pkt->req->getThreadNum()*/]++;
|
2006-06-28 17:02:14 +02:00
|
|
|
|
|
|
|
allocateWrite(pkt, 0, curTick);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
2006-10-20 09:10:12 +02:00
|
|
|
MissQueue::doWriteback(PacketPtr &pkt)
|
2006-06-28 17:02:14 +02:00
|
|
|
{
|
2006-10-06 15:15:53 +02:00
|
|
|
writebacks[0/*pkt->req->getThreadNum()*/]++;
|
2006-06-28 17:02:14 +02:00
|
|
|
allocateWrite(pkt, 0, curTick);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Allocate an MSHR whose purpose is to gather targets for a block already
 * being filled, marking the entry as a line fill and blocking the cache if
 * this consumed the last MSHR.
 *
 * @param addr Address of the block.
 * @return The allocated target-list MSHR.
 */
MSHR*
MissQueue::allocateTargetList(Addr addr)
{
    MSHR *entry = mq.allocateTargetList(addr, blkSize);
    entry->pkt->flags |= CACHE_LINE_FILL;
    if (mq.isFull()) {
        cache->setBlocked(Blocked_NoMSHRs);
    }
    return entry;
}
|
bool
|
|
|
|
MissQueue::havePending()
|
|
|
|
{
|
|
|
|
return mq.havePending() || wb.havePending() || prefetcher->havePending();
|
|
|
|
}
|