First pass; now compiles with the current head of the tree.

Compilation and initialization work; functionality is still in progress.

src/mem/cache/base_cache.cc:
    Temporary fix for the CPU's use of the getPort functionality.  CPUs will need to be ported to the new connector objects.
    Also, all packets have to have data, or the delete fails (see the sketch after this list).
src/mem/cache/cache.hh:
    Fix function prototypes so overloading works
src/mem/cache/cache_impl.hh:
    Fix functions to match the virtual base class
src/mem/cache/miss/miss_queue.cc:
    Packets have to have data, or the delete fails
src/python/m5/objects/BaseCache.py:
    Update for newmem
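
The packet-data note for base_cache.cc and miss_queue.cc is the reason both regStats() hunks below allocate a buffer on the throw-away packet. A minimal, self-contained sketch of the failure mode and the fix, using hypothetical stand-in types rather than the real m5 Request/Packet classes:

    // Stand-in for the real m5 Packet, which frees its data when it is
    // destroyed -- so a packet that never got data cannot be deleted safely.
    #include <cassert>
    #include <cstdint>

    struct DummyPacket
    {
        uint8_t *data;
        int size;
        explicit DummyPacket(int sz) : data(0), size(sz) {}
        void allocate() { data = new uint8_t[size]; }   // mirrors Packet::allocate()
        ~DummyPacket()
        {
            assert(data != 0 && "packet deleted without data");
            delete [] data;
        }
    };

    int main()
    {
        DummyPacket temp_pkt(4);   // throw-away packet, as in regStats()
        temp_pkt.allocate();       // without this, the destructor asserts
        return 0;
    }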

--HG--
extra : convert_revision : 2b6ad1e9d8ae07ace9294cd257e2ccc0024b7fcb
Ron Dreslinski 2006-06-30 16:25:35 -04:00
parent dea1a19b2d
commit 1bdc65b00f
5 changed files with 28 additions and 22 deletions

src/mem/cache/base_cache.cc

@@ -101,16 +101,21 @@ BaseCache::CachePort::clearBlocked()
 Port*
 BaseCache::getPort(const std::string &if_name, int idx)
 {
-    if(if_name == "cpu_side")
+    if (if_name == "")
     {
-        if(cpuSidePort != NULL)
-            panic("Already have a cpu side for this cache\n");
-        cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+        if(cpuSidePort == NULL)
+            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
         return cpuSidePort;
     }
-    else if(if_name == "mem_side")
+    if (if_name == "functional")
     {
-        if(memSidePort != NULL)
+        if(cpuSidePort == NULL)
+            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+        return cpuSidePort;
+    }
+    else if (if_name == "mem_side")
+    {
+        if (memSidePort != NULL)
             panic("Already have a mem side for this cache\n");
         memSidePort = new CachePort(name() + "-mem_side_port", this, false);
         return memSidePort;
@@ -121,9 +121,10 @@ BaseCache::getPort(const std::string &if_name, int idx)
 void
 BaseCache::regStats()
 {
-    Request temp_req;
+    Request temp_req((Addr) NULL, 4, 0);
     Packet::Command temp_cmd = Packet::ReadReq;
     Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo FIx command strings so this isn't neccessary
+    temp_pkt.allocate(); //Temp allocate, all need data
 
     using namespace Stats;
 
@@ -331,4 +337,5 @@ BaseCache::regStats()
         .name(name() + ".cache_copies")
         .desc("number of cache copies performed")
         ;
+
 }
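
A hedged usage sketch of the temporary getPort() interface above (the caller and the cache pointer are illustrative, not part of this change; only the calls shown in the hunk are assumed):

    // Illustrative only: how a caller would obtain the temporary ports
    // exposed by BaseCache::getPort() above.  "cache" stands for any
    // already-constructed BaseCache object.
    Port *cpu_port  = cache->getPort("", 0);            // lazily creates the cpu-side port
    Port *func_port = cache->getPort("functional", 0);  // returns the same cpu-side port
    Port *mem_port  = cache->getPort("mem_side", 0);    // creates the mem-side port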

src/mem/cache/cache.hh

@@ -149,11 +149,9 @@ class Cache : public BaseCache
 
     virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort,
                                 bool isCpuSide);
 
-    virtual Tick doAtomicAccess(Packet *pkt, CachePort *cachePort,
-                                bool isCpuSide);
+    virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide);
 
-    virtual void doFunctionalAccess(Packet *pkt, CachePort *cachePort,
-                                    bool isCpuSide);
+    virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide);
 
     virtual void recvStatusChange(Port::Status status, bool isCpuSide);

src/mem/cache/cache_impl.hh

@@ -77,7 +77,7 @@ doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
 template<class TagStore, class Buffering, class Coherence>
 Tick
 Cache<TagStore,Buffering,Coherence>::
-doAtomicAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
+doAtomicAccess(Packet *pkt, bool isCpuSide)
 {
     if (isCpuSide)
     {
@@ -97,18 +97,18 @@ doAtomicAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
 
 template<class TagStore, class Buffering, class Coherence>
 void
 Cache<TagStore,Buffering,Coherence>::
-doFunctionalAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
+doFunctionalAccess(Packet *pkt, bool isCpuSide)
 {
     if (isCpuSide)
     {
-        probe(pkt, false);
+        probe(pkt, true);
     }
     else
     {
         if (pkt->isResponse())
             handleResponse(pkt);
         else
-            snoopProbe(pkt, false);
+            snoopProbe(pkt, true);
     }
 }

src/mem/cache/miss/miss_queue.cc

@@ -58,9 +58,10 @@ MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
 void
 MissQueue::regStats(const string &name)
 {
-    Request temp_req;
+    Request temp_req((Addr) NULL, 4, 0);
     Packet::Command temp_cmd = Packet::ReadReq;
     Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo FIx command strings so this isn't neccessary
+    temp_pkt.allocate();
 
     using namespace Stats;
 

src/python/m5/objects/BaseCache.py

@@ -1,29 +1,26 @@
 from m5.config import *
-from BaseMem import BaseMem
+from MemObject import MemObject
 
 class Prefetch(Enum): vals = ['none', 'tagged', 'stride', 'ghb']
 
-class BaseCache(BaseMem):
+class BaseCache(MemObject):
     type = 'BaseCache'
     adaptive_compression = Param.Bool(False,
         "Use an adaptive compression scheme")
     assoc = Param.Int("associativity")
     block_size = Param.Int("block size in bytes")
+    latency = Param.Int("Latency")
     compressed_bus = Param.Bool(False,
         "This cache connects to a compressed memory")
     compression_latency = Param.Latency('0ns',
         "Latency in cycles of compression algorithm")
     do_copy = Param.Bool(False, "perform fast copies in the cache")
     hash_delay = Param.Int(1, "time in cycles of hash access")
-    in_bus = Param.Bus(NULL, "incoming bus object")
     lifo = Param.Bool(False,
         "whether this NIC partition should use LIFO repl. policy")
     max_miss_count = Param.Counter(0,
         "number of misses to handle before calling exit")
-    mem_trace = Param.MemTraceWriter(NULL,
-        "memory trace writer to record accesses")
     mshrs = Param.Int("number of MSHRs (max outstanding requests)")
-    out_bus = Param.Bus("outgoing bus object")
     prioritizeRequests = Param.Bool(False,
         "always service demand misses first")
     protocol = Param.CoherenceProtocol(NULL, "coherence protocol to use")
@@ -63,3 +60,6 @@ class BaseCache(BaseMem):
         "Use the CPU ID to seperate calculations of prefetches")
     prefetch_data_accesses_only = Param.Bool(False,
         "Only prefetch on data not on instruction accesses")
+    hit_latency = Param.Int(1,"Hit Latency of the cache")
+    cpu_side = Port("Port on side closer to CPU")
+    mem_side = Port("Port on side closer to MEM")