First pass, now compiles with current head of tree.

Compile and initialization work, still working on functionality.

src/mem/cache/base_cache.cc:
    Temp fix for CPUs' use of getPort functionality.  CPUs will need to be ported to the new connector objects.
    Also, all packets have to have data or the delete fails.
src/mem/cache/cache.hh:
    Fix function prototypes so overloading works
src/mem/cache/cache_impl.hh:
    fix functions to match virtual base class
src/mem/cache/miss/miss_queue.cc:
    Packets have to have data, or delete fails
src/python/m5/objects/BaseCache.py:
    Update for newmem

--HG--
extra : convert_revision : 2b6ad1e9d8ae07ace9294cd257e2ccc0024b7fcb
This commit is contained in:
Ron Dreslinski 2006-06-30 16:25:35 -04:00
parent dea1a19b2d
commit 1bdc65b00f
5 changed files with 28 additions and 22 deletions

View file

@ -101,16 +101,21 @@ BaseCache::CachePort::clearBlocked()
Port* Port*
BaseCache::getPort(const std::string &if_name, int idx) BaseCache::getPort(const std::string &if_name, int idx)
{ {
if(if_name == "cpu_side") if (if_name == "")
{ {
if(cpuSidePort != NULL) if(cpuSidePort == NULL)
panic("Already have a cpu side for this cache\n");
cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true); cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
return cpuSidePort; return cpuSidePort;
} }
else if(if_name == "mem_side") if (if_name == "functional")
{ {
if(memSidePort != NULL) if(cpuSidePort == NULL)
cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
return cpuSidePort;
}
else if (if_name == "mem_side")
{
if (memSidePort != NULL)
panic("Already have a mem side for this cache\n"); panic("Already have a mem side for this cache\n");
memSidePort = new CachePort(name() + "-mem_side_port", this, false); memSidePort = new CachePort(name() + "-mem_side_port", this, false);
return memSidePort; return memSidePort;
@ -121,9 +126,10 @@ BaseCache::getPort(const std::string &if_name, int idx)
void void
BaseCache::regStats() BaseCache::regStats()
{ {
Request temp_req; Request temp_req((Addr) NULL, 4, 0);
Packet::Command temp_cmd = Packet::ReadReq; Packet::Command temp_cmd = Packet::ReadReq;
Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo FIx command strings so this isn't neccessary Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo FIx command strings so this isn't neccessary
temp_pkt.allocate(); //Temp allocate, all need data
using namespace Stats; using namespace Stats;
@ -331,4 +337,5 @@ BaseCache::regStats()
.name(name() + ".cache_copies") .name(name() + ".cache_copies")
.desc("number of cache copies performed") .desc("number of cache copies performed")
; ;
} }

View file

@ -149,11 +149,9 @@ class Cache : public BaseCache
virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort, virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort,
bool isCpuSide); bool isCpuSide);
virtual Tick doAtomicAccess(Packet *pkt, CachePort *cachePort, virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide);
bool isCpuSide);
virtual void doFunctionalAccess(Packet *pkt, CachePort *cachePort, virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide);
bool isCpuSide);
virtual void recvStatusChange(Port::Status status, bool isCpuSide); virtual void recvStatusChange(Port::Status status, bool isCpuSide);

View file

@ -77,7 +77,7 @@ doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
template<class TagStore, class Buffering, class Coherence> template<class TagStore, class Buffering, class Coherence>
Tick Tick
Cache<TagStore,Buffering,Coherence>:: Cache<TagStore,Buffering,Coherence>::
doAtomicAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide) doAtomicAccess(Packet *pkt, bool isCpuSide)
{ {
if (isCpuSide) if (isCpuSide)
{ {
@ -97,18 +97,18 @@ doAtomicAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
template<class TagStore, class Buffering, class Coherence> template<class TagStore, class Buffering, class Coherence>
void void
Cache<TagStore,Buffering,Coherence>:: Cache<TagStore,Buffering,Coherence>::
doFunctionalAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide) doFunctionalAccess(Packet *pkt, bool isCpuSide)
{ {
if (isCpuSide) if (isCpuSide)
{ {
probe(pkt, false); probe(pkt, true);
} }
else else
{ {
if (pkt->isResponse()) if (pkt->isResponse())
handleResponse(pkt); handleResponse(pkt);
else else
snoopProbe(pkt, false); snoopProbe(pkt, true);
} }
} }

View file

@ -58,9 +58,10 @@ MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
void void
MissQueue::regStats(const string &name) MissQueue::regStats(const string &name)
{ {
Request temp_req; Request temp_req((Addr) NULL, 4, 0);
Packet::Command temp_cmd = Packet::ReadReq; Packet::Command temp_cmd = Packet::ReadReq;
Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo FIx command strings so this isn't neccessary Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo FIx command strings so this isn't neccessary
temp_pkt.allocate();
using namespace Stats; using namespace Stats;

View file

@ -1,29 +1,26 @@
from m5.config import * from m5.config import *
from BaseMem import BaseMem from MemObject import MemObject
class Prefetch(Enum): vals = ['none', 'tagged', 'stride', 'ghb'] class Prefetch(Enum): vals = ['none', 'tagged', 'stride', 'ghb']
class BaseCache(BaseMem): class BaseCache(MemObject):
type = 'BaseCache' type = 'BaseCache'
adaptive_compression = Param.Bool(False, adaptive_compression = Param.Bool(False,
"Use an adaptive compression scheme") "Use an adaptive compression scheme")
assoc = Param.Int("associativity") assoc = Param.Int("associativity")
block_size = Param.Int("block size in bytes") block_size = Param.Int("block size in bytes")
latency = Param.Int("Latency")
compressed_bus = Param.Bool(False, compressed_bus = Param.Bool(False,
"This cache connects to a compressed memory") "This cache connects to a compressed memory")
compression_latency = Param.Latency('0ns', compression_latency = Param.Latency('0ns',
"Latency in cycles of compression algorithm") "Latency in cycles of compression algorithm")
do_copy = Param.Bool(False, "perform fast copies in the cache") do_copy = Param.Bool(False, "perform fast copies in the cache")
hash_delay = Param.Int(1, "time in cycles of hash access") hash_delay = Param.Int(1, "time in cycles of hash access")
in_bus = Param.Bus(NULL, "incoming bus object")
lifo = Param.Bool(False, lifo = Param.Bool(False,
"whether this NIC partition should use LIFO repl. policy") "whether this NIC partition should use LIFO repl. policy")
max_miss_count = Param.Counter(0, max_miss_count = Param.Counter(0,
"number of misses to handle before calling exit") "number of misses to handle before calling exit")
mem_trace = Param.MemTraceWriter(NULL,
"memory trace writer to record accesses")
mshrs = Param.Int("number of MSHRs (max outstanding requests)") mshrs = Param.Int("number of MSHRs (max outstanding requests)")
out_bus = Param.Bus("outgoing bus object")
prioritizeRequests = Param.Bool(False, prioritizeRequests = Param.Bool(False,
"always service demand misses first") "always service demand misses first")
protocol = Param.CoherenceProtocol(NULL, "coherence protocol to use") protocol = Param.CoherenceProtocol(NULL, "coherence protocol to use")
@ -63,3 +60,6 @@ class BaseCache(BaseMem):
"Use the CPU ID to seperate calculations of prefetches") "Use the CPU ID to seperate calculations of prefetches")
prefetch_data_accesses_only = Param.Bool(False, prefetch_data_accesses_only = Param.Bool(False,
"Only prefetch on data not on instruction accesses") "Only prefetch on data not on instruction accesses")
hit_latency = Param.Int(1,"Hit Latency of the cache")
cpu_side = Port("Port on side closer to CPU")
mem_side = Port("Port on side closer to MEM")