ruby: Ruby changes required to use the python config system

This patch includes the necessary changes to connect ruby objects using
the python configuration system.  Mainly it consists of removing
unnecessary ruby object pointers and connecting the necessary object
pointers using the generated param objects.  This patch includes the
slicc changes necessary to connect generated ruby objects together using
the python configuration system.
This commit is contained in:
Brad Beckmann 2010-01-29 20:29:19 -08:00
parent 42bebab779
commit ed81489954
26 changed files with 741 additions and 128 deletions

View file

@ -102,8 +102,6 @@ class L2Cache(RubyCache):
# consistent with the NetDest list. Therefore the l1 controller nodes must be # consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc. # listed before the directory nodes and directory nodes before dma nodes, etc.
# #
# net_nodes = []
l1_cntrl_nodes = [] l1_cntrl_nodes = []
dir_cntrl_nodes = [] dir_cntrl_nodes = []
@ -112,24 +110,44 @@ dir_cntrl_nodes = []
# controller constructors are called before the network constructor # controller constructors are called before the network constructor
# #
for (i, cpu) in enumerate(cpus): for (i, cpu) in enumerate(cpus):
l1_cntrl = L1Cache_Controller() #
cpu_seq = RubySequencer(controller = l1_cntrl, # First create the Ruby objects associated with this cpu
icache = L1Cache(controller = l1_cntrl), # Eventually this code should go in a python file specific to the
dcache = L1Cache(controller = l1_cntrl)) # MOESI_hammer protocol
cpu.controller = l1_cntrl #
cpu.sequencer = cpu_seq
cpu.test = cpu_seq.port
cpu_seq.funcmem_port = system.physmem.port
cpu.functional = system.funcmem.port
l1i_cache = L1Cache()
l1d_cache = L1Cache()
l2_cache = L2Cache()
cpu_seq = RubySequencer(icache = l1i_cache,
dcache = l1d_cache,
funcmem_port = system.physmem.port)
l1_cntrl = L1Cache_Controller(version = i,
sequencer = cpu_seq,
L1IcacheMemory = l1i_cache,
L1DcacheMemory = l1d_cache,
L2cacheMemory = l2_cache)
dir_cntrl = Directory_Controller(version = i, dir_cntrl = Directory_Controller(version = i,
directory = RubyDirectoryMemory(), directory = RubyDirectoryMemory(),
memory_control = RubyMemoryControl()) memBuffer = RubyMemoryControl())
# net_nodes += [l1_cntrl, dir_cntrl] #
# As noted above: Two independent lists are tracked to maintain the order of
# nodes/controllers assumed by the ruby network
#
l1_cntrl_nodes.append(l1_cntrl) l1_cntrl_nodes.append(l1_cntrl)
dir_cntrl_nodes.append(dir_cntrl) dir_cntrl_nodes.append(dir_cntrl)
#
# Finally tie the memtester ports to the correct system ports
#
cpu.test = cpu_seq.port
cpu.functional = system.funcmem.port
# #
# Important: the topology constructor must be called before the network # Important: the topology constructor must be called before the network
# constructor. # constructor.

View file

@ -34,7 +34,11 @@
*/ */
machine(L1Cache, "AMD Hammer-like protocol") machine(L1Cache, "AMD Hammer-like protocol")
: int cache_response_latency = 12, : Sequencer * sequencer,
CacheMemory * L1IcacheMemory,
CacheMemory * L1DcacheMemory,
CacheMemory * L2cacheMemory,
int cache_response_latency = 12,
int issue_latency = 2 int issue_latency = 2
{ {
@ -104,7 +108,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
// STRUCTURE DEFINITIONS // STRUCTURE DEFINITIONS
MessageBuffer mandatoryQueue, ordered="false"; MessageBuffer mandatoryQueue, ordered="false";
Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
// CacheEntry // CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") { structure(Entry, desc="...", interface="AbstractCacheEntry") {
@ -122,17 +125,6 @@ machine(L1Cache, "AMD Hammer-like protocol")
bool Sharers, desc="On a GetS, did we find any other sharers in the system"; bool Sharers, desc="On a GetS, did we find any other sharers in the system";
} }
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
void allocate(Address, Entry);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
bool isTagPresent(Address);
void profileMiss(CacheMsg);
}
external_type(TBETable) { external_type(TBETable) {
TBE lookup(Address); TBE lookup(Address);
void allocate(Address); void allocate(Address);
@ -141,17 +133,14 @@ machine(L1Cache, "AMD Hammer-like protocol")
} }
TBETable TBEs, template_hack="<L1Cache_TBE>"; TBETable TBEs, template_hack="<L1Cache_TBE>";
CacheMemory L1IcacheMemory, factory='RubySystem::getCache(m_cfg["icache"])';
CacheMemory L1DcacheMemory, factory='RubySystem::getCache(m_cfg["dcache"])';
CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["l2cache"])';
Entry getCacheEntry(Address addr), return_by_ref="yes" { Entry getCacheEntry(Address addr), return_by_ref="yes" {
if (L2cacheMemory.isTagPresent(addr)) { if (L2cacheMemory.isTagPresent(addr)) {
return L2cacheMemory[addr]; return static_cast(Entry, L2cacheMemory[addr]);
} else if (L1DcacheMemory.isTagPresent(addr)) { } else if (L1DcacheMemory.isTagPresent(addr)) {
return L1DcacheMemory[addr]; return static_cast(Entry, L1DcacheMemory[addr]);
} else { } else {
return L1IcacheMemory[addr]; return static_cast(Entry, L1IcacheMemory[addr]);
} }
} }
@ -670,17 +659,21 @@ machine(L1Cache, "AMD Hammer-like protocol")
action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") { action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
if (L1DcacheMemory.isTagPresent(address)) { if (L1DcacheMemory.isTagPresent(address)) {
L2cacheMemory[address] := L1DcacheMemory[address]; static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1DcacheMemory[address]).Dirty;
static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1DcacheMemory[address]).DataBlk;
} else { } else {
L2cacheMemory[address] := L1IcacheMemory[address]; static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1IcacheMemory[address]).Dirty;
static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1IcacheMemory[address]).DataBlk;
} }
} }
action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") { action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
if (L1DcacheMemory.isTagPresent(address)) { if (L1DcacheMemory.isTagPresent(address)) {
L1DcacheMemory[address] := L2cacheMemory[address]; static_cast(Entry, L1DcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
static_cast(Entry, L1DcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
} else { } else {
L1IcacheMemory[address] := L2cacheMemory[address]; static_cast(Entry, L1IcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
static_cast(Entry, L1IcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
} }
} }

View file

@ -34,7 +34,9 @@
*/ */
machine(Directory, "AMD Hammer-like protocol") machine(Directory, "AMD Hammer-like protocol")
: int memory_controller_latency = 12 : DirectoryMemory * directory,
MemoryControl * memBuffer,
int memory_controller_latency = 12
{ {
MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false"; MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
@ -108,20 +110,11 @@ machine(Directory, "AMD Hammer-like protocol")
// TYPES // TYPES
// DirectoryEntry // DirectoryEntry
structure(Entry, desc="...") { structure(Entry, desc="...", interface="AbstractEntry") {
State DirectoryState, desc="Directory state"; State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block"; DataBlock DataBlk, desc="data for the block";
} }
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
external_type(MemoryControl, inport="yes", outport="yes") {
}
// TBE entries for DMA requests // TBE entries for DMA requests
structure(TBE, desc="TBE entries for outstanding DMA requests") { structure(TBE, desc="TBE entries for outstanding DMA requests") {
Address PhysicalAddress, desc="physical address"; Address PhysicalAddress, desc="physical address";
@ -145,17 +138,17 @@ machine(Directory, "AMD Hammer-like protocol")
// ** OBJECTS ** // ** OBJECTS **
DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
TBETable TBEs, template_hack="<Directory_TBE>"; TBETable TBEs, template_hack="<Directory_TBE>";
Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
return static_cast(Entry, directory[addr]);
}
State getState(Address addr) { State getState(Address addr) {
if (TBEs.isPresent(addr)) { if (TBEs.isPresent(addr)) {
return TBEs[addr].TBEState; return TBEs[addr].TBEState;
} else { } else {
return directory[addr].DirectoryState; return getDirectoryEntry(addr).DirectoryState;
} }
} }
@ -163,7 +156,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (TBEs.isPresent(addr)) { if (TBEs.isPresent(addr)) {
TBEs[addr].TBEState := state; TBEs[addr].TBEState := state;
} }
directory[addr].DirectoryState := state; getDirectoryEntry(addr).DirectoryState := state;
} }
MessageBuffer triggerQueue, ordered="true"; MessageBuffer triggerQueue, ordered="true";
@ -454,7 +447,7 @@ machine(Directory, "AMD Hammer-like protocol")
out_msg.Sender := machineID; out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize; out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := directory[address].DataBlk; out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
DEBUG_EXPR(out_msg); DEBUG_EXPR(out_msg);
} }
} }
@ -468,7 +461,7 @@ machine(Directory, "AMD Hammer-like protocol")
out_msg.Sender := machineID; out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor; out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize; out_msg.MessageSize := in_msg.MessageSize;
out_msg.DataBlk := directory[address].DataBlk; out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
DEBUG_EXPR(out_msg); DEBUG_EXPR(out_msg);
} }
} }
@ -564,15 +557,15 @@ machine(Directory, "AMD Hammer-like protocol")
peek(unblockNetwork_in, ResponseMsg) { peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty); assert(in_msg.Dirty);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Data); assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
directory[address].DataBlk := in_msg.DataBlk; getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address); DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk); DEBUG_EXPR(in_msg.DataBlk);
} }
} }
action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") { action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
directory[address].DataBlk := TBEs[address].DataBlk; getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
directory[address].DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len); getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
} }
action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") { action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
@ -610,7 +603,7 @@ machine(Directory, "AMD Hammer-like protocol")
// implementation. We include the data in the "dataless" // implementation. We include the data in the "dataless"
// message so we can assert the clean data matches the datablock // message so we can assert the clean data matches the datablock
// in memory // in memory
assert(directory[address].DataBlk == in_msg.DataBlk); assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
} }
} }

View file

@ -98,6 +98,30 @@ external_type(Sequencer) {
void profileNack(Address, int, int, uint64); void profileNack(Address, int, int, uint64);
} }
external_type(AbstractEntry, primitive="yes");
external_type(DirectoryMemory) {
AbstractEntry lookup(Address);
bool isPresent(Address);
}
external_type(AbstractCacheEntry, primitive="yes");
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
void allocate(Address, AbstractCacheEntry);
void deallocate(Address);
AbstractCacheEntry lookup(Address);
void changePermission(Address, AccessPermission);
bool isTagPresent(Address);
void profileMiss(CacheMsg);
}
external_type(MemoryControl, inport="yes", outport="yes") {
}
external_type(TimerTable, inport="yes") { external_type(TimerTable, inport="yes") {
bool isReady(); bool isReady();
Address readyAddress(); Address readyAddress();
@ -119,3 +143,5 @@ external_type(GenericBloomFilter) {
} }

View file

@ -87,6 +87,7 @@ def MakeInclude(source):
target = generated_dir.File(basename(source)) target = generated_dir.File(basename(source))
env.Command(target, source, MakeIncludeAction) env.Command(target, source, MakeIncludeAction)
MakeInclude('slicc_interface/AbstractEntry.hh')
MakeInclude('slicc_interface/AbstractCacheEntry.hh') MakeInclude('slicc_interface/AbstractCacheEntry.hh')
MakeInclude('slicc_interface/AbstractProtocol.hh') MakeInclude('slicc_interface/AbstractProtocol.hh')
MakeInclude('slicc_interface/Message.hh') MakeInclude('slicc_interface/Message.hh')

View file

@ -40,10 +40,11 @@
#include "mem/ruby/common/Global.hh" #include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh" #include "mem/ruby/common/Address.hh"
#include "mem/protocol/AccessPermission.hh" #include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/slicc_interface/AbstractEntry.hh"
class DataBlock; class DataBlock;
class AbstractCacheEntry { class AbstractCacheEntry : public AbstractEntry {
public: public:
// Constructors // Constructors
AbstractCacheEntry(); AbstractCacheEntry();
@ -51,15 +52,6 @@ public:
// Destructor, prevent it from instantiation // Destructor, prevent it from instantiation
virtual ~AbstractCacheEntry() = 0; virtual ~AbstractCacheEntry() = 0;
// Public Methods
// The methods below are those called by ruby runtime, add when it is
// absolutely necessary and should all be virtual function.
virtual DataBlock& getDataBlk() = 0;
virtual void print(ostream& out) const = 0;
// Data Members (m_ prefix) // Data Members (m_ prefix)
Address m_Address; // Address of this block, required by CacheMemory Address m_Address; // Address of this block, required by CacheMemory
Time m_LastRef; // Last time this block was referenced, required by CacheMemory Time m_LastRef; // Last time this block was referenced, required by CacheMemory

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/ruby/slicc_interface/AbstractEntry.hh"
// AbstractEntry is an abstract base class (pure virtual destructor), so the
// constructor and destructor bodies must be provided out-of-line here for
// subclasses to link against.
// Must define constructor and destructor in subclasses
AbstractEntry::AbstractEntry() {
}
AbstractEntry::~AbstractEntry() {
}

View file

@ -0,0 +1,73 @@
/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef AbstractEntry_H
#define AbstractEntry_H
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/protocol/AccessPermission.hh"
class DataBlock;
// Common abstract base for SLICC-generated directory and cache entries.
// Gives the Ruby runtime a uniform way to reach an entry's data block and
// to print it for debugging, without knowing the protocol-specific type.
class AbstractEntry {
public:
// Constructors
AbstractEntry();
// Destructor, prevent it from instantiation
virtual ~AbstractEntry() = 0;
// Public Methods
// The methods below are those called by ruby runtime, add when it is
// absolutely necessary and should all be virtual function.
virtual DataBlock& getDataBlk() = 0;
virtual void print(ostream& out) const = 0;
};
// Output operator declaration
ostream& operator<<(ostream& out, const AbstractEntry& obj);
// ******************* Definitions *******************
// Output operator definition
extern inline
ostream& operator<<(ostream& out, const AbstractEntry& obj)
{
obj.print(out);
out << flush;
return out;
}
#endif //AbstractEntry_H

View file

@ -35,6 +35,7 @@ if not env['RUBY']:
SimObject('Controller.py') SimObject('Controller.py')
Source('AbstractEntry.cc')
Source('AbstractCacheEntry.cc') Source('AbstractCacheEntry.cc')
Source('RubySlicc_Profiler_interface.cc') Source('RubySlicc_Profiler_interface.cc')
Source('RubySlicc_ComponentMapping.cc') Source('RubySlicc_ComponentMapping.cc')

View file

@ -9,4 +9,3 @@ class RubyCache(SimObject):
latency = Param.Int(""); latency = Param.Int("");
assoc = Param.Int(""); assoc = Param.Int("");
replacement_policy = Param.String("PSEUDO_LRU", ""); replacement_policy = Param.String("PSEUDO_LRU", "");
controller = Param.RubyController("");

View file

@ -31,9 +31,6 @@
int CacheMemory::m_num_last_level_caches = 0; int CacheMemory::m_num_last_level_caches = 0;
MachineType CacheMemory::m_last_level_machine_type = MachineType_FIRST; MachineType CacheMemory::m_last_level_machine_type = MachineType_FIRST;
// Output operator declaration
//ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj);
// ******************* Definitions ******************* // ******************* Definitions *******************
// Output operator definition // Output operator definition
@ -56,29 +53,27 @@ RubyCacheParams::create()
CacheMemory::CacheMemory(const Params *p) CacheMemory::CacheMemory(const Params *p)
: SimObject(p) : SimObject(p)
{ {
int cache_size = p->size; m_cache_size = p->size;
m_latency = p->latency; m_latency = p->latency;
m_cache_assoc = p->assoc; m_cache_assoc = p->assoc;
string policy = p->replacement_policy; m_policy = p->replacement_policy;
m_controller = p->controller;
int num_lines = cache_size/RubySystem::getBlockSizeBytes();
m_cache_num_sets = num_lines / m_cache_assoc;
m_cache_num_set_bits = log_int(m_cache_num_sets);
assert(m_cache_num_set_bits > 0);
if(policy == "PSEUDO_LRU")
m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
else if (policy == "LRU")
m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
else
assert(false);
} }
void CacheMemory::init() void CacheMemory::init()
{ {
m_cache_num_sets = (m_cache_size / m_cache_assoc) / RubySystem::getBlockSizeBytes();
assert(m_cache_num_sets > 1);
m_cache_num_set_bits = log_int(m_cache_num_sets);
assert(m_cache_num_set_bits > 0);
if(m_policy == "PSEUDO_LRU")
m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
else if (m_policy == "LRU")
m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
else
assert(false);
m_num_last_level_caches = m_num_last_level_caches =
MachineType_base_count(MachineType_FIRST); MachineType_base_count(MachineType_FIRST);
#if 0 #if 0
@ -126,8 +121,6 @@ CacheMemory::numberOfLastLevelCaches()
void CacheMemory::printConfig(ostream& out) void CacheMemory::printConfig(ostream& out)
{ {
out << "Cache config: " << m_cache_name << endl; out << "Cache config: " << m_cache_name << endl;
if (m_controller != NULL)
out << " controller: " << m_controller->getName() << endl;
out << " cache_associativity: " << m_cache_assoc << endl; out << " cache_associativity: " << m_cache_assoc << endl;
out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl; out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
const int cache_num_sets = 1 << m_cache_num_set_bits; const int cache_num_sets = 1 << m_cache_num_set_bits;

View file

@ -72,10 +72,6 @@ public:
// Destructor // Destructor
~CacheMemory(); ~CacheMemory();
// factory
// static CacheMemory* createCache(int level, int num, char split_type, AbstractCacheEntry* (*entry_factory)());
// static CacheMemory* getCache(int cache_id);
// Public Methods // Public Methods
void printConfig(ostream& out); void printConfig(ostream& out);
@ -155,7 +151,6 @@ private:
private: private:
const string m_cache_name; const string m_cache_name;
AbstractController* m_controller;
int m_latency; int m_latency;
// Data Members (m_prefix) // Data Members (m_prefix)
@ -172,6 +167,8 @@ private:
CacheProfiler* m_profiler_ptr; CacheProfiler* m_profiler_ptr;
int m_cache_size;
string m_policy;
int m_cache_num_sets; int m_cache_num_sets;
int m_cache_num_set_bits; int m_cache_num_set_bits;
int m_cache_assoc; int m_cache_assoc;

View file

@ -39,7 +39,6 @@
#include "mem/ruby/system/System.hh" #include "mem/ruby/system/System.hh"
#include "mem/ruby/system/DirectoryMemory.hh" #include "mem/ruby/system/DirectoryMemory.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh" #include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/gems_common/util.hh" #include "mem/gems_common/util.hh"
int DirectoryMemory::m_num_directories = 0; int DirectoryMemory::m_num_directories = 0;
@ -52,7 +51,6 @@ DirectoryMemory::DirectoryMemory(const Params *p)
m_version = p->version; m_version = p->version;
m_size_bytes = p->size_mb * static_cast<uint64>(1<<20); m_size_bytes = p->size_mb * static_cast<uint64>(1<<20);
m_size_bits = log_int(m_size_bytes); m_size_bits = log_int(m_size_bytes);
m_controller = p->controller;
} }
void DirectoryMemory::init() void DirectoryMemory::init()
@ -85,7 +83,6 @@ DirectoryMemory::~DirectoryMemory()
void DirectoryMemory::printConfig(ostream& out) const void DirectoryMemory::printConfig(ostream& out) const
{ {
out << "DirectoryMemory module config: " << m_name << endl; out << "DirectoryMemory module config: " << m_name << endl;
out << " controller: " << m_controller->getName() << endl;
out << " version: " << m_version << endl; out << " version: " << m_version << endl;
out << " memory_bits: " << m_size_bits << endl; out << " memory_bits: " << m_size_bits << endl;
out << " memory_size_bytes: " << m_size_bytes << endl; out << " memory_size_bytes: " << m_size_bytes << endl;

View file

@ -46,8 +46,6 @@
#include "sim/sim_object.hh" #include "sim/sim_object.hh"
#include "params/RubyDirectoryMemory.hh" #include "params/RubyDirectoryMemory.hh"
class AbstractController;
class DirectoryMemory : public SimObject { class DirectoryMemory : public SimObject {
public: public:
// Constructors // Constructors
@ -83,7 +81,6 @@ private:
private: private:
const string m_name; const string m_name;
AbstractController* m_controller;
// Data Members (m_ prefix) // Data Members (m_ prefix)
Directory_Entry **m_entries; Directory_Entry **m_entries;
// int m_size; // # of memory module blocks this directory is responsible for // int m_size; // # of memory module blocks this directory is responsible for

View file

@ -7,4 +7,3 @@ class RubyDirectoryMemory(SimObject):
cxx_class = 'DirectoryMemory' cxx_class = 'DirectoryMemory'
version = Param.Int(0, "") version = Param.Int(0, "")
size_mb = Param.Int(1024, "") size_mb = Param.Int(1024, "")
controller = Param.RubyController(Parent.any, "")

View file

@ -31,27 +31,290 @@
#include "mem/ruby/system/RubyPort.hh" #include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh" #include "mem/ruby/slicc_interface/AbstractController.hh"
//void (*RubyPort::m_hit_callback)(int64_t) = NULL;
uint16_t RubyPort::m_num_ports = 0; uint16_t RubyPort::m_num_ports = 0;
RubyPort::RequestMap RubyPort::pending_cpu_requests;
RubyPort::RubyPort(const Params *p) RubyPort::RubyPort(const Params *p)
: MemObject(p) : MemObject(p),
funcMemPort(csprintf("%s-funcmem_port", name()), this)
{ {
m_version = p->version; m_version = p->version;
assert(m_version != -1); assert(m_version != -1);
m_controller = p->controller;
assert(m_controller != NULL);
m_mandatory_q_ptr = m_controller->getMandatoryQueue(); m_controller = NULL;
m_mandatory_q_ptr = NULL;
m_port_id = m_num_ports++; m_port_id = m_num_ports++;
m_request_cnt = 0; m_request_cnt = 0;
m_hit_callback = NULL; m_hit_callback = ruby_hit_callback;
pio_port = NULL;
assert(m_num_ports <= 2048); // see below for reason assert(m_num_ports <= 2048); // see below for reason
} }
// Deferred initialization: m_controller is assigned by the owning protocol
// after construction (the constructor sets it to NULL), so the mandatory
// queue pointer can only be fetched here, once all SimObjects exist.
void RubyPort::init()
{
assert(m_controller != NULL);
m_mandatory_q_ptr = m_controller->getMandatoryQueue();
}
Port * Port *
RubyPort::getPort(const std::string &if_name, int idx) RubyPort::getPort(const std::string &if_name, int idx)
{ {
if (if_name == "port") {
return new M5Port(csprintf("%s-port%d", name(), idx), this);
} else if (if_name == "pio_port") {
//
// ensure there is only one pio port
//
assert(pio_port == NULL);
pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx),
this);
return pio_port;
} else if (if_name == "funcmem_port") {
return &funcMemPort;
}
return NULL; return NULL;
} }
// PioPort forwards non-memory (pio) requests from Ruby out to devices.
// _name is used for trace output; _port is the owning RubyPort, cached so
// receive handlers can route responses back through it.
RubyPort::PioPort::PioPort(const std::string &_name,
RubyPort *_port)
: SimpleTimingPort(_name, _port)
{
DPRINTF(Ruby, "creating port to ruby sequencer to cpu %s\n", _name);
ruby_port = _port;
}
// M5Port accepts timing requests from an M5 CPU/DMA object and hands them
// to this Ruby port.
//   _name - port name, used in trace output
//   _port - owning RubyPort; cached so receive/callback paths can reach it
RubyPort::M5Port::M5Port(const std::string &_name,
                         RubyPort *_port)
    : SimpleTimingPort(_name, _port)
{
    // Fix misspelled trace message ("sequcner" -> "sequencer").
    DPRINTF(Ruby, "creating port from ruby sequencer to cpu %s\n", _name);
    ruby_port = _port;
}
// Atomic-mode accesses are not supported by the Ruby memory system; any
// atomic request arriving on the pio port is a configuration error.
Tick
RubyPort::PioPort::recvAtomic(PacketPtr pkt)
{
panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
return 0;
}
// Atomic-mode accesses are not supported by the Ruby memory system; any
// atomic request arriving on the cpu-side port is a configuration error.
Tick
RubyPort::M5Port::recvAtomic(PacketPtr pkt)
{
panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
return 0;
}
// Receive a pio RESPONSE from a device and route it back to the M5Port
// that originated the request.  Returns true: the response is always
// consumed here (forwarding uses schedSendTiming, which cannot refuse).
bool
RubyPort::PioPort::recvTiming(PacketPtr pkt)
{
//
// In FS mode, ruby memory will receive pio responses from devices and
// it must forward these responses back to the particular CPU.
//
DPRINTF(MemoryAccess,
"Pio response for address %#x\n",
pkt->getAddr());
assert(pkt->isResponse());
//
// First we must retrieve the request port from the sender State
// (pushed onto the packet by M5Port::recvTiming when the request
// was routed out through this pio port).
//
RubyPort::SenderState *senderState =
safe_cast<RubyPort::SenderState *>(pkt->senderState);
M5Port *port = senderState->port;
assert(port != NULL);
// pop the sender state from the packet
pkt->senderState = senderState->saved;
delete senderState;
port->sendTiming(pkt);
return true;
}
// Receive a timing request from the attached M5 object.  Pio requests are
// redirected to the dedicated pio port; memory requests are translated to
// a RubyRequest and submitted to the sequencer.  Returns false only when
// the sequencer cannot accept the request (caller must retry).
bool
RubyPort::M5Port::recvTiming(PacketPtr pkt)
{
DPRINTF(MemoryAccess,
"Timing access caught for address %#x\n",
pkt->getAddr());
//dsm: based on SimpleTimingPort::recvTiming(pkt);
//
// After checking for pio responses, the remainder of packets
// received by ruby should only be M5 requests, which should never
// get nacked. There used to be code to handle nacks here, but
// I'm pretty sure it didn't work correctly with the drain code,
// so that would need to be fixed if we ever added it back.
//
assert(pkt->isRequest());
if (pkt->memInhibitAsserted()) {
warn("memInhibitAsserted???");
// snooper will supply based on copy of packet
// still target's responsibility to delete packet
delete pkt;
return true;
}
//
// Check for pio requests and directly send them to the dedicated
// pio port.
//
if (!isPhysMemAddress(pkt->getAddr())) {
assert(ruby_port->pio_port != NULL);
//
// Save the port in the sender state object to be used later to
// route the response
//
pkt->senderState = new SenderState(this, pkt->senderState);
return ruby_port->pio_port->sendTiming(pkt);
}
//
// For DMA and CPU requests, translate them to ruby requests before
// sending them to our assigned ruby port.
//
RubyRequestType type = RubyRequestType_NULL;
Addr pc = 0;
if (pkt->isRead()) {
if (pkt->req->isInstFetch()) {
type = RubyRequestType_IFETCH;
pc = pkt->req->getPC();
} else {
type = RubyRequestType_LD;
}
} else if (pkt->isWrite()) {
type = RubyRequestType_ST;
} else if (pkt->isReadWrite()) {
type = RubyRequestType_RMW_Write;
}
RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
pkt->getSize(), pc, type,
RubyAccessMode_Supervisor);
// Submit the ruby request
int64_t req_id = ruby_port->makeRequest(ruby_request);
if (req_id == -1) {
// NOTE(review): -1 presumably means the sequencer is full; returning
// false makes the sender retry later -- confirm against makeRequest.
return false;
}
// Save the request for the callback
RubyPort::pending_cpu_requests[req_id] = new RequestCookie(pkt, this);
return true;
}
// Static callback invoked by Ruby when request req_id completes.  Looks up
// the pending-request cookie saved by M5Port::recvTiming and hands the
// original packet back to the port that issued it.
void
RubyPort::ruby_hit_callback(int64_t req_id)
{
//
// Note: This single function can be called by cpu and dma ports,
// as well as the functional port.
//
RequestMap::iterator i = pending_cpu_requests.find(req_id);
if (i == pending_cpu_requests.end())
panic("could not find pending request %d\n", req_id);
RequestCookie *cookie = i->second;
pending_cpu_requests.erase(i);
Packet *pkt = cookie->pkt;
M5Port *port = cookie->m5Port;
delete cookie;
// cookie is removed from the map and freed before the callback so the
// entry cannot be seen twice.
port->hitCallback(pkt);
}
// Complete a request whose Ruby (timing) access has finished: perform the
// actual data movement through the functional memory port, then either send
// the response back to the requester or free the packet.
void
RubyPort::M5Port::hitCallback(PacketPtr pkt)
{
bool needsResponse = pkt->needsResponse();
DPRINTF(MemoryAccess, "Hit callback needs response %d\n",
needsResponse);
// Ruby models timing only; the real read/write of memory contents is
// done functionally against the backing store here.
ruby_port->funcMemPort.sendFunctional(pkt);
// turn packet around to go back to requester if response expected
if (needsResponse) {
// recvAtomic() should already have turned packet into
// atomic response
assert(pkt->isResponse());
DPRINTF(MemoryAccess, "Sending packet back over port\n");
sendTiming(pkt);
} else {
delete pkt;
}
DPRINTF(MemoryAccess, "Hit callback done!\n");
}
// Queue pkt for delivery on the next tick.  Always succeeds because
// schedSendTiming buffers the packet internally.
bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
return true;
}
// Queue pkt for delivery on the next tick.  Always succeeds because
// schedSendTiming buffers the packet internally.
bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
return true;
}
// Return true if addr falls inside the physical-memory ranges reported by
// the peer of the functional memory port.  Any address outside those
// ranges is asserted to be a pio address.
bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
    AddrRangeList ranges;
    bool snoop = false;

    // Ask the functional memory port's peer which ranges it owns.
    ruby_port->funcMemPort.getPeerAddressRanges(ranges, snoop);

    for (AddrRangeIter it = ranges.begin(); it != ranges.end(); ++it) {
        bool in_range = (addr >= it->start) && (addr <= it->end);
        if (in_range) {
            DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
                    it->start, it->end);
            return true;
        }
    }

    // Not backed by physical memory: must be a pio access.
    assert(isPioAddress(addr));
    return false;
}
bool
RubyPort::M5Port::isPioAddress(Addr addr)
{
    //
    // Without a connected pio port, no address can be a pio address.
    //
    if (ruby_port->pio_port == NULL)
        return false;

    //
    // Query the pio peer for its address ranges and look for a match.
    //
    AddrRangeList pioAddrList;
    bool snoop = false;
    ruby_port->pio_port->getPeerAddressRanges(pioAddrList, snoop);

    for (AddrRangeIter iter = pioAddrList.begin();
         iter != pioAddrList.end(); ++iter) {
        if (addr >= iter->start && addr <= iter->end) {
            DPRINTF(MemoryAccess, "Pio request found in %#llx - %#llx range\n",
                    iter->start, iter->end);
            return true;
        }
    }

    return false;
}

View file

@ -46,18 +46,81 @@ class AbstractController;
class RubyPort : public MemObject { class RubyPort : public MemObject {
public: public:
class M5Port : public SimpleTimingPort
{
RubyPort *ruby_port;
public:
M5Port(const std::string &_name,
RubyPort *_port);
bool sendTiming(PacketPtr pkt);
void hitCallback(PacketPtr pkt);
protected:
virtual bool recvTiming(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt);
private:
bool isPioAddress(Addr addr);
bool isPhysMemAddress(Addr addr);
};
friend class M5Port;
class PioPort : public SimpleTimingPort
{
RubyPort *ruby_port;
public:
PioPort(const std::string &_name,
RubyPort *_port);
bool sendTiming(PacketPtr pkt);
protected:
virtual bool recvTiming(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt);
};
friend class PioPort;
struct SenderState : public Packet::SenderState
{
M5Port* port;
Packet::SenderState *saved;
SenderState(M5Port* _port,
Packet::SenderState *sender_state = NULL)
: port(_port), saved(sender_state)
{}
};
typedef RubyPortParams Params; typedef RubyPortParams Params;
RubyPort(const Params *p); RubyPort(const Params *p);
virtual ~RubyPort() {} virtual ~RubyPort() {}
void init();
Port *getPort(const std::string &if_name, int idx); Port *getPort(const std::string &if_name, int idx);
virtual int64_t makeRequest(const RubyRequest & request) = 0; virtual int64_t makeRequest(const RubyRequest & request) = 0;
void registerHitCallback(void (*hit_callback)(int64_t request_id)) { void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
assert(m_hit_callback == NULL); // can't assign hit_callback twice //
m_hit_callback = hit_callback; // Can't assign hit_callback twice and by default it is set to the
} // RubyPort's default callback function.
//
assert(m_hit_callback == ruby_hit_callback);
m_hit_callback = hit_callback;
}
//
// Called by the controller to give the sequencer a pointer.
// A pointer to the controller is needed for atomic support.
//
void setController(AbstractController* _cntrl) { m_controller = _cntrl; }
protected: protected:
const string m_name; const string m_name;
@ -87,11 +150,26 @@ protected:
int m_version; int m_version;
AbstractController* m_controller; AbstractController* m_controller;
MessageBuffer* m_mandatory_q_ptr; MessageBuffer* m_mandatory_q_ptr;
PioPort* pio_port;
private: private:
static uint16_t m_num_ports; static uint16_t m_num_ports;
uint16_t m_port_id; uint16_t m_port_id;
uint64_t m_request_cnt; uint64_t m_request_cnt;
struct RequestCookie {
Packet *pkt;
M5Port *m5Port;
RequestCookie(Packet *p, M5Port *m5p)
: pkt(p), m5Port(m5p)
{}
};
typedef std::map<int64_t, RequestCookie*> RequestMap;
static RequestMap pending_cpu_requests;
static void ruby_hit_callback(int64_t req_id);
FunctionalPort funcMemPort;
}; };
#endif #endif

View file

@ -118,9 +118,6 @@ private:
CacheMemory* m_dataCache_ptr; CacheMemory* m_dataCache_ptr;
CacheMemory* m_instCache_ptr; CacheMemory* m_instCache_ptr;
// indicates what processor on the chip this sequencer is associated with
int m_controller_type;
Map<Address, SequencerRequest*> m_writeRequestTable; Map<Address, SequencerRequest*> m_writeRequestTable;
Map<Address, SequencerRequest*> m_readRequestTable; Map<Address, SequencerRequest*> m_readRequestTable;
// Global outstanding request count, across all request tables // Global outstanding request count, across all request tables

View file

@ -1,12 +1,13 @@
from m5.params import * from m5.params import *
from m5.proxy import *
from MemObject import MemObject from MemObject import MemObject
class RubyPort(MemObject): class RubyPort(MemObject):
type = 'RubyPort' type = 'RubyPort'
abstract = True abstract = True
port = VectorPort("M5 port") port = VectorPort("M5 port")
controller = Param.RubyController("")
version = Param.Int(0, "") version = Param.Int(0, "")
pio_port = Port("Ruby_pio_port")
class RubySequencer(RubyPort): class RubySequencer(RubyPort):
type = 'RubySequencer' type = 'RubySequencer'

View file

@ -29,11 +29,12 @@ from slicc.ast.AST import AST
from slicc.symbols import Var from slicc.symbols import Var
class FormalParamAST(AST): class FormalParamAST(AST):
def __init__(self, slicc, type_ast, ident, default = None): def __init__(self, slicc, type_ast, ident, default = None, pointer = False):
super(FormalParamAST, self).__init__(slicc) super(FormalParamAST, self).__init__(slicc)
self.type_ast = type_ast self.type_ast = type_ast
self.ident = ident self.ident = ident
self.default = default self.default = default
self.pointer = pointer
def __repr__(self): def __repr__(self):
return "[FormalParamAST: %s]" % self.ident return "[FormalParamAST: %s]" % self.ident

View file

@ -68,7 +68,8 @@ class MethodCallExprAST(ExprAST):
for actual_type, expected_type in \ for actual_type, expected_type in \
zip(paramTypes, obj_type.methods[methodId].param_types): zip(paramTypes, obj_type.methods[methodId].param_types):
if actual_type != expected_type: if actual_type != expected_type and \
str(actual_type["interface"]) != str(expected_type):
self.error("Type mismatch: expected: %s actual: %s", self.error("Type mismatch: expected: %s actual: %s",
expected_type, actual_type) expected_type, actual_type)
@ -97,9 +98,48 @@ class MemberMethodCallExprAST(MethodCallExprAST):
methodId = obj_type.methodId(self.proc_name, paramTypes) methodId = obj_type.methodId(self.proc_name, paramTypes)
prefix = "" prefix = ""
implements_interface = False
if methodId not in obj_type.methods: if methodId not in obj_type.methods:
self.error("Invalid method call: Type '%s' does not have a method '%s'", #
obj_type, methodId) # The initial method check has failed, but before generating an
# error we must check whether any of the paramTypes implement
# an interface. If so, we must check if the method ids using
# the inherited types exist.
#
# This code is a temporary fix and only checks for the methodId
# where all paramTypes are converted to their inherited type. The
# right way to do this is to replace slicc's simple string
# comparison for determining the correct overloaded method, with a
# more robust param by param check.
#
implemented_paramTypes = []
for paramType in paramTypes:
implemented_paramType = paramType
if paramType.isInterface:
implements_interface = True
implemented_paramType.abstract_ident = paramType["interface"]
else:
implemented_paramType.abstract_ident = paramType.c_ident
implemented_paramTypes.append(implemented_paramType)
if implements_interface:
implementedMethodId = obj_type.methodIdAbstract(self.proc_name,
implemented_paramTypes)
else:
implementedMethodId = ""
if implementedMethodId not in obj_type.methods:
self.error("Invalid method call: " \
"Type '%s' does not have a method '%s' nor '%s'",
obj_type, methodId, implementedMethodId)
else:
#
# Replace the methodId with the implementedMethodId found in
# the method list.
#
methodId = implementedMethodId
return_type = obj_type.methods[methodId].return_type return_type = obj_type.methods[methodId].return_type
if return_type.isInterface: if return_type.isInterface:
prefix = "static_cast<%s &>" % return_type.c_ident prefix = "static_cast<%s &>" % return_type.c_ident

View file

@ -0,0 +1,54 @@
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.ExprAST import ExprAST
class StaticCastAST(ExprAST):
def __init__(self, slicc, type_ast, expr_ast):
super(StaticCastAST, self).__init__(slicc)
self.type_ast = type_ast
self.expr_ast = expr_ast
def __repr__(self):
return "[StaticCastAST: %r]" % self.expr_ast
def generate(self, code):
actual_type, ecode = self.expr_ast.inline(True)
code('static_cast<${{self.type_ast.type.c_ident}} &>($ecode)')
if not "interface" in self.type_ast.type:
self.expr_ast.error("static cast only premitted for those types " \
"that implement inherit an interface")
# The interface type should match
if str(actual_type) != str(self.type_ast.type["interface"]):
self.expr_ast.error("static cast miss-match, type is '%s'," \
"but inherited type is '%s'",
actual_type, self.type_ast.type["interface"])
return self.type_ast.type

View file

@ -59,6 +59,7 @@ from slicc.ast.PeekStatementAST import *
from slicc.ast.ReturnStatementAST import * from slicc.ast.ReturnStatementAST import *
from slicc.ast.StatementAST import * from slicc.ast.StatementAST import *
from slicc.ast.StatementListAST import * from slicc.ast.StatementListAST import *
from slicc.ast.StaticCastAST import *
from slicc.ast.TransitionDeclAST import * from slicc.ast.TransitionDeclAST import *
from slicc.ast.TypeAST import * from slicc.ast.TypeAST import *
from slicc.ast.TypeDeclAST import * from slicc.ast.TypeDeclAST import *

View file

@ -154,6 +154,7 @@ class SLICC(Grammar):
'copy_head' : 'COPY_HEAD', 'copy_head' : 'COPY_HEAD',
'check_allocate' : 'CHECK_ALLOCATE', 'check_allocate' : 'CHECK_ALLOCATE',
'check_stop_slots' : 'CHECK_STOP_SLOTS', 'check_stop_slots' : 'CHECK_STOP_SLOTS',
'static_cast' : 'STATIC_CAST',
'if' : 'IF', 'if' : 'IF',
'else' : 'ELSE', 'else' : 'ELSE',
'return' : 'RETURN', 'return' : 'RETURN',
@ -416,6 +417,10 @@ class SLICC(Grammar):
"param : type ident" "param : type ident"
p[0] = ast.FormalParamAST(self, p[1], p[2]) p[0] = ast.FormalParamAST(self, p[1], p[2])
def p_param__pointer(self, p):
"param : type STAR ident"
p[0] = ast.FormalParamAST(self, p[1], p[3], None, True)
def p_param__default(self, p): def p_param__default(self, p):
"param : type ident '=' NUMBER" "param : type ident '=' NUMBER"
p[0] = ast.FormalParamAST(self, p[1], p[2], p[4]) p[0] = ast.FormalParamAST(self, p[1], p[2], p[4])
@ -531,6 +536,10 @@ class SLICC(Grammar):
"statement : CHECK_STOP_SLOTS '(' var ',' STRING ',' STRING ')' SEMI" "statement : CHECK_STOP_SLOTS '(' var ',' STRING ',' STRING ')' SEMI"
p[0] = ast.CheckStopStatementAST(self, p[3], p[5], p[7]) p[0] = ast.CheckStopStatementAST(self, p[3], p[5], p[7])
def p_statement__static_cast(self, p):
"aexpr : STATIC_CAST '(' type ',' expr ')'"
p[0] = ast.StaticCastAST(self, p[3], p[5])
def p_statement__return(self, p): def p_statement__return(self, p):
"statement : RETURN expr SEMI" "statement : RETURN expr SEMI"
p[0] = ast.ReturnStatementAST(self, p[2]) p[0] = ast.ReturnStatementAST(self, p[2])

View file

@ -31,14 +31,27 @@ from slicc.symbols.Symbol import Symbol
from slicc.symbols.Var import Var from slicc.symbols.Var import Var
import slicc.generate.html as html import slicc.generate.html as html
python_class_map = {"int": "Int",
"string": "String",
"bool": "Bool",
"CacheMemory": "RubyCache",
"Sequencer": "RubySequencer",
"DirectoryMemory": "RubyDirectoryMemory",
"MemoryControl": "RubyMemoryControl",
}
class StateMachine(Symbol): class StateMachine(Symbol):
def __init__(self, symtab, ident, location, pairs, config_parameters): def __init__(self, symtab, ident, location, pairs, config_parameters):
super(StateMachine, self).__init__(symtab, ident, location, pairs) super(StateMachine, self).__init__(symtab, ident, location, pairs)
self.table = None self.table = None
self.config_parameters = config_parameters self.config_parameters = config_parameters
for param in config_parameters: for param in config_parameters:
var = Var(symtab, param.name, location, param.type_ast.type, if param.pointer:
"m_%s" % param.name, {}, self) var = Var(symtab, param.name, location, param.type_ast.type,
"(*m_%s_ptr)" % param.name, {}, self)
else:
var = Var(symtab, param.name, location, param.type_ast.type,
"m_%s" % param.name, {}, self)
self.symtab.registerSym(param.name, var) self.symtab.registerSym(param.name, var)
self.states = orderdict() self.states = orderdict()
@ -153,7 +166,13 @@ class $py_ident(RubyController):
dflt_str = '' dflt_str = ''
if param.default is not None: if param.default is not None:
dflt_str = str(param.default) + ', ' dflt_str = str(param.default) + ', '
code('${{param.name}} = Param.Int(${dflt_str}"")') if python_class_map.has_key(param.type_ast.type.c_ident):
python_type = python_class_map[param.type_ast.type.c_ident]
code('${{param.name}} = Param.${{python_type}}(${dflt_str}"")')
else:
self.error("Unknown c++ to python class conversion for c++ " \
"type: '%s'. Please update the python_class_map " \
"in StateMachine.py", param.type_ast.type.c_ident)
code.dedent() code.dedent()
code.write(path, '%s.py' % py_ident) code.write(path, '%s.py' % py_ident)
@ -223,7 +242,10 @@ private:
code.indent() code.indent()
# added by SS # added by SS
for param in self.config_parameters: for param in self.config_parameters:
code('int m_${{param.ident}};') if param.pointer:
code('${{param.type_ast.type}}* m_${{param.ident}}_ptr;')
else:
code('${{param.type_ast.type}} m_${{param.ident}};')
code(''' code('''
int m_number_of_TBEs; int m_number_of_TBEs;
@ -328,8 +350,34 @@ $c_ident::$c_ident(const Params *p)
m_number_of_TBEs = p->number_of_TBEs; m_number_of_TBEs = p->number_of_TBEs;
''') ''')
code.indent() code.indent()
#
# After initializing the universal machine parameters, initialize the
# this machines config parameters. Also detemine if these configuration
# params include a sequencer. This information will be used later for
# contecting the sequencer back to the L1 cache controller.
#
contains_sequencer = False
for param in self.config_parameters: for param in self.config_parameters:
code('m_${{param.name}} = p->${{param.name}};') if param.name == "sequencer":
contains_sequencer = True
if param.pointer:
code('m_${{param.name}}_ptr = p->${{param.name}};')
else:
code('m_${{param.name}} = p->${{param.name}};')
#
# For the l1 cache controller, add the special atomic support which
# includes passing the sequencer a pointer to the controller.
#
if self.ident == "L1Cache":
if not contains_sequencer:
self.error("The L1Cache controller must include the sequencer " \
"configuration parameter")
code('''
m_sequencer_ptr->setController(this);
''')
code('m_num_controllers++;') code('m_num_controllers++;')
for var in self.objects: for var in self.objects:

View file

@ -51,6 +51,7 @@ class Type(Symbol):
def __init__(self, table, ident, location, pairs, machine=None): def __init__(self, table, ident, location, pairs, machine=None):
super(Type, self).__init__(table, ident, location, pairs) super(Type, self).__init__(table, ident, location, pairs)
self.c_ident = ident self.c_ident = ident
self.abstract_ident = ""
if machine: if machine:
if self.isExternal or self.isPrimitive: if self.isExternal or self.isPrimitive:
if "external_name" in self: if "external_name" in self:
@ -154,6 +155,9 @@ class Type(Symbol):
def methodId(self, name, param_type_vec): def methodId(self, name, param_type_vec):
return '_'.join([name] + [ pt.c_ident for pt in param_type_vec ]) return '_'.join([name] + [ pt.c_ident for pt in param_type_vec ])
def methodIdAbstract(self, name, param_type_vec):
return '_'.join([name] + [ pt.abstract_ident for pt in param_type_vec ])
def methodAdd(self, name, return_type, param_type_vec): def methodAdd(self, name, return_type, param_type_vec):
ident = self.methodId(name, param_type_vec) ident = self.methodId(name, param_type_vec)
if ident in self.methods: if ident in self.methods: