gem5/src/mem/ruby/system/CacheMemory.cc

/*
* Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "base/intmath.hh"
#include "debug/RubyCache.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/System.hh"
using namespace std;
ostream&
operator<<(ostream& out, const CacheMemory& obj)
{
obj.print(out);
out << flush;
return out;
}
CacheMemory *
RubyCacheParams::create()
{
return new CacheMemory(this);
}
CacheMemory::CacheMemory(const Params *p)
: SimObject(p)
{
m_cache_size = p->size;
m_latency = p->latency;
m_cache_assoc = p->assoc;
m_policy = p->replacement_policy;
m_profiler_ptr = new CacheProfiler(name());
m_start_index_bit = p->start_index_bit;
m_is_instruction_only_cache = p->is_icache;
}
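// init() derives the set count from the configured size, associativity, and
// the RubySystem block size, instantiates the requested replacement policy,
// and sizes the set/way array with NULL entries.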
void
CacheMemory::init()
{
m_cache_num_sets = (m_cache_size / m_cache_assoc) /
RubySystem::getBlockSizeBytes();
assert(m_cache_num_sets > 1);
m_cache_num_set_bits = floorLog2(m_cache_num_sets);
assert(m_cache_num_set_bits > 0);
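    // Worked example (hypothetical configuration): a 32 KiB, 4-way cache
    // with 64-byte blocks yields 32768 / 4 / 64 = 128 sets, i.e. 7 index bits.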
if (m_policy == "PSEUDO_LRU")
m_replacementPolicy_ptr =
new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
else if (m_policy == "LRU")
m_replacementPolicy_ptr =
new LRUPolicy(m_cache_num_sets, m_cache_assoc);
else
assert(false);
m_cache.resize(m_cache_num_sets);
for (int i = 0; i < m_cache_num_sets; i++) {
m_cache[i].resize(m_cache_assoc);
for (int j = 0; j < m_cache_assoc; j++) {
m_cache[i][j] = NULL;
}
}
}
CacheMemory::~CacheMemory()
{
if (m_replacementPolicy_ptr != NULL)
delete m_replacementPolicy_ptr;
delete m_profiler_ptr;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
delete m_cache[i][j];
}
}
}
void
CacheMemory::printConfig(ostream& out)
{
int block_size = RubySystem::getBlockSizeBytes();
out << "Cache config: " << m_cache_name << endl;
out << " cache_associativity: " << m_cache_assoc << endl;
out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
const int cache_num_sets = 1 << m_cache_num_set_bits;
out << " num_cache_sets: " << cache_num_sets << endl;
out << " cache_set_size_bytes: " << cache_num_sets * block_size << endl;
out << " cache_set_size_Kbytes: "
<< double(cache_num_sets * block_size) / (1<<10) << endl;
out << " cache_set_size_Mbytes: "
<< double(cache_num_sets * block_size) / (1<<20) << endl;
out << " cache_size_bytes: "
<< cache_num_sets * block_size * m_cache_assoc << endl;
out << " cache_size_Kbytes: "
<< double(cache_num_sets * block_size * m_cache_assoc) / (1<<10)
<< endl;
out << " cache_size_Mbytes: "
<< double(cache_num_sets * block_size * m_cache_assoc) / (1<<20)
<< endl;
}
// Convert an Address to its set index in the cache.
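// With the hypothetical 128-set, 64-byte-block configuration sketched in
// init() (so m_start_index_bit = 6), this selects address bits [6..12].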
Index
CacheMemory::addressToCacheSet(const Address& address) const
{
assert(address == line_address(address));
return address.bitSelect(m_start_index_bit,
m_start_index_bit + m_cache_num_set_bits - 1);
}
// Given a cache set, returns the way in which the tag resides.
// Returns -1 if the tag is not found or the entry is marked NotPresent.
int
CacheMemory::findTagInSet(Index cacheSet, const Address& tag) const
{
assert(tag == line_address(tag));
// search the set for the tags
m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
if (it != m_tag_index.end())
if (m_cache[cacheSet][it->second]->m_Permission !=
AccessPermission_NotPresent)
return it->second;
return -1; // Not found
}
// Given a cache set, returns the way in which the tag resides, ignoring the
// entry's access permission. Returns -1 if the tag is not found.
int
CacheMemory::findTagInSetIgnorePermissions(Index cacheSet,
const Address& tag) const
{
assert(tag == line_address(tag));
// search the set for the tags
m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
if (it != m_tag_index.end())
return it->second;
return -1; // Not found
}
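// tryCacheAccess() returns true only if the access is permitted: Read_Write
// entries satisfy any request type, while Read_Only entries satisfy only
// loads and instruction fetches. On success data_ptr points at the entry's
// data block; otherwise it is set to NULL. Any tag match, permitted or not,
// touches the replacement policy.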
bool
CacheMemory::tryCacheAccess(const Address& address, RubyRequestType type,
DataBlock*& data_ptr)
{
assert(address == line_address(address));
DPRINTF(RubyCache, "address: %s\n", address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
// Do we even have a tag match?
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
data_ptr = &(entry->getDataBlk());
if (entry->m_Permission == AccessPermission_Read_Write) {
return true;
}
if ((entry->m_Permission == AccessPermission_Read_Only) &&
(type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
return true;
}
// The line must not be accessible
}
data_ptr = NULL;
return false;
}
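// testCacheAccess() is the permission-agnostic variant: it only reports
// whether the block is present (any permission other than NotPresent) and,
// like tryCacheAccess(), updates the replacement state on a tag match.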
bool
CacheMemory::testCacheAccess(const Address& address, RubyRequestType type,
DataBlock*& data_ptr)
{
assert(address == line_address(address));
DPRINTF(RubyCache, "address: %s\n", address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
// Do we even have a tag match?
AbstractCacheEntry* entry = m_cache[cacheSet][loc];
m_replacementPolicy_ptr->
touch(cacheSet, loc, g_eventQueue_ptr->getTime());
data_ptr = &(entry->getDataBlk());
return m_cache[cacheSet][loc]->m_Permission !=
AccessPermission_NotPresent;
}
data_ptr = NULL;
return false;
}
// Tests whether an address is present in the cache.
bool
CacheMemory::isTagPresent(const Address& address) const
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc == -1) {
// We didn't find the tag
DPRINTF(RubyCache, "No tag match for address: %s\n", address);
return false;
}
DPRINTF(RubyCache, "address: %s found\n", address);
return true;
}
// Returns true if there is either:
//   a) a tag match on this address, or
//   b) an unused line in the same cache set
bool
CacheMemory::cacheAvail(const Address& address) const
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
for (int i = 0; i < m_cache_assoc; i++) {
AbstractCacheEntry* entry = m_cache[cacheSet][i];
if (entry != NULL) {
if (entry->m_Address == address ||
entry->m_Permission == AccessPermission_NotPresent) {
// Already in the cache or we found an empty entry
return true;
}
} else {
return true;
}
}
return false;
}
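// allocate() assumes the caller has already verified cacheAvail() and that
// the tag is not yet present (both are asserted below). It installs the
// supplied entry in the first free way with AccessPermission_Invalid and an
// unlocked state, records it in the tag index, touches the replacement
// policy, and returns the entry pointer.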
AbstractCacheEntry*
CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
{
assert(address == line_address(address));
assert(!isTagPresent(address));
assert(cacheAvail(address));
DPRINTF(RubyCache, "address: %s\n", address);
// Find the first open slot
Index cacheSet = addressToCacheSet(address);
std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
for (int i = 0; i < m_cache_assoc; i++) {
if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
set[i] = entry; // Init entry
set[i]->m_Address = address;
set[i]->m_Permission = AccessPermission_Invalid;
DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
address);
set[i]->m_locked = -1;
m_tag_index[address] = i;
m_replacementPolicy_ptr->
touch(cacheSet, i, g_eventQueue_ptr->getTime());
return entry;
}
}
panic("Allocate didn't find an available entry");
}
void
CacheMemory::deallocate(const Address& address)
{
assert(address == line_address(address));
assert(isTagPresent(address));
DPRINTF(RubyCache, "address: %s\n", address);
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
delete m_cache[cacheSet][loc];
m_cache[cacheSet][loc] = NULL;
m_tag_index.erase(address);
}
}
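// A sketch of the typical eviction sequence in the SLICC-generated
// controllers (not enforced here): when cacheAvail() fails, cacheProbe()
// names the victim chosen by the replacement policy; the protocol writes
// that line back, then calls deallocate() and allocate() for the new block.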
// Returns the physical address of the victim (conflicting) cache line.
Address
CacheMemory::cacheProbe(const Address& address) const
{
assert(address == line_address(address));
assert(!cacheAvail(address));
Index cacheSet = addressToCacheSet(address);
return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
m_Address;
}
// Looks an address up in the cache; returns NULL if the block is not present.
AbstractCacheEntry*
CacheMemory::lookup(const Address& address)
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
return m_cache[cacheSet][loc];
}
// Const variant of lookup(); returns NULL if the block is not present.
const AbstractCacheEntry*
CacheMemory::lookup(const Address& address) const
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
return m_cache[cacheSet][loc];
}
// Sets the most recently used bit for a cache block
void
CacheMemory::setMRU(const Address& address)
{
Index cacheSet;
cacheSet = addressToCacheSet(address);
m_replacementPolicy_ptr->
touch(cacheSet, findTagInSet(cacheSet, address),
g_eventQueue_ptr->getTime());
}
void
CacheMemory::profileMiss(const RubyRequest& msg)
{
m_profiler_ptr->addCacheStatSample(msg.getType(),
msg.getAccessMode(),
msg.getPrefetch());
}
void
CacheMemory::profileGenericRequest(GenericRequestType requestType,
RubyAccessMode accessType,
PrefetchBit pfBit)
{
m_profiler_ptr->addGenericStatSample(requestType,
accessType,
pfBit);
}
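// recordCacheContents() walks every set/way and adds a record for each valid
// block to the CacheRecorder: Read_Only blocks are replayed as IFETCH (for an
// instruction-only cache) or LD, Read_Write blocks as ST. The DPRINTF below
// reports what fraction of the cache was recorded ("warmed up").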
void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
uint64 warmedUpBlocks = 0;
uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets
* (uint64)m_cache_assoc;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
if (m_cache[i][j] != NULL) {
AccessPermission perm = m_cache[i][j]->m_Permission;
RubyRequestType request_type = RubyRequestType_NULL;
if (perm == AccessPermission_Read_Only) {
if (m_is_instruction_only_cache) {
request_type = RubyRequestType_IFETCH;
} else {
request_type = RubyRequestType_LD;
}
} else if (perm == AccessPermission_Read_Write) {
request_type = RubyRequestType_ST;
}
if (request_type != RubyRequestType_NULL) {
tr->addRecord(cntrl, m_cache[i][j]->m_Address.getAddress(),
0, request_type,
m_replacementPolicy_ptr->getLastAccess(i, j),
m_cache[i][j]->getDataBlk());
warmedUpBlocks++;
}
}
}
}
    DPRINTF(RubyCache, "%s: %lli blocks of %lli total blocks "
            "recorded %.2f%%\n", name().c_str(), warmedUpBlocks,
(uint64)m_cache_num_sets * (uint64)m_cache_assoc,
(float(warmedUpBlocks)/float(totalBlocks))*100.0);
}
void
CacheMemory::print(ostream& out) const
{
out << "Cache dump: " << m_cache_name << endl;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
if (m_cache[i][j] != NULL) {
out << " Index: " << i
<< " way: " << j
<< " entry: " << *m_cache[i][j] << endl;
} else {
out << " Index: " << i
<< " way: " << j
<< " entry: NULL" << endl;
}
}
}
}
void
CacheMemory::printData(ostream& out) const
{
out << "printData() not supported" << endl;
}
void
CacheMemory::clearStats() const
{
m_profiler_ptr->clearStats();
}
void
CacheMemory::printStats(ostream& out) const
{
m_profiler_ptr->printStats(out);
}
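// m_locked holds the id of the context that currently has the line locked,
// or -1 when the line is unlocked; the protocols presumably use this for
// locked-RMW / load-linked & store-conditional style sequences.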
void
CacheMemory::setLocked(const Address& address, int context)
{
DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
m_cache[cacheSet][loc]->m_locked = context;
}
void
CacheMemory::clearLocked(const Address& address)
{
DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
m_cache[cacheSet][loc]->m_locked = -1;
}
bool
CacheMemory::isLocked(const Address& address, int context)
{
assert(address == line_address(address));
Index cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
address, m_cache[cacheSet][loc]->m_locked, context);
return m_cache[cacheSet][loc]->m_locked == context;
}