// gem5/src/mem/ruby/system/Sequencer.cc

/*
* Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "params/RubySequencer.hh"
using namespace std;
Sequencer *
RubySequencerParams::create()
{
return new Sequencer(this);
}
Sequencer::Sequencer(const Params *p)
: RubyPort(p), deadlockCheckEvent(this)
{
m_store_waiting_on_load_cycles = 0;
m_store_waiting_on_store_cycles = 0;
m_load_waiting_on_store_cycles = 0;
m_load_waiting_on_load_cycles = 0;
m_outstanding_count = 0;
m_instCache_ptr = p->icache;
m_dataCache_ptr = p->dcache;
m_max_outstanding_requests = p->max_outstanding_requests;
m_deadlock_threshold = p->deadlock_threshold;
assert(m_max_outstanding_requests > 0);
assert(m_deadlock_threshold > 0);
assert(m_instCache_ptr != NULL);
assert(m_dataCache_ptr != NULL);
m_usingNetworkTester = p->using_network_tester;
}
Sequencer::~Sequencer()
{
}
void
Sequencer::wakeup()
{
// Check for deadlock of any of the requests
Time current_time = g_eventQueue_ptr->getTime();
// Check across all outstanding requests
int total_outstanding = 0;
RequestTable::iterator read = m_readRequestTable.begin();
RequestTable::iterator read_end = m_readRequestTable.end();
for (; read != read_end; ++read) {
SequencerRequest* request = read->second;
if (current_time - request->issue_time < m_deadlock_threshold)
continue;
panic("Possible Deadlock detected. Aborting!\n"
"version: %d request.paddr: 0x%x m_readRequestTable: %d "
"current time: %u issue_time: %d difference: %d\n", m_version,
request->ruby_request.m_PhysicalAddress, m_readRequestTable.size(),
current_time, request->issue_time,
current_time - request->issue_time);
}
RequestTable::iterator write = m_writeRequestTable.begin();
RequestTable::iterator write_end = m_writeRequestTable.end();
for (; write != write_end; ++write) {
SequencerRequest* request = write->second;
if (current_time - request->issue_time < m_deadlock_threshold)
continue;
panic("Possible Deadlock detected. Aborting!\n"
"version: %d request.paddr: 0x%x m_writeRequestTable: %d "
"current time: %u issue_time: %d difference: %d\n", m_version,
request->ruby_request.m_PhysicalAddress, m_writeRequestTable.size(),
current_time, request->issue_time,
current_time - request->issue_time);
}
total_outstanding += m_writeRequestTable.size();
total_outstanding += m_readRequestTable.size();
assert(m_outstanding_count == total_outstanding);
if (m_outstanding_count > 0) {
// If there are still outstanding requests, keep checking
schedule(deadlockCheckEvent,
m_deadlock_threshold * g_eventQueue_ptr->getClock() +
curTick());
}
}
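// A minimal sketch, guarded out like the other #if 0 blocks in this
// file, of the per-request test the two scan loops above apply. The
// helper name is hypothetical; it only restates the comparison against
// m_deadlock_threshold.
#if 0
static bool
pastDeadlockThreshold(const SequencerRequest* request,
Time current_time, Time threshold)
{
// wakeup() panics once a request has been outstanding for at least
// `threshold` cycles without a read/write callback retiring it.
return (current_time - request->issue_time) >= threshold;
}
#endif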
void
Sequencer::printStats(ostream & out) const
{
out << "Sequencer: " << m_name << endl
<< " store_waiting_on_load_cycles: "
<< m_store_waiting_on_load_cycles << endl
<< " store_waiting_on_store_cycles: "
<< m_store_waiting_on_store_cycles << endl
<< " load_waiting_on_load_cycles: "
<< m_load_waiting_on_load_cycles << endl
<< " load_waiting_on_store_cycles: "
<< m_load_waiting_on_store_cycles << endl;
}
void
Sequencer::printProgress(ostream& out) const
{
#if 0
int total_demand = 0;
out << "Sequencer Stats Version " << m_version << endl;
out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
out << "---------------" << endl;
out << "outstanding requests" << endl;
out << "proc " << m_Read
<< " version Requests = " << m_readRequestTable.size() << endl;
// print the request table
RequestTable::iterator read = m_readRequestTable.begin();
RequestTable::iterator read_end = m_readRequestTable.end();
for (; read != read_end; ++read) {
SequencerRequest* request = read->second;
out << "\tRequest[ " << i << " ] = " << request->type
<< " Address " << rkeys[i]
<< " Posted " << request->issue_time
<< " PF " << PrefetchBit_No << endl;
total_demand++;
}
out << "proc " << m_version
<< " Write Requests = " << m_writeRequestTable.size << endl;
// print the request table
RequestTable::iterator write = m_writeRequestTable.begin();
RequestTable::iterator write_end = m_writeRequestTable.end();
for (; write != write_end; ++write) {
SequencerRequest* request = write->second;
out << "\tRequest[ " << i << " ] = " << request.getType()
<< " Address " << wkeys[i]
<< " Posted " << request.getTime()
<< " PF " << request.getPrefetch() << endl;
if (request.getPrefetch() == PrefetchBit_No) {
total_demand++;
}
}
out << endl;
out << "Total Number Outstanding: " << m_outstanding_count << endl
<< "Total Number Demand : " << total_demand << endl
<< "Total Number Prefetches : " << m_outstanding_count - total_demand
<< endl << endl << endl;
#endif
}
void
Sequencer::printConfig(ostream& out) const
{
out << "Seqeuncer config: " << m_name << endl
<< " controller: " << m_controller->getName() << endl
<< " version: " << m_version << endl
<< " max_outstanding_requests: " << m_max_outstanding_requests << endl
<< " deadlock_threshold: " << m_deadlock_threshold << endl;
}
// Insert the request into the correct request table. Return true if
// the entry was already present.
bool
Sequencer::insertRequest(SequencerRequest* request)
{
int total_outstanding =
m_writeRequestTable.size() + m_readRequestTable.size();
assert(m_outstanding_count == total_outstanding);
// See if we should schedule a deadlock check
if (deadlockCheckEvent.scheduled() == false) {
// The threshold is in Ruby cycles; convert to ticks as wakeup() does.
schedule(deadlockCheckEvent,
m_deadlock_threshold * g_eventQueue_ptr->getClock() + curTick());
}
Address line_addr(request->ruby_request.m_PhysicalAddress);
line_addr.makeLineAddress();
if ((request->ruby_request.m_Type == RubyRequestType_ST) ||
(request->ruby_request.m_Type == RubyRequestType_ATOMIC) ||
(request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
(request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
(request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
(request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
(request->ruby_request.m_Type == RubyRequestType_FLUSH)) {
pair<RequestTable::iterator, bool> r =
m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
bool success = r.second;
RequestTable::iterator i = r.first;
if (!success) {
i->second = request;
// return true;
// drh5: isn't this an error? do you lose the initial request?
assert(0);
}
i->second = request;
m_outstanding_count++;
} else {
pair<RequestTable::iterator, bool> r =
m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));
bool success = r.second;
RequestTable::iterator i = r.first;
if (!success) {
i->second = request;
// return true;
// drh5: isn't this an error? do you lose the initial request?
assert(0);
}
i->second = request;
m_outstanding_count++;
}
g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
assert(m_outstanding_count == total_outstanding);
return false;
}
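// Illustrative sketch of the line-address keying insertRequest() relies
// on: two byte addresses within one cache block collapse to the same
// table key, so a second request to the block collides with the first.
// Guarded out like the other example code here; the concrete addresses
// assume a 64-byte block purely for illustration.
#if 0
static void
lineAliasExample()
{
Address a(0x1000); // first byte of a block
Address b(0x1020); // a different byte in the same 64-byte block
a.makeLineAddress();
b.makeLineAddress();
assert(a == b); // both requests would map to one table entry
}
#endif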
void
Sequencer::markRemoved()
{
m_outstanding_count--;
assert(m_outstanding_count ==
m_writeRequestTable.size() + m_readRequestTable.size());
}
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
assert(m_outstanding_count ==
m_writeRequestTable.size() + m_readRequestTable.size());
const RubyRequest & ruby_request = srequest->ruby_request;
Address line_addr(ruby_request.m_PhysicalAddress);
line_addr.makeLineAddress();
if ((ruby_request.m_Type == RubyRequestType_ST) ||
(ruby_request.m_Type == RubyRequestType_RMW_Read) ||
(ruby_request.m_Type == RubyRequestType_RMW_Write) ||
(ruby_request.m_Type == RubyRequestType_Load_Linked) ||
(ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
(ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
(ruby_request.m_Type == RubyRequestType_Locked_RMW_Write)) {
m_writeRequestTable.erase(line_addr);
} else {
m_readRequestTable.erase(line_addr);
}
markRemoved();
}
bool
Sequencer::handleLlsc(const Address& address, SequencerRequest* request)
{
//
// The success flag indicates whether the LLSC operation was successful.
// LL ops will always succeed, but SC may fail if the cache line is no
// longer locked.
//
bool success = true;
if (request->ruby_request.m_Type == RubyRequestType_Store_Conditional) {
if (!m_dataCache_ptr->isLocked(address, m_version)) {
//
// For failed SC requests, indicate the failure to the cpu by
// setting the extra data to zero.
//
request->ruby_request.pkt->req->setExtraData(0);
success = false;
} else {
//
// For successful SC requests, indicate the success to the cpu by
// setting the extra data to one.
//
request->ruby_request.pkt->req->setExtraData(1);
}
//
// Independent of success, all SC operations must clear the lock
//
m_dataCache_ptr->clearLocked(address);
} else if (request->ruby_request.m_Type == RubyRequestType_Load_Linked) {
//
// Note: To fully follow Alpha LLSC semantics, should the LL clear any
// previously locked cache lines?
//
m_dataCache_ptr->setLocked(address, m_version);
} else if (m_dataCache_ptr->isTagPresent(address) &&
m_dataCache_ptr->isLocked(address, m_version)) {
//
// Normal writes should clear the locked address
//
m_dataCache_ptr->clearLocked(address);
}
return success;
}
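// A hedged sketch of the lock-bit lifecycle handleLlsc() implements,
// using only the CacheMemory calls already made above; the sequencing
// is illustrative, not an additional code path.
#if 0
static bool
llscTimeline(CacheMemory* dcache, const Address& addr, int version)
{
dcache->setLocked(addr, version); // LL locks the line
// An intervening plain store (or a remote writer invalidating the
// line) would clear the lock here via clearLocked(addr).
bool sc_ok = dcache->isLocked(addr, version);
dcache->clearLocked(addr); // SC always clears the lock
return sc_ok; // reported to the CPU through req->setExtraData()
}
#endif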
void
Sequencer::writeCallback(const Address& address, DataBlock& data)
{
writeCallback(address, GenericMachineType_NULL, data);
}
void
Sequencer::writeCallback(const Address& address,
GenericMachineType mach,
DataBlock& data)
{
writeCallback(address, mach, data, 0, 0, 0);
}
void
Sequencer::writeCallback(const Address& address,
GenericMachineType mach,
DataBlock& data,
Time initialRequestTime,
Time forwardRequestTime,
Time firstResponseTime)
{
assert(address == line_address(address));
assert(m_writeRequestTable.count(line_address(address)));
RequestTable::iterator i = m_writeRequestTable.find(address);
assert(i != m_writeRequestTable.end());
SequencerRequest* request = i->second;
m_writeRequestTable.erase(i);
markRemoved();
assert((request->ruby_request.m_Type == RubyRequestType_ST) ||
(request->ruby_request.m_Type == RubyRequestType_ATOMIC) ||
(request->ruby_request.m_Type == RubyRequestType_RMW_Read) ||
(request->ruby_request.m_Type == RubyRequestType_RMW_Write) ||
(request->ruby_request.m_Type == RubyRequestType_Load_Linked) ||
(request->ruby_request.m_Type == RubyRequestType_Store_Conditional) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) ||
(request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) ||
(request->ruby_request.m_Type == RubyRequestType_FLUSH));
//
// For Alpha, properly handle LL, SC, and write requests with respect to
// locked cache blocks.
//
// Not valid for Network_test protocol
//
bool success = true;
if (!m_usingNetworkTester)
success = handleLlsc(address, request);
if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Read) {
m_controller->blockOnQueue(address, m_mandatory_q_ptr);
} else if (request->ruby_request.m_Type == RubyRequestType_Locked_RMW_Write) {
m_controller->unblock(address);
}
hitCallback(request, mach, data, success,
initialRequestTime, forwardRequestTime, firstResponseTime);
}
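// Callback flow above, in brief: (1) look up and retire the table
// entry, (2) apply LL/SC bookkeeping unless the network tester is in
// use, (3) block the mandatory queue on a Locked_RMW_Read and unblock
// it on the matching Locked_RMW_Write, (4) hand the data back through
// hitCallback().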
void
Sequencer::readCallback(const Address& address, DataBlock& data)
{
readCallback(address, GenericMachineType_NULL, data);
}
void
Sequencer::readCallback(const Address& address,
GenericMachineType mach,
DataBlock& data)
{
readCallback(address, mach, data, 0, 0, 0);
}
void
Sequencer::readCallback(const Address& address,
GenericMachineType mach,
DataBlock& data,
Time initialRequestTime,
Time forwardRequestTime,
Time firstResponseTime)
{
assert(address == line_address(address));
assert(m_readRequestTable.count(line_address(address)));
RequestTable::iterator i = m_readRequestTable.find(address);
assert(i != m_readRequestTable.end());
SequencerRequest* request = i->second;
m_readRequestTable.erase(i);
markRemoved();
assert((request->ruby_request.m_Type == RubyRequestType_LD) ||
(request->ruby_request.m_Type == RubyRequestType_IFETCH));
hitCallback(request, mach, data, true,
initialRequestTime, forwardRequestTime, firstResponseTime);
}
void
Sequencer::hitCallback(SequencerRequest* srequest,
GenericMachineType mach,
DataBlock& data,
bool success,
Time initialRequestTime,
Time forwardRequestTime,
Time firstResponseTime)
{
const RubyRequest & ruby_request = srequest->ruby_request;
Address request_address(ruby_request.m_PhysicalAddress);
Address request_line_address(ruby_request.m_PhysicalAddress);
request_line_address.makeLineAddress();
RubyRequestType type = ruby_request.m_Type;
Time issued_time = srequest->issue_time;
// Set this cache entry to the most recently used
if (type == RubyRequestType_IFETCH) {
if (m_instCache_ptr->isTagPresent(request_line_address))
m_instCache_ptr->setMRU(request_line_address);
} else {
if (m_dataCache_ptr->isTagPresent(request_line_address))
m_dataCache_ptr->setMRU(request_line_address);
}
assert(g_eventQueue_ptr->getTime() >= issued_time);
Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
// Profile the miss latency for all non-zero demand misses
if (miss_latency != 0) {
g_system_ptr->getProfiler()->missLatency(miss_latency, type, mach);
if (mach == GenericMachineType_L1Cache_wCC) {
g_system_ptr->getProfiler()->missLatencyWcc(issued_time,
initialRequestTime,
forwardRequestTime,
firstResponseTime,
g_eventQueue_ptr->getTime());
}
if (mach == GenericMachineType_Directory) {
g_system_ptr->getProfiler()->missLatencyDir(issued_time,
initialRequestTime,
forwardRequestTime,
firstResponseTime,
g_eventQueue_ptr->getTime());
}
DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
curTick(), m_version, "Seq",
success ? "Done" : "SC_Failed", "", "",
ruby_request.m_PhysicalAddress, miss_latency);
}
#if 0
if (request.getPrefetch() == PrefetchBit_Yes) {
return; // Ignore the prefetch
}
#endif
// update the data
if (ruby_request.data != NULL) {
if ((type == RubyRequestType_LD) ||
(type == RubyRequestType_IFETCH) ||
(type == RubyRequestType_RMW_Read) ||
(type == RubyRequestType_Locked_RMW_Read) ||
(type == RubyRequestType_Load_Linked)) {
memcpy(ruby_request.data,
data.getData(request_address.getOffset(), ruby_request.m_Size),
ruby_request.m_Size);
} else {
data.setData(ruby_request.data, request_address.getOffset(),
ruby_request.m_Size);
}
} else {
DPRINTF(MemoryAccess,
"WARNING. Data not transfered from Ruby to M5 for type %s\n",
RubyRequestType_to_string(type));
}
// If using the RubyTester, update the RubyTester sender state's
// subBlock with the received data. The tester will later access
// this state.
// Note: RubyPort will access its sender state before the
// RubyTester.
if (m_usingRubyTester) {
RubyPort::SenderState *requestSenderState =
safe_cast<RubyPort::SenderState*>(ruby_request.pkt->senderState);
RubyTester::SenderState* testerSenderState =
safe_cast<RubyTester::SenderState*>(requestSenderState->saved);
testerSenderState->subBlock->mergeFrom(data);
}
ruby_hit_callback(ruby_request.pkt);
delete srequest;
}
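// Direction of the copy in the "update the data" block above, as a
// small sketch; the buffer name and the fixed 8-byte size are
// assumptions for the example, whereas the real size and offset come
// from the RubyRequest.
#if 0
uint8_t cpu_buf[8];
int offset = request_address.getOffset();
// Load-type requests: cache block -> CPU buffer, from the byte offset.
memcpy(cpu_buf, data.getData(offset, 8), 8);
// Store-type requests: CPU buffer -> cache block.
data.setData(cpu_buf, offset, 8);
#endif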
// Returns the status of the request: Aliased if a load or store to the
// same line is already outstanding, BufferFull if no slot is free,
// otherwise Ready.
RequestStatus
Sequencer::getRequestStatus(const RubyRequest& request)
{
bool is_outstanding_store =
!!m_writeRequestTable.count(line_address(request.m_PhysicalAddress));
bool is_outstanding_load =
!!m_readRequestTable.count(line_address(request.m_PhysicalAddress));
if (is_outstanding_store) {
if ((request.m_Type == RubyRequestType_LD) ||
(request.m_Type == RubyRequestType_IFETCH) ||
(request.m_Type == RubyRequestType_RMW_Read)) {
m_store_waiting_on_load_cycles++;
} else {
m_store_waiting_on_store_cycles++;
}
return RequestStatus_Aliased;
} else if (is_outstanding_load) {
if ((request.m_Type == RubyRequestType_ST) ||
(request.m_Type == RubyRequestType_RMW_Write)) {
m_load_waiting_on_store_cycles++;
} else {
m_load_waiting_on_load_cycles++;
}
return RequestStatus_Aliased;
}
if (m_outstanding_count >= m_max_outstanding_requests) {
return RequestStatus_BufferFull;
}
return RequestStatus_Ready;
}
bool
Sequencer::empty() const
{
return m_writeRequestTable.empty() && m_readRequestTable.empty();
}
RequestStatus
Sequencer::makeRequest(const RubyRequest &request)
{
assert(request.m_PhysicalAddress.getOffset() + request.m_Size <=
RubySystem::getBlockSizeBytes());
RequestStatus status = getRequestStatus(request);
if (status != RequestStatus_Ready)
return status;
SequencerRequest *srequest =
new SequencerRequest(request, g_eventQueue_ptr->getTime());
bool found = insertRequest(srequest);
if (found) {
panic("Sequencer::makeRequest should never be called if the "
"request is already outstanding\n");
return RequestStatus_NULL;
}
issueRequest(request);
// TODO: issue hardware prefetches here
return RequestStatus_Issued;
}
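// Sketch of how a caller is expected to consume the status returned by
// makeRequest(): anything other than Issued means "hold the packet and
// try again later". The retry hook named here is hypothetical; it
// stands in for whatever back-pressure mechanism the port uses.
#if 0
RequestStatus status = sequencer->makeRequest(request);
if (status != RequestStatus_Issued) {
// Aliased or BufferFull: keep the packet and reissue it later.
scheduleRetry(request); // hypothetical retry hook
}
#endif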
void
Sequencer::issueRequest(const RubyRequest& request)
{
// TODO: Eliminate RubyRequest being copied again.
RubyRequestType ctype;
switch (request.m_Type) {
case RubyRequestType_IFETCH:
ctype = RubyRequestType_IFETCH;
break;
case RubyRequestType_LD:
ctype = RubyRequestType_LD;
break;
case RubyRequestType_FLUSH:
ctype = RubyRequestType_FLUSH;
break;
case RubyRequestType_ST:
case RubyRequestType_RMW_Read:
case RubyRequestType_RMW_Write:
//
// x86 locked instructions are translated to store cache coherence
// requests because these requests should always be treated as read
// exclusive operations and should leverage any migratory sharing
// optimization built into the protocol.
//
case RubyRequestType_Locked_RMW_Read:
case RubyRequestType_Locked_RMW_Write:
ctype = RubyRequestType_ST;
break;
//
// Alpha LL/SC instructions need to be handled carefully by the cache
// coherence protocol to ensure they follow the proper semantics. In
// particular, by identifying the operations as atomic, the protocol
// should understand that migratory sharing optimizations should not be
// performed (i.e. a load between the LL and SC should not steal away
// exclusive permission).
//
case RubyRequestType_Load_Linked:
case RubyRequestType_Store_Conditional:
case RubyRequestType_ATOMIC:
ctype = RubyRequestType_ATOMIC;
break;
default:
assert(0);
}
RubyAccessMode amtype;
switch (request.m_AccessMode) {
case RubyAccessMode_User:
amtype = RubyAccessMode_User;
break;
case RubyAccessMode_Supervisor:
amtype = RubyAccessMode_Supervisor;
break;
case RubyAccessMode_Device:
amtype = RubyAccessMode_User;
break;
default:
assert(0);
}
Address line_addr(request.m_PhysicalAddress);
line_addr.makeLineAddress();
int proc_id = -1;
if (request.pkt != NULL && request.pkt->req->hasContextId()) {
proc_id = request.pkt->req->contextId();
}
RubyRequest *msg = new RubyRequest(request.m_PhysicalAddress.getAddress(),
request.data, request.m_Size,
request.m_ProgramCounter.getAddress(),
ctype, amtype, request.pkt,
PrefetchBit_No, proc_id);
DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
curTick(), m_version, "Seq", "Begin", "", "",
request.m_PhysicalAddress, RubyRequestType_to_string(request.m_Type));
Time latency = 0; // initialized to a null value
if (request.m_Type == RubyRequestType_IFETCH)
latency = m_instCache_ptr->getLatency();
else
latency = m_dataCache_ptr->getLatency();
// Send the message to the cache controller
assert(latency > 0);
assert(m_mandatory_q_ptr != NULL);
m_mandatory_q_ptr->enqueue(msg, latency);
}
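// Summary of the type coalescing performed above (descriptive only):
//
// IFETCH -> IFETCH
// LD -> LD
// FLUSH -> FLUSH
// ST, RMW_*, Locked_RMW_* -> ST (read exclusive)
// Load_Linked, Store_Conditional, ATOMIC -> ATOMIC (no migratory sharing)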
#if 0
bool
Sequencer::tryCacheAccess(const Address& addr, RubyRequestType type,
RubyAccessMode access_mode,
int size, DataBlock*& data_ptr)
{
CacheMemory *cache =
(type == RubyRequestType_IFETCH) ? m_instCache_ptr : m_dataCache_ptr;
return cache->tryCacheAccess(line_address(addr), type, data_ptr);
}
#endif
template <class KEY, class VALUE>
std::ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();
out << "[";
for (; i != end; ++i)
out << " " << i->first << "=" << i->second;
out << " ]";
return out;
}
void
Sequencer::print(ostream& out) const
{
out << "[Sequencer: " << m_version
<< ", outstanding requests: " << m_outstanding_count
<< ", read request table: " << m_readRequestTable
<< ", write request table: " << m_writeRequestTable
<< "]";
}
// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(const Address& addr)
{
#ifdef CHECK_COHERENCE
g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}