gem5/src/mem/protocol/MESI_Two_Level-dir.sm

/*
* Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* $Id: MOESI_CMP_token-dir.sm 1.6 05/01/19 15:48:35-06:00 mikem@royal16.cs.wisc.edu $
*/
// This file is copied from Yasuko Watanabe's prefetch / memory protocol
// Copied here by aep 12/14/07
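
// Overview: this is the directory controller for the MESI_Two_Level protocol.
// It sits on the memory side of the interconnect and tracks each line in one
// of two base states: I (the directory/memory owns the line) or M (a single
// L2 bank holds a possibly modified copy, recorded in Entry.Owner). It
// services fetches and writebacks from the L2 as well as DMA reads and
// writes, using the transient states declared below to cover the window while
// memory or the owning cache is being consulted.
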
machine(Directory, "MESI Two Level directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   Cycles to_mem_ctrl_latency = 1,
   Cycles directory_latency = 6
{
  MessageBuffer requestToDir, network="From", virtual_network="0",
      ordered="false", vnet_type="request";
  MessageBuffer responseToDir, network="From", virtual_network="1",
      ordered="false", vnet_type="response";
  MessageBuffer responseFromDir, network="To", virtual_network="1",
      ordered="false", vnet_type="response";
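
  // The directory listens on two virtual networks and replies on one: vnet 0
  // carries requests from the L2 and DMA controllers, vnet 1 carries
  // responses in both directions. memBuffer is the interface to the off-chip
  // memory controller; to_mem_ctrl_latency is charged when enqueueing on that
  // path (and on outgoing responses), while directory_latency is charged on
  // invalidations sent to the current owner.
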
  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";

    M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";

    // Transient states
    IM, AccessPermission:Busy, desc="Intermediate State I>M";
    MI, AccessPermission:Busy, desc="Intermediate State M>I";
    M_DRD, AccessPermission:Busy, desc="DMA read in M: invalidate sent, waiting for data from the owner";
    M_DRDI, AccessPermission:Busy, desc="DMA read in M: data forwarded to DMA, waiting for memory writeback ack";
    M_DWR, AccessPermission:Busy, desc="DMA write in M: invalidate sent, waiting for data from the owner";
    M_DWRI, AccessPermission:Busy, desc="DMA write in M: writeback queued, waiting for memory ack";
  }

  // Events
  enumeration(Event, desc="Directory events") {
    Fetch, desc="A memory fetch arrives";
    Data, desc="Writeback data arrives";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback Ack from memory arrives";
    //added by SS for dma
    DMA_READ, desc="A DMA Read memory request";
    DMA_WRITE, desc="A DMA Write memory request";
    CleanReplacement, desc="Clean Replacement in L2 cache";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="Directory entry", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    MachineID Owner;
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="length in bytes of the DMA request";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **
  TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";

  void set_tbe(TBE tbe);
  void unset_tbe();
  void wakeUpBuffers(Address a);

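  // Directory entries are allocated on demand: a lookup that misses creates
  // a fresh Entry (in the default state I), so getDirectoryEntry never
  // returns an invalid pointer.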
  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).DirectoryState := state;
    }
  }

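  // The accessors below are used by Ruby outside the protocol proper (e.g.
  // for functional reads and writes); a valid TBE takes precedence over the
  // directory entry so that in-flight DMA data is observed.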
  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      return tbe.DataBlk;
    }

    return getDirectoryEntry(addr).DataBlk;
  }

  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  bool isGETRequest(CoherenceRequestType type) {
    return (type == CoherenceRequestType:GETS) ||
      (type == CoherenceRequestType:GET_INSTR) ||
      (type == CoherenceRequestType:GETX);
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **
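  // The rank parameter orders the in_ports: higher-ranked ports (memory
  // replies, then coherence responses) are serviced before lower-ranked ones
  // (new requests), so in-flight transactions can complete before new work
  // is admitted, which helps avoid protocol deadlock.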
  in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (isGETRequest(in_msg.Type)) {
          trigger(Event:Fetch, in_msg.Addr, TBEs[in_msg.Addr]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Addr),
                  TBEs[makeLineAddress(in_msg.Addr)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Addr),
                  TBEs[makeLineAddress(in_msg.Addr)]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Invalid message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
          trigger(Event:Data, in_msg.Addr, TBEs[in_msg.Addr]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:CleanReplacement, in_msg.Addr, TBEs[in_msg.Addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer, rank = 2) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Addr, TBEs[in_msg.Addr]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Addr, TBEs[in_msg.Addr]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions
  action(a_sendAck, "a", desc="Send ack to L2") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Sender);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:MEMORY_DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;

        Entry e := getDirectoryEntry(in_msg.Addr);
        e.Owner := in_msg.OriginalRequestorMachId;
      }
    }
  }

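  // aa_sendAck is the memory-side twin of a_sendAck: it acknowledges the L2
  // once memory has acked a writeback, taking the destination from the
  // memory queue rather than from the response network.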
  action(aa_sendAck, "aa", desc="Send ack to L2") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
    wakeUpBuffers(address);
  }

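  // Fetch requests are forwarded to the memory controller; the directory's
  // current copy of the block travels with the MEMORY_READ message and comes
  // back on the Memory_Data response.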
  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Addr).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Addr).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Addr, in_msg.DataBlk);
    }
  }

  //added by SS for dma
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for a DMA read") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    requestNetwork_in.dequeue();
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

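  // A DMA write may cover only part of a cache line: copyPartial merges
  // in_msg.Len bytes, starting at the line offset of the DMA address, into
  // the directory's copy of the block.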
  action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(
          in_msg.DataBlk, addressOffset(in_msg.Addr), in_msg.Len);
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := machineID;
        //out_msg.DataBlk := in_msg.DataBlk;
        out_msg.DataBlk.copyPartial(
            in_msg.DataBlk, addressOffset(address), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Destination.add(map_Address_to_DMA(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

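  // Two flavors of back-pressure: stall_and_wait parks the message in a
  // per-address buffer until kd_wakeUpDependents calls wakeUpBuffers(address),
  // while recycle re-enqueues the message so it is retried later. DMA
  // requests are recycled rather than stalled while another transaction on
  // the same line is in flight.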
  action(z_stallAndWaitRequest, "z", desc="stall and wait the request queue") {
    stall_and_wait(requestNetwork_in, address);
  }

  action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
    requestNetwork_in.recycle();
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:INV;
        out_msg.Sender := machineID;
        out_msg.Destination.add(getDirectoryEntry(address).Owner);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // we send the entire data block and rely on the dma controller
        // to split it up if need be
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(requestNetwork_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.Addr;
      tbe.Len := in_msg.Len;
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    //getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
    getDirectoryEntry(address).DataBlk.copyPartial(
        tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
        assert(is_valid(tbe));
        out_msg.Addr := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        //out_msg.DataBlk := in_msg.DataBlk;
        //out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
        out_msg.DataBlk.copyPartial(
            tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
        out_msg.MessageSize := in_msg.MessageSize;
        //out_msg.Prefetch := in_msg.Prefetch;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  // TRANSITIONS
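  // The main flows encoded below:
  //   L2 fetch:          I --Fetch--> IM --Memory_Data--> M
  //   L2 dirty eviction: M --Data--> MI --Memory_Ack--> I
  //   L2 clean eviction: M --CleanReplacement--> I
  //   DMA read in I:     I --DMA_READ--> ID --Memory_Data--> I
  //   DMA write in I:    I --DMA_WRITE--> ID_W --Memory_Ack--> I
  //   DMA against an M line first invalidates the owner and routes the
  //   owner's writeback through M_DRD/M_DRDI (reads) or M_DWR/M_DWRI
  //   (writes) before returning to I.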
  transition(I, Fetch, IM) {
    qf_queueMemoryFetchRequest;
    j_popIncomingRequestQueue;
  }

  transition(M, Fetch) {
    inv_sendCacheInvalidate;
    z_stallAndWaitRequest;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  //added by SS
  transition(M, CleanReplacement, I) {
    a_sendAck;
    k_popIncomingResponseQueue;
    kd_wakeUpDependents;
  }

  transition(M, Data, MI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(MI, Memory_Ack, I) {
    aa_sendAck;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  //added by SS for dma support
  transition(I, DMA_READ, ID) {
    qf_queueMemoryFetchRequestDMA;
    j_popIncomingRequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition(I, DMA_WRITE, ID_W) {
    dw_writeDMAData;
    qw_queueMemoryWBRequest_partial;
    j_popIncomingRequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
    z_stallAndWaitRequest;
  }

  transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
    zz_recycleDMAQueue;
  }

  transition(M, DMA_READ, M_DRD) {
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DRD, Data, M_DRDI) {
    drp_sendDMAData;
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    aa_sendAck;
    l_popMemQueue;
    kd_wakeUpDependents;
  }

  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DWR, Data, M_DWRI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    k_popIncomingResponseQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    aa_sendAck;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
    kd_wakeUpDependents;
  }
}