From 12b7ebcbcab1c5ea30065ea45908e5711b05b63f Mon Sep 17 00:00:00 2001 From: Ali Saidi Date: Thu, 22 Mar 2007 18:39:41 -0400 Subject: [PATCH] finish up the coding of the Intel Gb NIC... Many Many bugs to squash src/dev/i8254xGBe.cc: src/dev/i8254xGBe.hh: src/dev/i8254xGBe_defs.hh: finish coding the Intel Gb NIC device src/dev/io_device.hh: we really don't want to be able to pass a null buffer to dma read, at least not the way we have things setup now... it won't work at all --HG-- extra : convert_revision : 6739497232317ec407cfa7a96de4575a9a6cfc46 --- src/dev/i8254xGBe.cc | 684 ++++++++++++++++++++++++++++++++++++-- src/dev/i8254xGBe.hh | 420 ++++++++++++++++++++++- src/dev/i8254xGBe_defs.hh | 373 +++++++++------------ src/dev/io_device.hh | 9 +- 4 files changed, 1243 insertions(+), 243 deletions(-) diff --git a/src/dev/i8254xGBe.cc b/src/dev/i8254xGBe.cc index 5476ef9eb..d6449f6c5 100644 --- a/src/dev/i8254xGBe.cc +++ b/src/dev/i8254xGBe.cc @@ -35,7 +35,13 @@ * other MACs with slight modifications. */ + +/* + * @todo really there are multiple dma engines.. we should implement them. + */ + #include "base/inet.hh" +#include "base/trace.hh" #include "dev/i8254xGBe.hh" #include "mem/packet.hh" #include "mem/packet_access.hh" @@ -43,32 +49,34 @@ #include "sim/stats.hh" #include "sim/system.hh" +#include + using namespace iGbReg; +using namespace Net; IGbE::IGbE(Params *p) - : PciDev(p), etherInt(NULL), useFlowControl(p->use_flow_control) + : PciDev(p), etherInt(NULL), useFlowControl(p->use_flow_control), + rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size), rxTick(false), + txTick(false), rdtrEvent(this), radvEvent(this), tadvEvent(this), + tidvEvent(this), tickEvent(this), interEvent(this), + rxDescCache(this, name()+".TxDesc", p->rx_desc_cache_size), + txDescCache(this, name()+".RxDesc", p->tx_desc_cache_size), clock(p->clock) { // Initialized internal registers per Intel documentation - regs.tctl(0); - regs.rctl(0); - regs.ctrl(0); + // All registers intialized to 0 by per register constructor regs.ctrl.fd(1); regs.ctrl.lrst(1); regs.ctrl.speed(2); regs.ctrl.frcspd(1); - regs.sts(0); regs.sts.speed(3); // Say we're 1000Mbps regs.sts.fd(1); // full duplex - regs.eecd(0); regs.eecd.fwe(1); regs.eecd.ee_type(1); - regs.eerd(0); - regs.icr(0); - regs.rctl(0); - regs.tctl(0); - regs.fcrtl(0); + regs.imr = 0; + regs.iam = 0; + regs.rxdctl.gran(1); + regs.rxdctl.wthresh(1); regs.fcrth(1); - regs.manc(0); regs.pba.rxa(0x30); regs.pba.txa(0x10); @@ -92,6 +100,9 @@ IGbE::IGbE(Params *p) // Magic happy checksum value flash[EEPROM_SIZE-1] = htobe((uint16_t)(EEPROM_CSUM - csum)); + + rxFifo.clear(); + txFifo.clear(); } @@ -156,7 +167,12 @@ IGbE::read(PacketPtr pkt) break; case REG_ICR: pkt->set(regs.icr()); - // handle auto setting mask from IAM + if (regs.icr.int_assert()) + regs.imr &= regs.iam; + if (regs.imr == 0 || (regs.icr.int_assert() && regs.ctrl_ext.iame())) { + regs.icr(0); + cpuClearInt(); + } break; case REG_ITR: pkt->set(regs.itr()); @@ -200,6 +216,18 @@ IGbE::read(PacketPtr pkt) break; case REG_RDTR: pkt->set(regs.rdtr()); + if (regs.rdtr.fpd()) { + rxDescCache.writeback(0); + postInterrupt(IT_RXT); + regs.rdtr.fpd(0); + } + if (regs.rdtr.delay()) { + Tick t = regs.rdtr.delay() * Clock::Int::ns * 1024; + if (rdtrEvent.scheduled()) + rdtrEvent.reschedule(curTick + t); + else + rdtrEvent.schedule(curTick + t); + } break; case REG_RADV: pkt->set(regs.radv()); @@ -271,6 +299,9 @@ IGbE::write(PacketPtr pkt) /// uint32_t val = pkt->get(); + Regs::RCTL oldrctl; + Regs::TCTL oldtctl; + switch 
(daddr) { case REG_CTRL: regs.ctrl = val; @@ -372,36 +403,60 @@ IGbE::write(PacketPtr pkt) regs.mdic.r(1); break; case REG_ICR: - regs.icr = val; - // handle auto setting mask from IAM + if (regs.icr.int_assert()) + regs.imr &= regs.iam; + + regs.icr = ~bits(val,30,0) & regs.icr(); + // if no more bits are set clear the int_asserted bit + if (!bits(regs.icr(),31,31)) + cpuClearInt(); + break; case REG_ITR: regs.itr = val; break; case REG_ICS: - regs.icr = val | regs.icr(); - // generate an interrupt if needed here + postInterrupt((IntTypes)val); break; case REG_IMS: regs.imr |= val; - // handle interrupts if needed here + chkInterrupt(); break; case REG_IMC: - regs.imr |= ~val; - // handle interrupts if needed here + regs.imr &= ~val; + chkInterrupt(); break; case REG_IAM: regs.iam = val; break; case REG_RCTL: + oldrctl = regs.rctl; regs.rctl = val; + if (regs.rctl.rst()) { + rxDescCache.reset(); + rxFifo.clear(); + regs.rctl.rst(0); + } + if (regs.rctl.en()) + rxTick = true; + if ((rxTick || txTick) && !tickEvent.scheduled()) + tickEvent.schedule(curTick + cycles(1)); break; case REG_FCTTV: regs.fcttv = val; break; case REG_TCTL: regs.tctl = val; - break; + oldtctl = regs.tctl; + regs.tctl = val; + if (regs.tctl.en()) + txTick = true; + if ((rxTick || txTick) && !tickEvent.scheduled()) + tickEvent.schedule(curTick + cycles(1)); + if (regs.tctl.en() && !oldtctl.en()) { + txDescCache.reset(); + } + break; case REG_PBA: regs.pba.rxa(val); regs.pba.txa(64 - regs.pba.rxa()); @@ -424,18 +479,25 @@ IGbE::write(PacketPtr pkt) break; case REG_RDBAL: regs.rdba.rdbal( val & ~mask(4)); + rxDescCache.areaChanged(); break; case REG_RDBAH: regs.rdba.rdbah(val); + rxDescCache.areaChanged(); break; case REG_RDLEN: regs.rdlen = val & ~mask(7); + rxDescCache.areaChanged(); break; case REG_RDH: regs.rdh = val; + rxDescCache.areaChanged(); break; case REG_RDT: regs.rdt = val; + rxTick = true; + if ((rxTick || txTick) && !tickEvent.scheduled()) + tickEvent.schedule(curTick + cycles(1)); break; case REG_RDTR: regs.rdtr = val; @@ -445,18 +507,25 @@ IGbE::write(PacketPtr pkt) break; case REG_TDBAL: regs.tdba.tdbal( val & ~mask(4)); + txDescCache.areaChanged(); break; case REG_TDBAH: regs.tdba.tdbah(val); + txDescCache.areaChanged(); break; case REG_TDLEN: regs.tdlen = val & ~mask(7); + txDescCache.areaChanged(); break; case REG_TDH: regs.tdh = val; + txDescCache.areaChanged(); break; case REG_TDT: regs.tdt = val; + txTick = true; + if ((rxTick || txTick) && !tickEvent.scheduled()) + tickEvent.schedule(curTick + cycles(1)); break; case REG_TIDV: regs.tidv = val; @@ -484,18 +553,585 @@ IGbE::write(PacketPtr pkt) return pioDelay; } +void +IGbE::postInterrupt(IntTypes t, bool now) +{ + // Interrupt is already pending + if (t & regs.icr()) + return; + + if (regs.icr() & regs.imr) + { + // already in an interrupt state, set new int and done + regs.icr = regs.icr() | t; + } else { + regs.icr = regs.icr() | t; + if (regs.itr.interval() == 0 || now) { + if (now) { + if (interEvent.scheduled()) + interEvent.deschedule(); + } + cpuPostInt(); + } else { + DPRINTF(EthernetIntr, "EINT: Scheduling timer interrupt for %d ticks\n", + Clock::Int::ns * 256 * regs.itr.interval()); + assert(!interEvent.scheduled()); + interEvent.schedule(curTick + Clock::Int::ns * 256 * regs.itr.interval()); + } + } +} + +void +IGbE::cpuPostInt() +{ + if (rdtrEvent.scheduled()) { + regs.icr.rxt0(1); + rdtrEvent.deschedule(); + } + if (radvEvent.scheduled()) { + regs.icr.rxt0(1); + radvEvent.deschedule(); + } + if (tadvEvent.scheduled()) { + 
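/*
 * A note on the interrupt moderation implemented in postInterrupt() above:
 * when ITR is non-zero the interrupt is not delivered immediately but is
 * deferred by ITR.interval * 256 ns via interEvent. A minimal standalone
 * sketch of that decision (hypothetical function name; the 256 ns unit per
 * ITR count is taken from the code above):
 *
 *     #include <cstdint>
 *
 *     // Absolute time (in ns) at which a pending interrupt may be delivered.
 *     uint64_t interruptDeliveryNs(uint64_t now_ns, uint32_t itr_interval,
 *                                  bool deliver_now)
 *     {
 *         if (itr_interval == 0 || deliver_now)
 *             return now_ns;                        // post immediately
 *         return now_ns + 256ULL * itr_interval;    // ITR unit is 256 ns
 *     }
 */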
regs.icr.txdw(1); + tadvEvent.deschedule(); + } + if (tidvEvent.scheduled()) { + regs.icr.txdw(1); + tidvEvent.deschedule(); + } + + regs.icr.int_assert(1); + DPRINTF(EthernetIntr, "EINT: Posting interrupt to CPU now. Vector %#x\n", + regs.icr()); + intrPost(); +} + +void +IGbE::cpuClearInt() +{ + regs.icr.int_assert(0); + DPRINTF(EthernetIntr, "EINT: Clearing interrupt to CPU now. Vector %#x\n", + regs.icr()); + intrClear(); +} + +void +IGbE::chkInterrupt() +{ + // Check if we need to clear the cpu interrupt + if (!(regs.icr() & regs.imr)) + cpuClearInt(); + + // Check if we need to set the cpu interupt + postInterrupt(IT_NONE); +} + + +IGbE::RxDescCache::RxDescCache(IGbE *i, const std::string n, int s) + : DescCache(i, n, s), pktDone(false), pktEvent(this) + +{ +} bool -IGbE::ethRxPkt(EthPacketPtr packet) +IGbE::RxDescCache::writePacket(EthPacketPtr packet) { - panic("Need to implemenet\n"); + // We shouldn't have to deal with any of these yet + assert(packet->length < igbe->regs.rctl.descSize()); + + if (!unusedCache.size()) + return false; + + pktPtr = packet; + + igbe->dmaWrite(unusedCache.front()->buf, packet->length, &pktEvent, packet->data); + return true; +} + +void +IGbE::RxDescCache::pktComplete() +{ + assert(unusedCache.size()); + RxDesc *desc; + desc = unusedCache.front(); + + desc->len = pktPtr->length; + // no support for anything but starting at 0 + assert(igbe->regs.rxcsum.pcss() == 0); + + DPRINTF(EthernetDesc, "RxDesc: Packet written to memory updating Descriptor\n"); + + uint8_t status = RXDS_DD | RXDS_EOP; + uint8_t err = 0; + IpPtr ip(pktPtr); + if (ip) { + if (igbe->regs.rxcsum.ipofld()) { + DPRINTF(EthernetDesc, "RxDesc: Checking IP checksum\n"); + status |= RXDS_IPCS; + desc->csum = cksum(ip); + if (cksum(ip) != 0) { + err |= RXDE_IPE; + DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); + } + } + TcpPtr tcp(ip); + if (tcp && igbe->regs.rxcsum.tuofld()) { + DPRINTF(EthernetDesc, "RxDesc: Checking TCP checksum\n"); + status |= RXDS_TCPCS; + desc->csum = cksum(tcp); + if (cksum(tcp) != 0) { + DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); + err |= RXDE_TCPE; + } + } + + UdpPtr udp(ip); + if (udp && igbe->regs.rxcsum.tuofld()) { + DPRINTF(EthernetDesc, "RxDesc: Checking UDP checksum\n"); + status |= RXDS_UDPCS; + desc->csum = cksum(udp); + if (cksum(tcp) != 0) { + DPRINTF(EthernetDesc, "RxDesc: Checksum is bad!!\n"); + err |= RXDE_TCPE; + } + } + } // if ip + + desc->status = status; + desc->errors = err; + + // No vlan support at this point... 
just set it to 0 + desc->vlan = 0; + + // Deal with the rx timer interrupts + if (igbe->regs.rdtr.delay()) { + DPRINTF(EthernetSM, "RXS: Scheduling DTR for %d\n", + igbe->regs.rdtr.delay() * igbe->intClock()); + if (igbe->rdtrEvent.scheduled()) + igbe->rdtrEvent.reschedule(curTick + igbe->regs.rdtr.delay() * + igbe->intClock()); + else + igbe->rdtrEvent.schedule(curTick + igbe->regs.rdtr.delay() * + igbe->intClock()); + } + + if (igbe->regs.radv.idv() && igbe->regs.rdtr.delay()) { + DPRINTF(EthernetSM, "RXS: Scheduling ADV for %d\n", + igbe->regs.radv.idv() * igbe->intClock()); + if (!igbe->radvEvent.scheduled()) + igbe->radvEvent.schedule(curTick + igbe->regs.radv.idv() * + igbe->intClock()); + } + + // If the packet is small enough, interrupt appropriately + if (pktPtr->length <= igbe->regs.rsrpd.idv()) + igbe->postInterrupt(IT_SRPD); + + DPRINTF(EthernetDesc, "RxDesc: Processing of this descriptor complete\n"); + unusedCache.pop_front(); + usedCache.push_back(desc); + pktPtr = NULL; + enableSm(); + pktDone = true; +} + +void +IGbE::RxDescCache::enableSm() +{ + igbe->rxTick = true; + if ((igbe->rxTick || igbe->txTick) && !igbe->tickEvent.scheduled()) + igbe->tickEvent.schedule((curTick/igbe->cycles(1)) * igbe->cycles(1) + + igbe->cycles(1)); +} + +bool +IGbE::RxDescCache::packetDone() +{ + if (pktDone) { + pktDone = false; + return true; + } + return false; +} + +///////////////////////////////////// IGbE::TxDesc ///////////////////////////////// + +IGbE::TxDescCache::TxDescCache(IGbE *i, const std::string n, int s) + : DescCache(i,n, s), pktDone(false), isTcp(false), pktWaiting(false), + pktEvent(this) + +{ +} + +int +IGbE::TxDescCache::getPacketSize() +{ + assert(unusedCache.size()); + + TxDesc *desc; + + DPRINTF(EthernetDesc, "TxDesc: Starting processing of descriptor\n"); + + while (unusedCache.size() && TxdOp::isContext(unusedCache.front())) { + DPRINTF(EthernetDesc, "TxDesc: Got context descriptor type... skipping\n"); + + // I think we can just ignore these for now? + desc = unusedCache.front(); + // is this going to be a tcp or udp packet? + isTcp = TxdOp::tcp(desc) ? 
true : false; + + // make sure it's ipv4 + assert(TxdOp::ip(desc)); + + TxdOp::setDd(desc); + unusedCache.pop_front(); + usedCache.push_back(desc); + } + + if (!unusedCache.size()) + return -1; + + DPRINTF(EthernetDesc, "TxDesc: Next TX packet is %d bytes\n", + TxdOp::getLen(unusedCache.front())); + + return TxdOp::getLen(unusedCache.front()); +} + +void +IGbE::TxDescCache::getPacketData(EthPacketPtr p) +{ + assert(unusedCache.size()); + + TxDesc *desc; + desc = unusedCache.front(); + + assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc)); + + pktPtr = p; + + pktWaiting = true; + + DPRINTF(EthernetDesc, "TxDesc: Starting DMA of packet\n"); + igbe->dmaRead(TxdOp::getBuf(desc), TxdOp::getLen(desc), &pktEvent, p->data); + + +} + +void +IGbE::TxDescCache::pktComplete() +{ + + TxDesc *desc; + assert(unusedCache.size()); + assert(pktPtr); + + DPRINTF(EthernetDesc, "TxDesc: DMA of packet complete\n"); + + desc = unusedCache.front(); + assert((TxdOp::isLegacy(desc) || TxdOp::isData(desc)) && TxdOp::getLen(desc)); + + // no support for vlans + assert(!TxdOp::vle(desc)); + + // we alway report status + assert(TxdOp::rs(desc)); + + // we only support single packet descriptors at this point + assert(TxdOp::eop(desc)); + + // set that this packet is done + TxdOp::setDd(desc); + + // Checksums are only ofloaded for new descriptor types + if (TxdOp::isData(desc) && ( TxdOp::ixsm(desc) || TxdOp::txsm(desc)) ) { + DPRINTF(EthernetDesc, "TxDesc: Calculating checksums for packet\n"); + IpPtr ip(pktPtr); + if (TxdOp::ixsm(desc)) { + ip->sum(0); + ip->sum(cksum(ip)); + DPRINTF(EthernetDesc, "TxDesc: Calculated IP checksum\n"); + } + if (TxdOp::txsm(desc)) { + if (isTcp) { + TcpPtr tcp(ip); + tcp->sum(0); + tcp->sum(cksum(tcp)); + DPRINTF(EthernetDesc, "TxDesc: Calculated TCP checksum\n"); + } else { + UdpPtr udp(ip); + udp->sum(0); + udp->sum(cksum(udp)); + DPRINTF(EthernetDesc, "TxDesc: Calculated UDP checksum\n"); + } + } + } + + if (TxdOp::ide(desc)) { + // Deal with the rx timer interrupts + DPRINTF(EthernetDesc, "TxDesc: Descriptor had IDE set\n"); + if (igbe->regs.tidv.idv()) { + DPRINTF(EthernetDesc, "TxDesc: setting tidv\n"); + if (igbe->tidvEvent.scheduled()) + igbe->tidvEvent.reschedule(curTick + igbe->regs.tidv.idv() * + igbe->intClock()); + else + igbe->tidvEvent.schedule(curTick + igbe->regs.tidv.idv() * + igbe->intClock()); + } + + if (igbe->regs.tadv.idv() && igbe->regs.tidv.idv()) { + DPRINTF(EthernetDesc, "TxDesc: setting tadv\n"); + if (!igbe->tadvEvent.scheduled()) + igbe->tadvEvent.schedule(curTick + igbe->regs.tadv.idv() * + igbe->intClock()); + } + } + + unusedCache.pop_front(); + usedCache.push_back(desc); + pktDone = true; + pktWaiting = false; + pktPtr = NULL; + + DPRINTF(EthernetDesc, "TxDesc: Descriptor Done\n"); +} + +bool +IGbE::TxDescCache::packetAvailable() +{ + if (pktDone) { + pktDone = false; + return true; + } + return false; +} + +void +IGbE::TxDescCache::enableSm() +{ + igbe->txTick = true; + if ((igbe->rxTick || igbe->txTick) && !igbe->tickEvent.scheduled()) + igbe->tickEvent.schedule((curTick/igbe->cycles(1)) * igbe->cycles(1) + + igbe->cycles(1)); +} + + + + +///////////////////////////////////// IGbE ///////////////////////////////// + +void +IGbE::txStateMachine() +{ + if (!regs.tctl.en()) { + txTick = false; + DPRINTF(EthernetSM, "TXS: RX disabled, stopping ticking\n"); + return; + } + + if (txPacket && txDescCache.packetAvailable()) { + bool success; + DPRINTF(EthernetSM, "TXS: packet placed in TX FIFO\n"); + success = txFifo.push(txPacket); 
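/*
 * TxDescCache::pktComplete() above fills in IP/TCP/UDP checksums with the
 * cksum() helpers from base/inet.hh. For reference, a minimal standalone
 * version of the 16-bit ones'-complement sum those helpers are built on
 * (illustrative only; the real TCP/UDP variants also cover a pseudo-header):
 *
 *     #include <cstddef>
 *     #include <cstdint>
 *
 *     uint16_t onesComplementSum(const uint8_t *data, size_t len)
 *     {
 *         uint32_t sum = 0;
 *         for (size_t i = 0; i + 1 < len; i += 2)
 *             sum += (uint32_t(data[i]) << 8) | data[i + 1];
 *         if (len & 1)
 *             sum += uint32_t(data[len - 1]) << 8;  // pad trailing odd byte
 *         while (sum >> 16)
 *             sum = (sum & 0xffff) + (sum >> 16);   // fold carries back in
 *         return uint16_t(~sum);                    // 0 means "checksum ok"
 *     }
 */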
+        assert(success);
+        txPacket = NULL;
+        return;
+    }
+
+    // Only support descriptor granularity
+    assert(regs.txdctl.gran());
+    if (regs.txdctl.lwthresh() && txDescCache.descLeft() < (regs.txdctl.lwthresh() * 8)) {
+        DPRINTF(EthernetSM, "TXS: LWTHRESH caused posting of TXDLOW\n");
+        postInterrupt(IT_TXDLOW);
+    }
+
+    if (!txPacket) {
+        txPacket = new EthPacketData(16384);
+    }
+
+    if (!txDescCache.packetWaiting()) {
+        if (txDescCache.descLeft() == 0) {
+            DPRINTF(EthernetSM, "TXS: No descriptors left in ring, forcing writeback\n");
+            txDescCache.writeback(0);
+            DPRINTF(EthernetSM, "TXS: No descriptors left, stopping ticking\n");
+            txTick = false;
+        }
+
+        if (!(txDescCache.descUnused())) {
+            DPRINTF(EthernetSM, "TXS: No descriptors available in cache, stopping ticking\n");
+            txTick = false;
+            DPRINTF(EthernetSM, "TXS: No descriptors left, fetching\n");
+            txDescCache.fetchDescriptors();
+            return;
+        }
+
+        int size;
+        size = txDescCache.getPacketSize();
+        if (size > 0 && txFifo.avail() > size) {
+            DPRINTF(EthernetSM, "TXS: Reserving %d bytes in FIFO and beginning DMA of next packet\n", size);
+            txFifo.reserve(size);
+            txDescCache.getPacketData(txPacket);
+        } else {
+            DPRINTF(EthernetSM, "TXS: No packets to get, writing back used descriptors\n");
+            txDescCache.writeback(0);
+        }
+
+        return;
+    }
+}
+
+bool
+IGbE::ethRxPkt(EthPacketPtr pkt)
+{
+    DPRINTF(Ethernet, "RxFIFO: Receiving packet from wire\n");
+    if (!regs.rctl.en()) {
+        DPRINTF(Ethernet, "RxFIFO: RX not enabled, dropping\n");
+        return true;
+    }
+
+    // restart the state machines if they are stopped
+    rxTick = true;
+    if ((rxTick || txTick) && !tickEvent.scheduled()) {
+        DPRINTF(EthernetSM, "RXS: received packet into fifo, starting ticking\n");
+        tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1));
+    }
+
+    if (!rxFifo.push(pkt)) {
+        DPRINTF(Ethernet, "RxFIFO: Packet won't fit in fifo... dropped\n");
+        postInterrupt(IT_RXO, true);
+        return false;
+    }
+    return true;
 }
+void
+IGbE::rxStateMachine()
+{
+    if (!regs.rctl.en()) {
+        rxTick = false;
+        DPRINTF(EthernetSM, "RXS: RX disabled, stopping ticking\n");
+        return;
+    }
+
+    // If the packet is done check for interrupts/descriptors/etc
+    if (rxDescCache.packetDone()) {
+        DPRINTF(EthernetSM, "RXS: Packet completed DMA to memory\n");
+        int descLeft = rxDescCache.descLeft();
+        switch (regs.rctl.rdmts()) {
+            case 2: if (descLeft > .125 * regs.rdlen()) break;
+            case 1: if (descLeft > .250 * regs.rdlen()) break;
+            case 0: if (descLeft > .500 * regs.rdlen()) break;
+                DPRINTF(Ethernet, "RXS: Interrupting (RXDMT) because of descriptors left\n");
+                postInterrupt(IT_RXDMT);
+                break;
+        }
+
+        if (descLeft == 0) {
+            DPRINTF(EthernetSM, "RXS: No descriptors left in ring, forcing writeback\n");
+            rxDescCache.writeback(0);
+            DPRINTF(EthernetSM, "RXS: No descriptors left, stopping ticking\n");
+            rxTick = false;
+        }
+
+        // only support descriptor granularities
+        assert(regs.rxdctl.gran());
+
+        if (regs.rxdctl.wthresh() >= rxDescCache.descUsed()) {
+            DPRINTF(EthernetSM, "RXS: Writing back because WTHRESH >= descUsed\n");
+            rxDescCache.writeback(cacheBlockSize()-1);
+        }
+
+        if ((rxDescCache.descUnused() < regs.rxdctl.pthresh()) &&
+            ((rxDescCache.descLeft() - rxDescCache.descUnused()) > regs.rxdctl.hthresh())) {
+            DPRINTF(EthernetSM, "RXS: Fetching descriptors because descUnused < PTHRESH\n");
+            rxDescCache.fetchDescriptors();
+        }
+
+        if (rxDescCache.descUnused() == 0) {
+            DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
+            rxTick = false;
+            DPRINTF(EthernetSM, "RXS: Fetching descriptors because none available\n");
+            rxDescCache.fetchDescriptors();
+        }
+        return;
+    }
+
+    if (!rxDescCache.descUnused()) {
+        DPRINTF(EthernetSM, "RXS: No descriptors available in cache, stopping ticking\n");
+        rxTick = false;
+        DPRINTF(EthernetSM, "RXS: No descriptors available, fetching\n");
+        rxDescCache.fetchDescriptors();
+        return;
+    }
+
+    if (rxFifo.empty()) {
+        DPRINTF(EthernetSM, "RXS: RxFIFO empty, stopping ticking\n");
+        rxTick = false;
+        return;
+    }
+
+    EthPacketPtr pkt;
+    pkt = rxFifo.front();
+
+    DPRINTF(EthernetSM, "RXS: Writing packet into memory\n");
+    if (!rxDescCache.writePacket(pkt)) {
+        return;
+    }
+
+    DPRINTF(EthernetSM, "RXS: Removing packet from FIFO\n");
+    rxFifo.pop();
+    DPRINTF(EthernetSM, "RXS: stopping ticking until packet DMA completes\n");
+    rxTick = false;
+}
+
+void
+IGbE::txWire()
+{
+    if (txFifo.empty()) {
+        return;
+    }
+
+    txTick = true;
+
+    if (etherInt->sendPacket(txFifo.front())) {
+        DPRINTF(Ethernet, "TxFIFO: Successful transmit, bytes in fifo: %d\n",
+                txFifo.avail());
+        txFifo.pop();
+    }
+
+    if (txFifo.empty()) {
+        postInterrupt(IT_TXQE);
+        DPRINTF(Ethernet, "TxFIFO: Empty, posting interrupt\n");
+    }
+}
+
+void
+IGbE::tick()
+{
+    DPRINTF(EthernetSM, "IGbE: -------------- Cycle --------------\n");
+
+    if (rxTick)
+        rxStateMachine();
+
+    if (txTick) {
+        txStateMachine();
+        txWire();
+    }
+
+    if (rxTick || txTick)
+        tickEvent.schedule(curTick + cycles(1));
+}
+
 void
 IGbE::ethTxDone()
 {
-    panic("Need to implemenet\n");
+    // restart the state machines if they are stopped
+    txTick = true;
+    if ((rxTick || txTick) && !tickEvent.scheduled())
+        tickEvent.schedule((curTick/cycles(1)) * cycles(1) + cycles(1));
+    DPRINTF(Ethernet, "TxFIFO: Transmission complete\n");
 }
 
 void
diff --git a/src/dev/i8254xGBe.hh b/src/dev/i8254xGBe.hh
index fa9e65b22..d7d20ae50 100644
--- a/src/dev/i8254xGBe.hh
+++ b/src/dev/i8254xGBe.hh
@@ -35,6 
+35,9 @@ #ifndef __DEV_I8254XGBE_HH__ #define __DEV_I8254XGBE_HH__ +#include +#include + #include "base/inet.hh" #include "base/statistics.hh" #include "dev/etherint.hh" @@ -50,24 +53,434 @@ class IGbE : public PciDev { private: IGbEInt *etherInt; + + // device registers iGbReg::Regs regs; + + // eeprom data, status and control bits int eeOpBits, eeAddrBits, eeDataBits; uint8_t eeOpcode, eeAddr; - - bool useFlowControl; - uint16_t flash[iGbReg::EEPROM_SIZE]; + // cached parameters from params struct + Tick tickRate; + bool useFlowControl; + + // packet fifos + PacketFifo rxFifo; + PacketFifo txFifo; + + // Packet that we are currently putting into the txFifo + EthPacketPtr txPacket; + + // Should to Rx/Tx State machine tick? + bool rxTick; + bool txTick; + + // Event and function to deal with RDTR timer expiring + void rdtrProcess() { postInterrupt(iGbReg::IT_RXDMT, true); } + //friend class EventWrapper; + EventWrapper rdtrEvent; + + // Event and function to deal with RADV timer expiring + void radvProcess() { postInterrupt(iGbReg::IT_RXDMT, true); } + //friend class EventWrapper; + EventWrapper radvEvent; + + // Event and function to deal with TADV timer expiring + void tadvProcess() { postInterrupt(iGbReg::IT_TXDW, true); } + //friend class EventWrapper; + EventWrapper tadvEvent; + + // Event and function to deal with TIDV timer expiring + void tidvProcess() { postInterrupt(iGbReg::IT_TXDW, true); }; + //friend class EventWrapper; + EventWrapper tidvEvent; + + // Main event to tick the device + void tick(); + //friend class EventWrapper; + EventWrapper tickEvent; + + + void rxStateMachine(); + void txStateMachine(); + void txWire(); + + /** Write an interrupt into the interrupt pending register and check mask + * and interrupt limit timer before sending interrupt to CPU + * @param t the type of interrupt we are posting + * @param now should we ignore the interrupt limiting timer + */ + void postInterrupt(iGbReg::IntTypes t, bool now = false); + + /** Check and see if changes to the mask register have caused an interrupt + * to need to be sent or perhaps removed an interrupt cause. 
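     * For example (illustrative values): if ICR holds IT_RXT|IT_TXDW (0x81)
     * and IMR is IT_RXT (0x80), the interrupt line is (or stays) asserted;
     * a subsequent write of 0x80 to IMC clears that mask bit, (ICR & IMR)
     * becomes zero, and the check deasserts the line via cpuClearInt().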
+ */ + void chkInterrupt(); + + /** Send an interrupt to the cpu + */ + void cpuPostInt(); + // Event to moderate interrupts + EventWrapper interEvent; + + /** Clear the interupt line to the cpu + */ + void cpuClearInt(); + + Tick intClock() { return Clock::Int::ns * 1024; } + + template + class DescCache + { + protected: + virtual Addr descBase() const = 0; + virtual long descHead() const = 0; + virtual long descTail() const = 0; + virtual long descLen() const = 0; + virtual void updateHead(long h) = 0; + virtual void enableSm() = 0; + + std::deque usedCache; + std::deque unusedCache; + + T *fetchBuf; + T *wbBuf; + + // Pointer to the device we cache for + IGbE *igbe; + + // Name of this descriptor cache + std::string _name; + + // How far we've cached + int cachePnt; + + // The size of the descriptor cache + int size; + + // How many descriptors we are currently fetching + int curFetching; + + // How many descriptors we are currently writing back + int wbOut; + + // if the we wrote back to the end of the descriptor ring and are going + // to have to wrap and write more + bool moreToWb; + + // What the alignment is of the next descriptor writeback + Addr wbAlignment; + + /** The packet that is currently being dmad to memory if any + */ + EthPacketPtr pktPtr; + + public: + DescCache(IGbE *i, const std::string n, int s) + : igbe(i), _name(n), cachePnt(0), size(s), curFetching(0), wbOut(0), + pktPtr(NULL), fetchEvent(this), wbEvent(this) + { + fetchBuf = new T[size]; + wbBuf = new T[size]; + } + + virtual ~DescCache() + { + reset(); + } + + std::string name() { return _name; } + + /** If the address/len/head change when we've got descriptors that are + * dirty that is very bad. This function checks that we don't and if we + * do panics. + */ + void areaChanged() + { + if (usedCache.size() > 0 || unusedCache.size() > 0) + panic("Descriptor Address, Length or Head changed. Bad\n"); + } + + void writeback(Addr aMask) + { + int curHead = descHead(); + int max_to_wb = usedCache.size() + curHead; + + DPRINTF(EthernetDesc, "Writing back descriptors head: %d tail: " + "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n", + curHead, descTail(), descLen(), cachePnt, max_to_wb, + descLeft()); + + // Check if this writeback is less restrictive that the previous + // and if so setup another one immediately following it + if (wbOut && (aMask < wbAlignment)) { + moreToWb = true; + wbAlignment = aMask; + DPRINTF(EthernetDesc, "Writing back already in process, returning\n"); + return; + } + + + moreToWb = false; + wbAlignment = aMask; + + if (max_to_wb > descLen()) { + max_to_wb = descLen() - curHead; + moreToWb = true; + // this is by definition aligned correctly + } else if (aMask != 0) { + // align the wb point to the mask + max_to_wb = max_to_wb & ~(aMask>>4); + } + + DPRINTF(EthernetDesc, "Writing back %d descriptors\n", max_to_wb); + + if (max_to_wb <= 0 || wbOut) + return; + + wbOut = max_to_wb - curHead; + + for (int x = 0; x < wbOut; x++) + memcpy(&wbBuf[x], usedCache[x], sizeof(T)); + + for (int x = 0; x < wbOut; x++) { + assert(usedCache.size()); + delete usedCache[0]; + usedCache.pop_front(); + }; + + igbe->dmaWrite(descBase() + curHead * sizeof(T), wbOut * sizeof(T), + &wbEvent, (uint8_t*)wbBuf); + } + + /** Fetch a chunk of descriptors into the descriptor cache. 
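     * Illustrative arithmetic: with descTail() = 20, cachePnt = 50 and a
     * 32-entry cache already holding 10 descriptors, max_to_fetch starts at
     * 50 - 20 = 30 and is then capped to min(30, 32 - 10) = 22 descriptors,
     * so 22 * sizeof(T) bytes are read starting at
     * descBase() + cachePnt * sizeof(T).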
+ * Calls fetchComplete when the memory system returns the data + */ + void fetchDescriptors() + { + size_t max_to_fetch = cachePnt - descTail(); + if (max_to_fetch < 0) + max_to_fetch = descLen() - cachePnt; + + max_to_fetch = std::min(max_to_fetch, (size - usedCache.size() - + unusedCache.size())); + + DPRINTF(EthernetDesc, "Fetching descriptors head: %d tail: " + "%d len: %d cachePnt: %d max_to_wb: %d descleft: %d\n", + descHead(), descTail(), descLen(), cachePnt, + max_to_fetch, descLeft()); + + // Nothing to do + if (max_to_fetch == 0 || curFetching) + return; + + // So we don't have two descriptor fetches going on at once + curFetching = max_to_fetch; + + igbe->dmaRead(descBase() + cachePnt * sizeof(T), + curFetching * sizeof(T), &fetchEvent, (uint8_t*)fetchBuf); + } + + + /** Called by event when dma to read descriptors is completed + */ + void fetchComplete() + { + T *newDesc; + for (int x = 0; x < curFetching; x++) { + newDesc = new T; + memcpy(newDesc, &fetchBuf[x], sizeof(T)); + unusedCache.push_back(newDesc); + } + +#ifndef NDEBUG + int oldCp = cachePnt; +#endif + + cachePnt += curFetching; + if (cachePnt > descLen()) + cachePnt -= descLen(); + + DPRINTF(EthernetDesc, "Fetching complete cachePnt %d -> %d\n", + oldCp, cachePnt); + + enableSm(); + + } + + EventWrapper fetchEvent; + + /** Called by event when dma to writeback descriptors is completed + */ + void wbComplete() + { + long curHead = descHead(); +#ifndef NDEBUG + long oldHead = curHead; +#endif + + curHead += wbOut; + wbOut = 0; + + if (curHead > descLen()) + curHead = 0; + + // Update the head + updateHead(curHead); + + DPRINTF(EthernetDesc, "Writeback complete cachePnt %d -> %d\n", + oldHead, curHead); + + // If we still have more to wb, call wb now + if (moreToWb) { + DPRINTF(EthernetDesc, "Writeback has more todo\n"); + writeback(wbAlignment); + } + } + + + EventWrapper wbEvent; + + /* Return the number of descriptors left in the ring, so the device has + * a way to figure out if it needs to interrupt. + */ + int descLeft() const + { + int left = unusedCache.size(); + if (cachePnt - descTail() >= 0) + left += (cachePnt - descTail()); + else + left += (descLen() - cachePnt); + + return left; + } + + /* Return the number of descriptors used and not written back. + */ + int descUsed() const { return usedCache.size(); } + + /* Return the number of cache unused descriptors we have. */ + int descUnused() const {return unusedCache.size(); } + + /* Get into a state where the descriptor address/head/etc colud be + * changed */ + void reset() + { + DPRINTF(EthernetDesc, "Reseting descriptor cache\n"); + for (int x = 0; x < usedCache.size(); x++) + delete usedCache[x]; + for (int x = 0; x < unusedCache.size(); x++) + delete unusedCache[x]; + + usedCache.clear(); + unusedCache.clear(); + } + + }; + + + class RxDescCache : public DescCache + { + protected: + virtual Addr descBase() const { return igbe->regs.rdba(); } + virtual long descHead() const { return igbe->regs.rdh(); } + virtual long descLen() const { return igbe->regs.rdlen() >> 4; } + virtual long descTail() const { return igbe->regs.rdt(); } + virtual void updateHead(long h) { igbe->regs.rdh(h); } + virtual void enableSm(); + + bool pktDone; + + public: + RxDescCache(IGbE *i, std::string n, int s); + + /** Write the given packet into the buffer(s) pointed to by the + * descriptor and update the book keeping. Should only be called when + * there are no dma's pending. 
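     * Note: each descriptor is 16 bytes, which is why descLen() converts the
     * RDLEN byte count into a descriptor count (rdlen() >> 4), and why
     * writePacket() assumes the whole packet fits in a single receive buffer
     * of regs.rctl.descSize() bytes.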
+ * @param packet ethernet packet to write + * @return if the packet could be written (there was a free descriptor) + */ + bool writePacket(EthPacketPtr packet); + /** Called by event when dma to write packet is completed + */ + void pktComplete(); + + /** Check if the dma on the packet has completed. + */ + + bool packetDone(); + + EventWrapper pktEvent; + + }; + friend class RxDescCache; + + RxDescCache rxDescCache; + + class TxDescCache : public DescCache + { + protected: + virtual Addr descBase() const { return igbe->regs.tdba(); } + virtual long descHead() const { return igbe->regs.tdh(); } + virtual long descTail() const { return igbe->regs.tdt(); } + virtual long descLen() const { return igbe->regs.tdlen() >> 4; } + virtual void updateHead(long h) { igbe->regs.tdh(h); } + virtual void enableSm(); + + bool pktDone; + bool isTcp; + bool pktWaiting; + + public: + TxDescCache(IGbE *i, std::string n, int s); + + /** Tell the cache to DMA a packet from main memory into its buffer and + * return the size the of the packet to reserve space in tx fifo. + * @return size of the packet + */ + int getPacketSize(); + void getPacketData(EthPacketPtr p); + + /** Ask if the packet has been transfered so the state machine can give + * it to the fifo. + * @return packet available in descriptor cache + */ + bool packetAvailable(); + + /** Ask if we are still waiting for the packet to be transfered. + * @return packet still in transit. + */ + bool packetWaiting() { return pktWaiting; } + + /** Called by event when dma to write packet is completed + */ + void pktComplete(); + EventWrapper pktEvent; + + }; + friend class TxDescCache; + + TxDescCache txDescCache; public: struct Params : public PciDev::Params { bool use_flow_control; + int rx_fifo_size; + int tx_fifo_size; + int rx_desc_cache_size; + int tx_desc_cache_size; + Tick clock; }; IGbE(Params *params); ~IGbE() {;} + Tick clock; + inline Tick cycles(int numCycles) const { return numCycles * clock; } + virtual Tick read(PacketPtr pkt); virtual Tick write(PacketPtr pkt); @@ -78,6 +491,7 @@ class IGbE : public PciDev void setEthInt(IGbEInt *i) { assert(!etherInt); etherInt = i; } + const Params *params() const {return (const Params *)_params; } virtual void serialize(std::ostream &os); diff --git a/src/dev/i8254xGBe_defs.hh b/src/dev/i8254xGBe_defs.hh index b59b34a67..d9648a7c2 100644 --- a/src/dev/i8254xGBe_defs.hh +++ b/src/dev/i8254xGBe_defs.hh @@ -35,59 +35,58 @@ namespace iGbReg { -const uint32_t REG_CTRL = 0x00000; //* -const uint32_t REG_STATUS = 0x00008; //* -const uint32_t REG_EECD = 0x00010; //* -const uint32_t REG_EERD = 0x00014; //* -const uint32_t REG_CTRL_EXT = 0x00018; //*- -const uint32_t REG_MDIC = 0x00020; //* -const uint32_t REG_FCAL = 0x00028; //* -const uint32_t REG_FCAH = 0x0002C; //* -const uint32_t REG_FCT = 0x00030; //* -const uint32_t REG_VET = 0x00038; //* -const uint32_t REG_PBA = 0x01000; //* -const uint32_t REG_ICR = 0x000C0; //* -const uint32_t REG_ITR = 0x000C4; //* -const uint32_t REG_ICS = 0x000C8; //* -const uint32_t REG_IMS = 0x000D0; //* -const uint32_t REG_IMC = 0x000D8; //* -const uint32_t REG_IAM = 0x000E0; //* -const uint32_t REG_RCTL = 0x00100; //* -const uint32_t REG_FCTTV = 0x00170; //* -const uint32_t REG_TIPG = 0x00410; //* -const uint32_t REG_AIFS = 0x00458; //* -const uint32_t REG_LEDCTL = 0x00e00; //* -const uint32_t REG_FCRTL = 0x02160; //* -const uint32_t REG_FCRTH = 0x02168; //* -const uint32_t REG_RDBAL = 0x02800; //*- -const uint32_t REG_RDBAH = 0x02804; //*- -const uint32_t REG_RDLEN = 0x02808; 
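/*
 * The clock parameter declared above is the tick period of one device cycle,
 * so cycles(n) is simply n * clock. The state machines quantize their wakeups
 * to the next cycle boundary; a standalone sketch of that idiom (hypothetical
 * function name, integer ticks assumed):
 *
 *     #include <cstdint>
 *
 *     // First cycle boundary strictly after `now`.
 *     uint64_t nextCycle(uint64_t now, uint64_t period)
 *     {
 *         return (now / period) * period + period;
 *     }
 *
 *     // e.g. nextCycle(1050, 500) == 1500 and nextCycle(1500, 500) == 2000,
 *     // matching (curTick/cycles(1)) * cycles(1) + cycles(1) above.
 */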
//*- -const uint32_t REG_RDH = 0x02810; //*- -const uint32_t REG_RDT = 0x02818; //*- -const uint32_t REG_RDTR = 0x02820; //*- -const uint32_t REG_RXDCTL = 0x02828; //* -const uint32_t REG_RADV = 0x0282C; //*- -const uint32_t REG_RSRPD = 0x02C00; -const uint32_t REG_TCTL = 0x00400; //* -const uint32_t REG_TDBAL = 0x03800; //* -const uint32_t REG_TDBAH = 0x03804; //* -const uint32_t REG_TDLEN = 0x03808; //* -const uint32_t REG_TDH = 0x03810; //* -const uint32_t REG_TDT = 0x03818; //* -const uint32_t REG_TIDV = 0x03820; //* -const uint32_t REG_TXDMAC = 0x03000; -const uint32_t REG_TXDCTL = 0x03828; //* -const uint32_t REG_TADV = 0x0382C; //* -const uint32_t REG_TSPMT = 0x03830; + +// Registers used by the Intel GbE NIC +const uint32_t REG_CTRL = 0x00000; +const uint32_t REG_STATUS = 0x00008; +const uint32_t REG_EECD = 0x00010; +const uint32_t REG_EERD = 0x00014; +const uint32_t REG_CTRL_EXT = 0x00018; +const uint32_t REG_MDIC = 0x00020; +const uint32_t REG_FCAL = 0x00028; +const uint32_t REG_FCAH = 0x0002C; +const uint32_t REG_FCT = 0x00030; +const uint32_t REG_VET = 0x00038; +const uint32_t REG_PBA = 0x01000; +const uint32_t REG_ICR = 0x000C0; +const uint32_t REG_ITR = 0x000C4; +const uint32_t REG_ICS = 0x000C8; +const uint32_t REG_IMS = 0x000D0; +const uint32_t REG_IMC = 0x000D8; +const uint32_t REG_IAM = 0x000E0; +const uint32_t REG_RCTL = 0x00100; +const uint32_t REG_FCTTV = 0x00170; +const uint32_t REG_TIPG = 0x00410; +const uint32_t REG_AIFS = 0x00458; +const uint32_t REG_LEDCTL = 0x00e00; +const uint32_t REG_FCRTL = 0x02160; +const uint32_t REG_FCRTH = 0x02168; +const uint32_t REG_RDBAL = 0x02800; +const uint32_t REG_RDBAH = 0x02804; +const uint32_t REG_RDLEN = 0x02808; +const uint32_t REG_RDH = 0x02810; +const uint32_t REG_RDT = 0x02818; +const uint32_t REG_RDTR = 0x02820; +const uint32_t REG_RXDCTL = 0x02828; +const uint32_t REG_RADV = 0x0282C; +const uint32_t REG_TCTL = 0x00400; +const uint32_t REG_TDBAL = 0x03800; +const uint32_t REG_TDBAH = 0x03804; +const uint32_t REG_TDLEN = 0x03808; +const uint32_t REG_TDH = 0x03810; +const uint32_t REG_TDT = 0x03818; +const uint32_t REG_TIDV = 0x03820; +const uint32_t REG_TXDCTL = 0x03828; +const uint32_t REG_TADV = 0x0382C; const uint32_t REG_CRCERRS = 0x04000; -const uint32_t REG_RXCSUM = 0x05000; //*- +const uint32_t REG_RXCSUM = 0x05000; const uint32_t REG_MTA = 0x05200; const uint32_t REG_RAL = 0x05400; const uint32_t REG_RAH = 0x05404; const uint32_t REG_VFTA = 0x05600; -const uint32_t REG_WUC = 0x05800;//* -const uint32_t REG_MANC = 0x05820;//* +const uint32_t REG_WUC = 0x05800; +const uint32_t REG_MANC = 0x05820; const uint8_t EEPROM_READ_OPCODE_SPI = 0x03; const uint8_t EEPROM_RDSR_OPCODE_SPI = 0x05; @@ -99,6 +98,8 @@ const uint8_t RCV_ADDRESS_TABLE_SIZE = 16; const uint8_t MULTICAST_TABLE_SIZE = 128; const uint32_t STATS_REGS_SIZE = 0x124; + +// Registers in that are accessed in the PHY const uint8_t PHY_PSTATUS = 0x1; const uint8_t PHY_PID = 0x2; const uint8_t PHY_EPID = 0x3; @@ -106,179 +107,102 @@ const uint8_t PHY_GSTATUS = 10; const uint8_t PHY_EPSTATUS = 15; const uint8_t PHY_AGC = 18; +// Receive Descriptor Status Flags +const uint8_t RXDS_PIF = 0x80; +const uint8_t RXDS_IPCS = 0x40; +const uint8_t RXDS_TCPCS = 0x20; +const uint8_t RXDS_UDPCS = 0x10; +const uint8_t RXDS_VP = 0x08; +const uint8_t RXDS_IXSM = 0x04; +const uint8_t RXDS_EOP = 0x02; +const uint8_t RXDS_DD = 0x01; +// Receive Descriptor Error Flags +const uint8_t RXDE_RXE = 0x80; +const uint8_t RXDE_IPE = 0x40; +const uint8_t RXDE_TCPE = 0x20; +const uint8_t 
RXDE_SEQ = 0x04; +const uint8_t RXDE_SE = 0x02; +const uint8_t RXDE_CE = 0x01; + +// Interrupt types +enum IntTypes +{ + IT_NONE = 0x00000, //dummy value + IT_TXDW = 0x00001, + IT_TXQE = 0x00002, + IT_LSC = 0x00004, + IT_RXSEQ = 0x00008, + IT_RXDMT = 0x00010, + IT_RXO = 0x00040, + IT_RXT = 0x00080, + IT_MADC = 0x00200, + IT_RXCFG = 0x00400, + IT_GPI0 = 0x02000, + IT_GPI1 = 0x04000, + IT_TXDLOW = 0x08000, + IT_SRPD = 0x10000, + IT_ACK = 0x20000 +}; + +// Receive Descriptor struct struct RxDesc { Addr buf; uint16_t len; uint16_t csum; - union { - uint8_t status; - struct { // these may be in the worng order - uint8_t dd:1; // descriptor done (hw is done when 1) - uint8_t eop:1; // end of packet - uint8_t xism:1; // ignore checksum - uint8_t vp:1; // packet is vlan packet - uint8_t rsv:1; // reserved - uint8_t tcpcs:1; // TCP checksum done - uint8_t ipcs:1; // IP checksum done - uint8_t pif:1; // passed in-exact filter - } st; - }; - union { - uint8_t errors; - struct { - uint8_t ce:1; // crc error or alignment error - uint8_t se:1; // symbol error - uint8_t seq:1; // sequence error - uint8_t rsv:1; // reserved - uint8_t cxe:1; // carrier extension error - uint8_t tcpe:1; // tcp checksum error - uint8_t ipe:1; // ip checksum error - uint8_t rxe:1; // PX data error - } er; - }; - union { - uint16_t special; - struct { - uint16_t vlan:12; //vlan id - uint16_t cfi:1; // canocial form id - uint16_t pri:3; // user priority - } sp; - }; + uint8_t status; + uint8_t errors; + uint16_t vlan; }; -union TxDesc { - uint8_t data[16]; - struct { - Addr buf; - uint16_t len; - uint8_t cso; - union { - uint8_t command; - struct { - uint8_t eop:1; // end of packet - uint8_t ifcs:1; // insert crc - uint8_t ic:1; // insert checksum - uint8_t rs:1; // report status - uint8_t rps:1; // report packet sent - uint8_t dext:1; // extension - uint8_t vle:1; // vlan enable - uint8_t ide:1; // interrupt delay enable - } cmd; - }; - union { - uint8_t status:4; - struct { - uint8_t dd:1; // descriptor done - uint8_t ec:1; // excess collisions - uint8_t lc:1; // late collision - uint8_t tu:1; // transmit underrun - } st; - }; - uint8_t reserved:4; - uint8_t css; - union { - uint16_t special; - struct { - uint16_t vlan:12; //vlan id - uint16_t cfi:1; // canocial form id - uint16_t pri:3; // user priority - } sp; - }; - } legacy; - - // Type 0000 descriptor - struct { - uint8_t ipcss; - uint8_t ipcso; - uint16_t ipcse; - uint8_t tucss; - uint8_t tucso; - uint16_t tucse; - uint32_t paylen:20; - uint8_t dtype:4; - union { - uint8_t tucommand; - struct { - uint8_t tcp:1; // tcp/udp - uint8_t ip:1; // ip ipv4/ipv6 - uint8_t tse:1; // tcp segment enbale - uint8_t rs:1; // report status - uint8_t rsv0:1; // reserved - uint8_t dext:1; // descriptor extension - uint8_t rsv1:1; // reserved - uint8_t ide:1; // interrupt delay enable - } tucmd; - }; - union { - uint8_t status:4; - struct { - uint8_t dd:1; - uint8_t rsvd:3; - } sta; - }; - uint8_t reserved:4; - uint8_t hdrlen; - uint16_t mss; - } t0; - - // Type 0001 descriptor - struct { - Addr buf; - uint32_t dtalen:20; - uint8_t dtype:4; - union { - uint8_t dcommand; - struct { - uint8_t eop:1; // end of packet - uint8_t ifcs:1; // insert crc - uint8_t tse:1; // segmentation enable - uint8_t rs:1; // report status - uint8_t rps:1; // report packet sent - uint8_t dext:1; // extension - uint8_t vle:1; // vlan enable - uint8_t ide:1; // interrupt delay enable - } dcmd; - }; - union { - uint8_t status:4; - struct { - uint8_t dd:1; // descriptor done - uint8_t ec:1; // excess collisions - 
uint8_t lc:1; // late collision - uint8_t tu:1; // transmit underrun - } sta; - }; - union { - uint8_t pktopts; - struct { - uint8_t ixsm:1; // insert ip checksum - uint8_t txsm:1; // insert tcp checksum - }; - }; - union { - uint16_t special; - struct { - uint16_t vlan:12; //vlan id - uint16_t cfi:1; // canocial form id - uint16_t pri:3; // user priority - } sp; - }; - } t1; - - // Junk to test descriptor type! - struct { - uint64_t junk; - uint32_t junk1:20; - uint8_t dtype; - uint8_t junk2:5; - uint8_t dext:1; - uint8_t junk3:2; - uint8_t junk4:4; - uint32_t junk5; - } type; +struct TxDesc { + uint64_t d1; + uint64_t d2; }; +namespace TxdOp { +const uint8_t TXD_CNXT = 0x0; +const uint8_t TXD_DATA = 0x0; + +bool isLegacy(TxDesc *d) { return !bits(d->d2,29,29); } +uint8_t getType(TxDesc *d) { return bits(d->d2, 23,20); } +bool isContext(TxDesc *d) { return !isLegacy(d) && getType(d) == TXD_CNXT; } +bool isData(TxDesc *d) { return !isLegacy(d) && getType(d) == TXD_DATA; } + +Addr getBuf(TxDesc *d) { assert(isLegacy(d) || isData(d)); return d->d1; } +Addr getLen(TxDesc *d) { if (isLegacy(d)) return bits(d->d2,15,0); else return bits(d->d2, 19,0); } +void setDd(TxDesc *d) +{ + replaceBits(d->d1, 35, 32, 1); +} + +bool ide(TxDesc *d) { return bits(d->d2, 31,31); } +bool vle(TxDesc *d) { assert(isLegacy(d) || isData(d)); return bits(d->d2, 30,30); } +bool rs(TxDesc *d) { return bits(d->d2, 28,28); } +bool ic(TxDesc *d) { assert(isLegacy(d) || isData(d)); return isLegacy(d) && bits(d->d2, 27,27); } +bool tse(TxDesc *d) { return (isData(d) || isContext(d)) && bits(d->d2, 27,27); } +bool ifcs(TxDesc *d) { assert(isLegacy(d) || isData(d)); return bits(d->d2, 26,26); } +bool eop(TxDesc *d) { assert(isLegacy(d) || isData(d)); return bits(d->d2, 25,25); } +bool ip(TxDesc *d) { assert(isContext(d)); return bits(d->d2, 26,26); } +bool tcp(TxDesc *d) { assert(isContext(d)); return bits(d->d2, 25,25); } + +uint8_t getCso(TxDesc *d) { assert(isLegacy(d)); return bits(d->d2, 23,16); } +uint8_t getCss(TxDesc *d) { assert(isLegacy(d)); return bits(d->d2, 47,40); } + +bool ixsm(TxDesc *d) { return isData(d) && bits(d->d2, 40,40); } +bool txsm(TxDesc *d) { return isData(d) && bits(d->d2, 41,41); } + +int tucse(TxDesc *d) { assert(isContext(d)); return bits(d->d1,63,48); } +int tucso(TxDesc *d) { assert(isContext(d)); return bits(d->d1,47,40); } +int tucss(TxDesc *d) { assert(isContext(d)); return bits(d->d1,39,32); } +int ipcse(TxDesc *d) { assert(isContext(d)); return bits(d->d1,31,16); } +int ipcso(TxDesc *d) { assert(isContext(d)); return bits(d->d1,15,8); } +int ipcss(TxDesc *d) { assert(isContext(d)); return bits(d->d1,7,0); } +int mss(TxDesc *d) { assert(isContext(d)); return bits(d->d2,63,48); } +int hdrlen(TxDesc *d) { assert(isContext(d)); return bits(d->d2,47,40); } +} // namespace TxdOp + + #define ADD_FIELD32(NAME, OFFSET, BITS) \ inline uint32_t NAME() { return bits(_data, OFFSET+BITS-1, OFFSET); } \ inline void NAME(uint32_t d) { replaceBits(_data, OFFSET+BITS-1, OFFSET,d); } @@ -295,6 +219,7 @@ struct Regs { const Reg &operator=(T d) { _data = d; return *this;} bool operator==(T d) { return d == _data; } void operator()(T d) { _data = d; } + Reg() { _data = 0; } }; struct CTRL : public Reg { // 0x0000 CTRL Register @@ -454,7 +379,6 @@ struct Regs { ADD_FIELD32(lpe,5,1); // long packet reception enabled ADD_FIELD32(lbm,6,2); // ADD_FIELD32(rdmts,8,2); // - ADD_FIELD32(rsvd,10,2); // ADD_FIELD32(mo,12,2); // ADD_FIELD32(mdr,14,1); // ADD_FIELD32(bam,15,1); // @@ -462,11 +386,21 @@ struct Regs { 
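/*
 * ADD_FIELD32(NAME, OFFSET, BITS) defined above expands into a getter/setter
 * pair over the register's backing _data word, built on the bits() and
 * replaceBits() helpers from base/bitfield.hh. A standalone 32-bit sketch of
 * those helpers (illustrative; the real versions are templated over the
 * integer type):
 *
 *     #include <cstdint>
 *
 *     // Extract bits first..last (first is the high bit) of val.
 *     uint32_t bits(uint32_t val, int first, int last)
 *     {
 *         int nbits = first - last + 1;
 *         uint32_t mask = (nbits >= 32) ? ~0u : ((1u << nbits) - 1);
 *         return (val >> last) & mask;
 *     }
 *
 *     // Overwrite bits first..last of val with bit_val.
 *     void replaceBits(uint32_t &val, int first, int last, uint32_t bit_val)
 *     {
 *         int nbits = first - last + 1;
 *         uint32_t field = (nbits >= 32) ? ~0u : ((1u << nbits) - 1);
 *         val = (val & ~(field << last)) | ((bit_val & field) << last);
 *     }
 *
 *     // e.g. rctl.secrc() reads bit 26 of RCTL and rctl.secrc(1) sets it;
 *     // the TxdOp accessors above apply the same helpers to the raw 64-bit
 *     // descriptor words d1 and d2.
 */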
ADD_FIELD32(vfe,18,1); // ADD_FIELD32(cfien,19,1); // ADD_FIELD32(cfi,20,1); // - ADD_FIELD32(rsvd2,21,1); // ADD_FIELD32(dpf,22,1); // discard pause frames ADD_FIELD32(pmcf,23,1); // pass mac control frames ADD_FIELD32(bsex,25,1); // buffer size extension ADD_FIELD32(secrc,26,1); // strip ethernet crc from incoming packet + int descSize() + { + switch(bsize()) { + case 0: return bsex() ? 2048 : -1; + case 1: return bsex() ? 1024 : 16384; + case 2: return bsex() ? 512 : 8192; + case 3: return bsex() ? 256 : 4096; + default: + return -1; + } + } }; RCTL rctl; @@ -543,10 +477,21 @@ struct Regs { struct RDTR : public Reg { // 0x2820 RDTR Register using Reg::operator=; ADD_FIELD32(delay,0,16); // receive delay timer - ADD_FIELD32(fpd, 31,); // flush partial descriptor block ?? + ADD_FIELD32(fpd, 31,1); // flush partial descriptor block ?? }; RDTR rdtr; + struct RXDCTL : public Reg { // 0x2828 RXDCTL Register + using Reg::operator=; + ADD_FIELD32(pthresh,0,6); // prefetch threshold, less that this + // consider prefetch + ADD_FIELD32(hthresh,8,6); // number of descriptors in host mem to + // consider prefetch + ADD_FIELD32(wthresh,16,6); // writeback threshold + ADD_FIELD32(gran,24,1); // granularity 0 = desc, 1 = cacheline + }; + RXDCTL rxdctl; + struct RADV : public Reg { // 0x282C RADV Register using Reg::operator=; ADD_FIELD32(idv,0,16); // absolute interrupt delay diff --git a/src/dev/io_device.hh b/src/dev/io_device.hh index 902cde909..cd7a5296a 100644 --- a/src/dev/io_device.hh +++ b/src/dev/io_device.hh @@ -132,6 +132,7 @@ class DmaPort : public Port bool dmaPending() { return pendingCount > 0; } + int cacheBlockSize() { return peerBlockSize(); } unsigned int drain(Event *de); }; @@ -261,13 +262,17 @@ class DmaDevice : public PioDevice addr, size, event, data); } - void dmaRead(Addr addr, int size, Event *event, uint8_t *data = NULL) - { dmaPort->dmaAction(MemCmd::ReadReq, addr, size, event, data); } + void dmaRead(Addr addr, int size, Event *event, uint8_t *data) + { + dmaPort->dmaAction(MemCmd::ReadReq, addr, size, event, data); + } bool dmaPending() { return dmaPort->dmaPending(); } virtual unsigned int drain(Event *de); + int cacheBlockSize() { return dmaPort->cacheBlockSize(); } + virtual Port *getPort(const std::string &if_name, int idx = -1) { if (if_name == "pio") {
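/*
 * With this change a caller of dmaRead() must always supply a real
 * destination buffer. The NIC code above pairs that buffer with a completion
 * Event, e.g. (sketch mirroring TxDescCache::getPacketData(); names are
 * taken from the code above):
 *
 *     // p->data is the packet buffer; it presumably has to stay valid until
 *     // pktEvent fires, at which point pktComplete() runs.
 *     igbe->dmaRead(TxdOp::getBuf(desc),   // source address from descriptor
 *                   TxdOp::getLen(desc),   // length in bytes
 *                   &pktEvent,             // scheduled when the data arrives
 *                   p->data);              // destination, no longer optional
 */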