#include "synop3504_dma.h" #include "synop3504_reg.h" #include #include #include #include /* Size of each temporary Rx buffer.*/ #define PKT_BUF_SZ 1536 /* print debug message on bad packet receiving or transmitting */ #define SYNOP_ETH_DEBUG_BAD /* try to pre-synchronize descriptors */ /* #define USE_PRESYNC */ /* count of check the current RX descriptor again if owned by DMA */ #define MAX_CHECK_RX 1 /* RX descriptor error mask */ #define RX_ERROR_MASK 0x400078DF extern uint32_t DMA_zone_base; struct syeth_dma_rx_t { union { struct { /** [ 0: 0] Rx MAC Address/Payload Checksum Error */ int err_mac:1; /** [ 1: 1] CRC error */ int err_crc:1; /** [ 2: 2] Dribble error */ int err_drib:1; /** [ 3: 3] Receive error */ int err_recv:1; /** [ 4: 4] Watchdog event occurred */ int watchdog:1; /** [ 5: 5] Ethernet-type */ int type:1; /** [ 6: 6] Late collision */ int err_coll:1; /** [ 7: 7] IPC checksum error */ int err_ipc:1; /** [ 8: 8] last frame descriptor */ int last:1; /** [ 9: 9] first frame descriptor */ int first:1; /** [10:10] VLAN tag */ int vlan:1; /** [11:11] overflow error */ int err_over:1; /** [12:12] length/type error */ int err_length:1; /** [13:13] source address fail */ int err_sfail:1; /** [14:14] Descriptor error */ int err_desc:1; /** [15:15] error summary */ int err:1; /** [29:16] frame length */ uint length:14; /** [30:30] if set - destination address fail */ int err_dfail:1; /** [31:31] if set - owned by DMA */ int dma_own:1; } bf; unsigned int val; } status; union { struct { /** [10: 0] Buffer1 length */ uint length1:11; /** [21:11] Buffer2 length */ uint length2:11; /** [23:22] */ int _reserved2:2; /** [24:24] Addr2 is the next descr address */ int addr2en:1; /** [25:25] End of ring */ int eor:1; /** [30:26] */ int _reserved1:5; /** [31:31] Disable Int on Completion */ int int_dis:1; } bf; unsigned int val; } state; unsigned int addr1; unsigned int addr2; }; struct syeth_dma_tx_t { union { struct { /** [ 0: 0] Deferred bit */ int defered:1; /** [ 1: 1] Underflow error */ int err_under:1; /** [ 2: 2] excessive deferral */ int exc_def:1; /** [ 6: 3] Collision count*/ int coll_cnt:4; /** [ 7: 7] VLAN tag */ int vlan:1; /** [ 8: 8] Excessive collision */ int err_ecoll:1; /** [ 9: 9] Late collision */ int err_lcoll:1; /** [10:10] No carrier */ int err_nocar:1; /** [11:11] Carrier lost */ int err_clost:1; /** [12:12] Payload/Checksum error */ int err_plcs:1; /** [13:13] Frame flushed */ int flushed:1; /** [14:14] Jabber Timeout */ int err_jabt:1; /** [15:15] error summary */ int err:1; /** [16:16] IP header error */ int err_ip:1; /** [30:17] */ int _reserved:14; /** [31:31] if set - owned by DMA */ int dma_own:1; } bf; unsigned int val; } status; union { struct { /** [10: 0] Buffer1 length */ uint length1:11; /** [21:11] Buffer2 length */ uint length2:11; /** [22:22] */ int _reserved:1; /** [23:23] Disable padding */ int no_pad:1; /** [24:24] Addr2 is the next descr address */ int addr2en:1; /** [25:25] End of ring */ int eor:1; /** [26:26] Disable CRC */ int no_crc:1; /** [28:27] Checksum insertion control */ int cs_ctrl:2; /** [29:29] First frame segment */ int first:1; /** [30:30] Last frame segment */ int last:1; /** [31:31] Enable Int on Completion */ int int_oncomp:1; } bf; unsigned int val; } state; unsigned int addr1; unsigned int addr2; }; struct syeth_dma_tx_ex_t { union { struct { /** [ 0: 0] Deferred bit */ int defered:1; /** [ 1: 1] Underflow error */ int err_under:1; /** [ 2: 2] excessive deferral */ int exc_def:1; /** [ 6: 3] Collision count*/ int coll_cnt:4; /** 
struct syeth_dma_tx_ex_t {
    union {
        struct {
            /** [ 0: 0] Deferred bit */
            int defered:1;
            /** [ 1: 1] Underflow error */
            int err_under:1;
            /** [ 2: 2] excessive deferral */
            int exc_def:1;
            /** [ 6: 3] Collision count */
            int coll_cnt:4;
            /** [ 7: 7] VLAN tag */
            int vlan:1;
            /** [ 8: 8] Excessive collision */
            int err_ecoll:1;
            /** [ 9: 9] Late collision */
            int err_lcoll:1;
            /** [10:10] No carrier */
            int err_nocar:1;
            /** [11:11] Carrier lost */
            int err_clost:1;
            /** [12:12] Payload/Checksum error */
            int err_plcs:1;
            /** [13:13] Frame flushed */
            int flushed:1;
            /** [14:14] Jabber Timeout */
            int err_jabt:1;
            /** [15:15] error summary */
            int err:1;
            /** [16:16] IP header error */
            int err_ip:1;
            /** [19:17] */
            int _reserved2:3;
            /** [20:20] Addr2 is the next descr address */
            int addr2en:1;
            /** [21:21] End of ring */
            int eor:1;
            /** [23:22] Checksum insertion control */
            int cs_ctrl:2;
            /** [25:24] */
            int _reserved1:2;
            /** [26:26] Disable padding */
            int no_pad:1;
            /** [27:27] Disable CRC */
            int no_crc:1;
            /** [28:28] First frame segment */
            int first:1;
            /** [29:29] Last frame segment */
            int last:1;
            /** [30:30] Enable Int on Completion */
            int int_oncomp:1;
            /** [31:31] if set - owned by DMA */
            int dma_own:1;
        } bf;
        unsigned int val;
    } status;
    union {
        struct {
            /** [12: 0] Buffer1 length */
            int length1:13;
            /** [15:13] */
            int _reserved1:3;
            /** [28:16] Buffer2 length */
            int length2:13;
            /** [31:29] */
            int _reserved2:3;
        } bf;
        unsigned int val;
    } state;
    unsigned int addr1;
    unsigned int addr2;
};

static const unsigned char __hexdigits[] = "0123456789ABCDEF";

/* Render 'len' bytes of 'ptr' into 'str' as hex pairs separated by 'delim'. */
static void
sprintf_hex (unsigned char *str, const unsigned char *ptr, int len,
             unsigned char delim)
{
    int i, j = 0;

    for (i = 0; i < len; i++) {
        if (i)
            str[j++] = delim;
        str[j++] = __hexdigits[ptr[i] >> 4];
        str[j++] = __hexdigits[ptr[i] & 0x0F];
    }
    str[j] = 0;
}

/* Dump the Ethernet header and up to 16 body bytes of a frame. */
static void
__print_packet (const char *prefix, int descr, int len, struct sk_buff *skb)
{
    struct ethhdr *h;
    unsigned char src[20], dst[20], body[50];
    int l;

    h = (struct ethhdr *) skb->data;
    l = len - 14 > 16 ? 16 : len - 14;
    sprintf_hex (src, &h->h_source[0], 6, ':');
    sprintf_hex (dst, &h->h_dest[0], 6, ':');
    sprintf_hex (body, ((unsigned char *) skb->data) + 14, l, ' ');
    printk ("%08lX %s: d=%-3d len=%-4d proto=0x%04X src=%s dst=%s\n"
            " body=%s\n",
            jiffies, prefix, descr, len, be16_to_cpu (h->h_proto),
            src, dst, body);
}

#ifdef SYNOP_ETH_DEBUG
#define print_packet __print_packet
#else
#define print_packet(...)
#endif

struct syeth_dma_rb *
syeth_dma_rx_rb_static_alloc (struct net_device_stats *st)
{
    struct syeth_dma_rb *rb;
    int i;

    rb = (struct syeth_dma_rb *) ETH_DMA_RX_BASE;

    /* DMA descriptors */
    rb->cur = 0;
    rb->dcount = ETH_RX_RING_SIZE;
    rb->skblength = PKT_BUF_SZ;
    rb->dsize = ETH_RX_DMAREG_SIZE;
    rb->stats = st;
    rb->d.rx = (struct syeth_dma_rx_t *) ETH_RX_DMAREG_BASE;
    rb->phys = virt_to_phys (rb->d.rx);
    memset (rb->d.rx, 0, rb->dsize);
    memset (rb->sk, 0, ETH_RX_RING_SIZE * sizeof (void *));

    /* sk_buff array */
    for (i = 0; i < ETH_RX_RING_SIZE; i++) {
        struct syeth_dma_rx_t *d = rb->d.rx + i;
        struct sk_buff *skb = dev_alloc_skb (rb->skblength);

        if (skb == NULL) {
            printk (KERN_ERR "synop3504: DMA: RX: "
                    "Could not allocate skb. Stop!!!\n");
            syeth_dma_rx_rb_free (rb);
            return NULL;
        }
        rb->sk[i] = skb;
        consistent_sync (skb->data, rb->skblength, DMA_FROM_DEVICE);
        d->addr1 = virt_to_dma (NULL, (unsigned long) skb->data);
        d->state.bf.length1 = rb->skblength;
        d->status.bf.dma_own = 1;
    }

    /* set EndOfRing bit */
    (rb->d.rx + (ETH_RX_RING_SIZE - 1))->state.bf.eor = 1;

    return rb;
}
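/*
 * Note on the ownership handshake (added commentary, not original code):
 * every descriptor starts out with dma_own set, handing it to the DMA
 * engine.  The engine clears dma_own once it has filled (RX) or drained
 * (TX) the buffer; the driver then processes the descriptor and sets
 * dma_own again to recycle it.  All ring walkers below rely on this bit
 * as the only synchronization point with the hardware.
 */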
void
syeth_dma_rx_rb_reset (struct syeth_dma_rb *rb)
{
    int i;
    volatile struct syeth_dma_rx_t *d = rb->d.rx;

    for (i = 0; i < rb->dcount; i++) {
        d->status.val = 0;
        d->status.bf.dma_own = 1;
        d++;
    }
    rb->cur = 0;
}

void
syeth_dma_rx_rb_free (struct syeth_dma_rb *rb)
{
    int i;

    for (i = 0; i < rb->dcount; i++) {
        if (rb->sk[i]) {
            dev_kfree_skb_any (rb->sk[i]);
        }
    }
}

struct sk_buff *
syeth_dma_rx_rb_get (struct syeth_dma_rb *rb)
{
    struct sk_buff *retskb = NULL;
    struct sk_buff *nskb;
    volatile struct syeth_dma_rx_t *d = rb->d.rx + rb->cur;
    int trycnt = MAX_CHECK_RX;

    while (retskb == NULL) {
        if (d->status.bf.dma_own) {
            if (--trycnt) {
                udelay (50);
                continue;
            }
            break;
        }
#ifdef CONFIG_SYNOP3504_RMII
        /* The dribble error bit is not meaningful in RMII mode. */
        if (d->status.bf.err_drib)
            d->status.bf.err_drib = 0;
#endif
        if ((d->status.val & RX_ERROR_MASK) || /* some error occurred: skip this descriptor */
            0 == d->status.bf.first ||         /* packetizing is not supported now */
            0 == d->status.bf.last ||
            4 >= d->status.bf.length) {        /* zero-length frames are not supported */
            rb->stats->rx_errors++;
            rb->stats->collisions +=
                (d->status.bf.err_over | d->status.bf.err_length);
            rb->stats->rx_crc_errors += d->status.bf.err_crc;
            rb->stats->rx_frame_errors += d->status.bf.err_drib;
            rb->stats->rx_length_errors += d->status.bf.err_length;
#ifdef SYNOP_ETH_DEBUG_BAD
            {
                char prefix[64];

                sprintf (prefix, "RX BAD: %s%s%s%s%s%s%s%s%s",
                         d->status.bf.err_mac ? "EMAC " : "",
                         d->status.bf.err_recv ? "ERECV " : "",
                         d->status.bf.err_crc ? "ECRC " : "",
                         d->status.bf.err_drib ? "EDRIB " : "",
                         d->status.bf.watchdog ? "EWD " : "",
                         d->status.bf.err_length ? "ELEN " : "",
                         4 >= d->status.bf.length ? "ZLEN " : "",
                         0 == d->status.bf.first ? "NOFIRST " : "",
                         0 == d->status.bf.last ? "NOLAST " : "");
                __print_packet (prefix, rb->cur, d->status.bf.length - 4,
                                rb->sk[rb->cur]);
            }
#endif
            goto d_skip;
        }

        retskb = rb->sk[rb->cur];
        BUG_ON (retskb == NULL);

        nskb = dev_alloc_skb (rb->skblength);
        if (nskb) {
            int len = d->status.bf.length - 4;

            skb_put (retskb, len);
            print_packet ("RECV", rb->cur, len, retskb);
            rb->sk[rb->cur] = nskb;
            d->state.bf.length1 = rb->skblength;
            consistent_sync (nskb->data, rb->skblength, DMA_FROM_DEVICE);
            d->addr1 = virt_to_dma (NULL, nskb->data);
            if (rb->stats) {
                rb->stats->rx_packets++;
                rb->stats->rx_bytes += len;
            }
        } else {
            retskb = NULL;
            if (rb->stats) {
                rb->stats->rx_dropped++;
            }
        }

d_skip:
        d->status.val = 0;
        d->status.bf.dma_own = 1;
        rb->cur = d->state.bf.eor ? 0 : rb->cur + 1;
        d = rb->d.rx + rb->cur;
    }
    return retskb;
}
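/*
 * Illustrative receive loop (not part of the original driver): an
 * interrupt or poll handler would typically drain the ring like this,
 * pushing each completed frame up the stack.  'dev' is assumed to be
 * the net_device this ring belongs to.
 */
static inline void
syeth_dma_rx_example_poll (struct net_device *dev, struct syeth_dma_rb *rb)
{
    struct sk_buff *skb;

    while ((skb = syeth_dma_rx_rb_get (rb)) != NULL) {
        skb->dev = dev;
        skb->protocol = eth_type_trans (skb, dev);
        netif_rx (skb);
    }
}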
0 : rb->cur + 1; d = rb->d.rx + rb->cur; } return retskb; } struct sk_buff * syeth_dma_rx_rb_scan (struct syeth_dma_rb *rb) { volatile struct syeth_dma_rx_t *d; struct sk_buff *rscb = NULL; u32 i = rb->cur; do { d = rb->d.rx + i; if (0 == d->status.bf.dma_own) { debugp (KERN_INFO "synop3504: DMA: RX: " "Resynced (%d==>%d)\n", rb->cur, i); rb->cur = i; rscb = syeth_dma_rx_rb_get (rb); break; } if (d->state.bf.eor) { i = 0; } else { i++; } } while (i != rb->cur); return rscb; } struct syeth_dma_rb * syeth_dma_tx_rb_static_alloc (struct net_device_stats *st) { struct syeth_dma_rb *rb; rb = (struct syeth_dma_rb *) ETH_DMA_TX_BASE; /* dma descriptors */ rb->cur = 0; rb->dcount = ETH_TX_RING_SIZE; rb->dsize = ETH_RX_DMAREG_SIZE; rb->stats = st; rb->skblength = 0; rb->d.tx = (struct syeth_dma_tx_t *) ETH_TX_DMAREG_BASE; rb->phys = virt_to_phys (rb->d.tx); memset (rb->d.tx, 0, rb->dsize); memset (rb->sk, 0, ETH_TX_RING_SIZE * sizeof (void *)); /* set EndOfRing bit */ (rb->d.tx + (ETH_TX_RING_SIZE - 1))->state.bf.eor = 1; return rb; } static void syeth_dma_tx_skb_complete (struct syeth_dma_rb *rb, int i) { struct syeth_dma_tx_t *d = rb->d.tx + i; struct sk_buff *old = rb->sk[i]; /* last transmitted frame found. * cleanup */ if (d->status.bf.dma_own || d->status.bf.err) { rb->stats->tx_errors++; rb->stats->tx_aborted_errors += d->status.bf.err_lcoll + d->status.bf.err_ecoll; rb->stats->tx_carrier_errors += d->status.bf.err_clost + d->status.bf.err_nocar; #ifdef SYNOP_ETH_DEBUG_BAD if (d->status.bf.err) { char prefix[64]; sprintf (prefix, "TX BAD: %s%s%s%s%s%s%s%s", d->status.bf.err_ip ? "EIPH " : "", d->status.bf.flushed ? "FLUSH " : "", d->status.bf.err_plcs ? "EPLCS " : "", d->status.bf.err_clost ? "ECLOST " : "", d->status.bf.err_nocar ? "ENOCAR " : "", d->status.bf.err_ecoll ? "EECOLL " : "", d->status.bf.err_lcoll ? "ELCOLL " : "", d->status.bf.exc_def ? "EEDEF " : ""); __print_packet (prefix, rb->cur, d->state.bf.length1, old); } #endif } else { rb->stats->tx_bytes += d->state.bf.length1; rb->stats->tx_packets++; } rb->stats->collisions += d->status.bf.coll_cnt; dev_kfree_skb_any (old); rb->sk[i] = NULL; } void syeth_dma_tx_rb_reset (struct syeth_dma_rb *rb) { int i; memset (rb->d.tx, 0, rb->dsize); /* set EndOfRing bit */ (rb->d.tx + (rb->dcount - 1))->state.bf.eor = 1; rb->cur = 0; /* delete all sk_bufs */ for (i = 0; i < rb->dcount; i++) { if (rb->sk[i]) { dev_kfree_skb_any (rb->sk[i]); rb->sk[i] = NULL; } } } void syeth_dma_tx_rb_free (struct syeth_dma_rb *rb) { syeth_dma_tx_rb_reset (rb); } int syeth_dma_tx_rb_put (struct syeth_dma_rb *rb, struct sk_buff *skb) { volatile struct syeth_dma_tx_t *d = rb->d.tx + rb->cur; int eor; if (d->status.bf.dma_own) { printk (KERN_ERR "synop3504: TX DESCRIPTOR %d is owned by DMA.\n", rb->cur); return -1; } if (rb->sk[rb->cur]) { syeth_dma_tx_skb_complete (rb, rb->cur); } rb->sk[rb->cur] = skb; eor = d->state.bf.eor; d->state.val = 0; d->status.val = 0; consistent_sync (skb->data, skb->len, DMA_TO_DEVICE); d->addr1 = virt_to_dma (NULL, (unsigned long) skb->data); d->addr2 = 0; d->state.bf.eor = eor; d->state.bf.first = 1; d->state.bf.last = 1; d->state.bf.int_oncomp = 1; d->state.bf.length1 = skb->len; d->status.val = 0; d->status.bf.dma_own = 1; print_packet ("SEND", rb->cur, skb->len, skb); rb->cur = eor ? 
0 : rb->cur + 1; /* check next and next+1 descriptor for Host ownership */ d = rb->d.tx + rb->cur; if (d->status.bf.dma_own) { return rb->cur + 1; } else { int n = rb->cur + 1; if (n == rb->dcount) n = 0; d = rb->d.tx + n; if (d->status.bf.dma_own) { return n + 1; } } return 0; } int syeth_dma_tx_rb_complete (struct syeth_dma_rb *rb) { int cnt = 0; struct syeth_dma_tx_t *d = rb->d.tx + rb->skblength; while (rb->skblength != rb->cur && 0 == d->status.bf.dma_own && rb->sk[rb->skblength]) { syeth_dma_tx_skb_complete (rb, rb->skblength); rb->skblength = (d->state.bf.eor) ? 0 : rb->skblength + 1; d = rb->d.tx + rb->skblength; cnt++; } return cnt; } int syeth_dma_tx_rb_complete_next (struct syeth_dma_rb *rb) { int i, cnt = 0; struct syeth_dma_tx_t *d; i = rb->cur; do { d = rb->d.tx + i; if (d->status.bf.dma_own) { break; } if (rb->sk[i]) { syeth_dma_tx_skb_complete (rb, i); cnt++; } i = (d->state.bf.eor) ? 0 : i + 1; } while (i != rb->cur); return cnt; }