summaryrefslogtreecommitdiff
path: root/polux/linux-2.6.10/drivers/net/synop3504/synop3504_dma.c
diff options
context:
space:
mode:
Diffstat (limited to 'polux/linux-2.6.10/drivers/net/synop3504/synop3504_dma.c')
-rw-r--r--polux/linux-2.6.10/drivers/net/synop3504/synop3504_dma.c1165
1 files changed, 652 insertions, 513 deletions
diff --git a/polux/linux-2.6.10/drivers/net/synop3504/synop3504_dma.c b/polux/linux-2.6.10/drivers/net/synop3504/synop3504_dma.c
index 197f693a67..86dd8f6326 100644
--- a/polux/linux-2.6.10/drivers/net/synop3504/synop3504_dma.c
+++ b/polux/linux-2.6.10/drivers/net/synop3504/synop3504_dma.c
@@ -7,7 +7,7 @@
#include <linux/netdevice.h>
/* Size of each temporary Rx buffer.*/
-#define PKT_BUF_SZ 1536
+#define PKT_BUF_SZ 1536
/* print debug message on bad packet receiving or transmitting */
#define SYNOP_ETH_DEBUG_BAD
@@ -31,166 +31,265 @@ extern uint32_t DMA_zone_base;
struct syeth_dma_rx_t
{
- union {
- struct {
- int err_mac : 1; /**< [ 0: 0] Rx MAC Address/Payload Checksum Errort */
- int err_crc : 1; /**< [ 1: 1] CRC error */
- int err_drib : 1; /**< [ 2: 2] Dribble error */
- int err_recv : 1; /**< [ 3: 3] Receive error */
- int watchdog : 1; /**< [ 4: 4] Watchdog event occured */
- int type : 1; /**< [ 5: 5] Ethernet-type */
- int err_coll : 1; /**< [ 6: 6] Late collision */
- int err_ipc : 1; /**< [ 7: 7] IPC checksumm error */
- int last : 1; /**< [ 8: 8] last frame descriptor */
- int first : 1; /**< [ 9: 9] first frame descriptor */
- int vlan : 1; /**< [10:10] VLAN tag */
- int err_over : 1; /**< [11:11] overflow error */
- int err_length: 1; /**< [12:12] length/type error */
- int err_sfail : 1; /**< [13:13] source address fail */
- int err_desc : 1; /**< [14:14] Descriptor error */
- int err : 1; /**< [15:15] error summary */
- uint length : 14; /**< [29:16] frame length */
- int err_dfail : 1; /**< [30:30] if set - destination address fail */
- int dma_own : 1; /**< [31:31] if set - owned by DMA */
- }bf;
+ union
+ {
+ struct
+ {
+ /** [ 0: 0] Rx MAC Address/Payload Checksum Error */
+ int err_mac:1;
+ /** [ 1: 1] CRC error */
+ int err_crc:1;
+ /** [ 2: 2] Dribble error */
+ int err_drib:1;
+ /** [ 3: 3] Receive error */
+ int err_recv:1;
+ /** [ 4: 4] Watchdog event occurred */
+ int watchdog:1;
+ /** [ 5: 5] Ethernet-type */
+ int type:1;
+ /** [ 6: 6] Late collision */
+ int err_coll:1;
+ /** [ 7: 7] IPC checksum error */
+ int err_ipc:1;
+ /** [ 8: 8] last frame descriptor */
+ int last:1;
+ /** [ 9: 9] first frame descriptor */
+ int first:1;
+ /** [10:10] VLAN tag */
+ int vlan:1;
+ /** [11:11] overflow error */
+ int err_over:1;
+ /** [12:12] length/type error */
+ int err_length:1;
+ /** [13:13] source address fail */
+ int err_sfail:1;
+ /** [14:14] Descriptor error */
+ int err_desc:1;
+ /** [15:15] error summary */
+ int err:1;
+ /** [29:16] frame length */
+ uint length:14;
+ /** [30:30] if set - destination address fail */
+ int err_dfail:1;
+ /** [31:31] if set - owned by DMA */
+ int dma_own:1;
+ } bf;
unsigned int val;
- }status;
-
- union {
- struct {
- uint length1 : 11; /**< [10: 0] Buffer1 length */
- uint length2 : 11; /**< [21:11] Buffer2 length */
- int _reserved2 : 2; /**< [23:22] */
- int addr2en : 1; /**< [24:24] Addr2 is the next descr address */
- int eor : 1; /**< [25:25] End of ring */
- int _reserved1 : 5; /**< [30:26] */
- int int_dis : 1; /**< [31:31] Disable Int on Completion */
- }bf;
+ } status;
+
+ union
+ {
+ struct
+ {
+ /** [10: 0] Buffer1 length */
+ uint length1:11;
+ /** [21:11] Buffer2 length */
+ uint length2:11;
+ /** [23:22] */
+ int _reserved2:2;
+ /** [24:24] Addr2 is the next descr address */
+ int addr2en:1;
+ /** [25:25] End of ring */
+ int eor:1;
+ /** [30:26] */
+ int _reserved1:5;
+ /** [31:31] Disable Int on Completion */
+ int int_dis:1;
+ } bf;
unsigned int val;
- }state;
+ } state;
unsigned int addr1;
unsigned int addr2;
};
struct syeth_dma_tx_t
{
- union {
- struct {
- int deffered : 1; /**< [ 0: 0] Deffered bit */
- int err_under : 1; /**< [ 1: 1] Underflow error */
- int exc_deff : 1; /**< [ 2: 2] excessive defferal */
- int coll_cnt : 4; /**< [ 6: 3] Collision count*/
- int vlan : 1; /**< [ 7: 7] VLAN tag */
- int err_ecoll : 1; /**< [ 8: 8] Excessive collision */
- int err_lcoll : 1; /**< [ 9: 9] Late collision */
- int err_nocar : 1; /**< [10:10] No carrier */
- int err_clost : 1; /**< [11:11] Carrier lost */
- int err_plcs : 1; /**< [12:12] Payload/Checksumm error */
- int flushed : 1; /**< [13:13] Frame flushed */
- int err_jabt : 1; /**< [14:14] Jabber Timeout */
- int err : 1; /**< [15:15] error summary */
- int err_ip : 1; /**< [16:16] IP header error */
- int _reserved : 14; /**< [30:17] */
- int dma_own : 1; /**< [31:31] if set - owned by DMA */
- }bf;
+ union
+ {
+ struct
+ {
+ /** [ 0: 0] Deferred bit */
+ int defered:1;
+ /** [ 1: 1] Underflow error */
+ int err_under:1;
+ /** [ 2: 2] excessive deferral */
+ int exc_def:1;
+          /** [ 6: 3] Collision count */
+ int coll_cnt:4;
+ /** [ 7: 7] VLAN tag */
+ int vlan:1;
+ /** [ 8: 8] Excessive collision */
+ int err_ecoll:1;
+ /** [ 9: 9] Late collision */
+ int err_lcoll:1;
+ /** [10:10] No carrier */
+ int err_nocar:1;
+ /** [11:11] Carrier lost */
+ int err_clost:1;
+ /** [12:12] Payload/Checksum error */
+ int err_plcs:1;
+ /** [13:13] Frame flushed */
+ int flushed:1;
+ /** [14:14] Jabber Timeout */
+ int err_jabt:1;
+ /** [15:15] error summary */
+ int err:1;
+ /** [16:16] IP header error */
+ int err_ip:1;
+ /** [30:17] */
+ int _reserved:14;
+ /** [31:31] if set - owned by DMA */
+ int dma_own:1;
+ } bf;
unsigned int val;
- }status;
-
- union {
- struct {
- uint length1 : 11; /**< [10: 0] Buffer1 length */
- uint length2 : 11; /**< [21:11] Buffer2 length */
- int _reserved : 1; /**< [22:22] */
- int no_pad : 1; /**< [23:23] Disable padding */
- int addr2en : 1; /**< [24:24] Addr2 is the next descr address */
- int eor : 1; /**< [25:25] End of ring */
- int no_crc : 1; /**< [26:26] Disable CRC */
- int cs_ctrl : 2; /**< [28:27] Checksumm insertion control */
- int first : 1; /**< [29:29] First frame segment */
- int last : 1; /**< [30:30] Last frame segment */
- int int_oncomp: 1; /**< [31:31] Enable Int on Completion */
- }bf;
+ } status;
+
+ union
+ {
+ struct
+ {
+ /** [10: 0] Buffer1 length */
+ uint length1:11;
+ /** [21:11] Buffer2 length */
+ uint length2:11;
+ /** [22:22] */
+ int _reserved:1;
+ /** [23:23] Disable padding */
+ int no_pad:1;
+ /** [24:24] Addr2 is the next descr address */
+ int addr2en:1;
+ /** [25:25] End of ring */
+ int eor:1;
+ /** [26:26] Disable CRC */
+ int no_crc:1;
+ /** [28:27] Checksum insertion control */
+ int cs_ctrl:2;
+ /** [29:29] First frame segment */
+ int first:1;
+ /** [30:30] Last frame segment */
+ int last:1;
+ /** [31:31] Enable Int on Completion */
+ int int_oncomp:1;
+ } bf;
unsigned int val;
- }state;
+ } state;
unsigned int addr1;
unsigned int addr2;
};
struct syeth_dma_tx_ex_t
{
- union {
- struct {
- int deffered : 1; /**< [ 0: 0] Deffered bit */
- int err_under : 1; /**< [ 1: 1] Underflow error */
- int exc_deff : 1; /**< [ 2: 2] excessive defferal */
- int coll_cnt : 4; /**< [ 6: 3] Collision count*/
- int vlan : 1; /**< [ 7: 7] VLAN tag */
- int err_ecoll : 1; /**< [ 8: 8] Excessive collision */
- int err_lcoll : 1; /**< [ 9: 9] Late collision */
- int err_nocar : 1; /**< [10:10] No carrier */
- int err_clost : 1; /**< [11:11] Carrier lost */
- int err_plcs : 1; /**< [12:12] Payload/Checksumm error */
- int flushed : 1; /**< [13:13] Frame flushed */
- int err_jabt : 1; /**< [14:14] Jabber Timeout */
- int err : 1; /**< [15:15] error summary */
- int err_ip : 1; /**< [16:16] IP header error */
- int _reserved2 : 3; /**< [19:17] */
- int addr2en : 1; /**< [20:20] Addr2 is the next descr address */
- int eor : 1; /**< [21:21] End of ring */
- int cs_ctrl : 2; /**< [23:22] Checksumm insertion control */
- int _reserved1 : 14; /**< [25:24] */
- int no_pad : 1; /**< [26:26] Disable padding */
- int no_crc : 1; /**< [27:27] Disable CRC */
- int first : 1; /**< [28:28] First frame segment */
- int last : 1; /**< [29:29] Last frame segment */
- int int_oncomp: 1; /**< [30:30] Enable Int on Completion */
- int dma_own : 1; /**< [31:31] if set - owned by DMA */
- }bf;
+ union
+ {
+ struct
+ {
+ /** [ 0: 0] Deferred bit */
+ int defered:1;
+ /** [ 1: 1] Underflow error */
+ int err_under:1;
+ /** [ 2: 2] excessive deferral */
+ int exc_def:1;
+          /** [ 6: 3] Collision count */
+ int coll_cnt:4;
+ /** [ 7: 7] VLAN tag */
+ int vlan:1;
+ /** [ 8: 8] Excessive collision */
+ int err_ecoll:1;
+ /** [ 9: 9] Late collision */
+ int err_lcoll:1;
+ /** [10:10] No carrier */
+ int err_nocar:1;
+ /** [11:11] Carrier lost */
+ int err_clost:1;
+ /** [12:12] Payload/Checksum error */
+ int err_plcs:1;
+ /** [13:13] Frame flushed */
+ int flushed:1;
+ /** [14:14] Jabber Timeout */
+ int err_jabt:1;
+ /** [15:15] error summary */
+ int err:1;
+ /** [16:16] IP header error */
+ int err_ip:1;
+ /** [19:17] */
+ int _reserved2:3;
+ /** [20:20] Addr2 is the next descr address */
+ int addr2en:1;
+ /** [21:21] End of ring */
+ int eor:1;
+ /** [23:22] Checksum insertion control */
+ int cs_ctrl:2;
+ /** [25:24] */
+          int _reserved1:2;
+ /** [26:26] Disable padding */
+ int no_pad:1;
+ /** [27:27] Disable CRC */
+ int no_crc:1;
+ /** [28:28] First frame segment */
+ int first:1;
+ /** [29:29] Last frame segment */
+ int last:1;
+ /** [30:30] Enable Int on Completion */
+ int int_oncomp:1;
+ /** [31:31] if set - owned by DMA */
+ int dma_own:1;
+ } bf;
unsigned int val;
- }status;
-
- union {
- struct {
- int length1 : 13; /**< [12: 0] Buffer1 length */
- int _reserved1 : 3; /**< [15:13] */
- int length2 : 13; /**< [28:16] Buffer1 length */
- int _reserved2 : 3; /**< [31:29] */
- }bf;
+ } status;
+
+ union
+ {
+ struct
+ {
+ /** [12: 0] Buffer1 length */
+ int length1:13;
+ /** [15:13] */
+ int _reserved1:3;
+          /** [28:16] Buffer2 length */
+ int length2:13;
+ /** [31:29] */
+ int _reserved2:3;
+ } bf;
unsigned int val;
- }state;
+ } state;
unsigned int addr1;
unsigned int addr2;
};
static const unsigned char __hexdigits[] = "0123456789ABCDEF";
-static void sprintf_hex(unsigned char * str, const unsigned char * ptr,
- int len, unsigned char delim)
+
+static void
+sprintf_hex (unsigned char *str, const unsigned char *ptr,
+ int len, unsigned char delim)
{
- int i, j=0;
- for(i=0; i<len; i++){
- if(i)str[j++]=delim;
- str[j++]=__hexdigits[ptr[i]>>4];
- str[j++]=__hexdigits[ptr[i]&0x0F];
+ int i, j = 0;
+ for (i = 0; i < len; i++)
+ {
+ if (i)
+ str[j++] = delim;
+ str[j++] = __hexdigits[ptr[i] >> 4];
+ str[j++] = __hexdigits[ptr[i] & 0x0F];
}
str[j] = 0;
}
-static void __print_packet(const char * prefix, int descr, int len, struct sk_buff * skb)
+static void
+__print_packet (const char *prefix, int descr, int len, struct sk_buff *skb)
{
- struct ethhdr * h;
+ struct ethhdr *h;
unsigned char src[20], dst[20], body[50];
int l;
- h = (struct ethhdr *)skb->data;
- l = len - 14 > 16 ? 16 : len - 14;
- sprintf_hex(src, &h->h_source[0], 6, ':');
- sprintf_hex(dst, &h->h_dest[0], 6, ':');
- sprintf_hex(body, ((unsigned char *)skb->data)+14, l, ' ');
+ h = (struct ethhdr *) skb->data;
+ l = len - 14 > 16 ? 16 : len - 14;
+ sprintf_hex (src, &h->h_source[0], 6, ':');
+ sprintf_hex (dst, &h->h_dest[0], 6, ':');
+ sprintf_hex (body, ((unsigned char *) skb->data) + 14, l, ' ');
- printk("%08lX %s: d=%-3d len=%-4d proto=0x%04X src=%s dst=%s\n"
- " body=%s\n",
- jiffies, prefix, descr, len, be16_to_cpu(h->h_proto),
- src, dst, body);
+ printk ("%08lX %s: d=%-3d len=%-4d proto=0x%04X src=%s dst=%s\n"
+ " body=%s\n",
+ jiffies, prefix, descr, len, be16_to_cpu (h->h_proto),
+ src, dst, body);
}
#ifdef SYNOP_ETH_DEBUG
@@ -200,495 +299,535 @@ static void __print_packet(const char * prefix, int descr, int len, struct sk_bu
#endif
#ifdef CONFIG_MMU
-struct syeth_dma_rb * syeth_dma_rx_rb_alloc (int dcount, int mtu,
- struct net_device_stats * st)
+struct syeth_dma_rb *
+syeth_dma_rx_rb_alloc (int dcount, int mtu, struct net_device_stats *st)
{
- struct syeth_dma_rb * rb;
- int i;
-
- rb = kmalloc (sizeof (*rb) + (dcount*sizeof(void*)), GFP_ATOMIC);
- if(NULL == rb) return NULL;
-
- /* dma descriptors */
- rb->cur = 0;
- rb->dcount = dcount;
- if (mtu > ETH_DATA_LEN)
- /* MTU + ethernet header + FCS + optional VLAN tag */
- rb->skblength = mtu + ETH_HLEN + 8;
- else
- rb->skblength = PKT_BUF_SZ;
+ struct syeth_dma_rb *rb;
+ int i;
+
+ rb = kmalloc (sizeof (*rb) + (dcount * sizeof (void *)), GFP_ATOMIC);
+ if (NULL == rb)
+ return NULL;
+
+ /* dma descriptors */
+ rb->cur = 0;
+ rb->dcount = dcount;
+ if (mtu > ETH_DATA_LEN)
+ /* MTU + ethernet header + FCS + optional VLAN tag */
+ rb->skblength = mtu + ETH_HLEN + 8;
+ else
+ rb->skblength = PKT_BUF_SZ;
- rb->dsize = dcount * 16;
- rb->stats = st;
+ rb->dsize = dcount * 16;
+ rb->stats = st;
- rb->d.rx = dma_alloc_coherent(NULL, rb->dsize, &rb->phys, GFP_ATOMIC);
- if(rb->d.rx == NULL){
- kfree(rb);
- return NULL;
- }
+ rb->d.rx = dma_alloc_coherent (NULL, rb->dsize, &rb->phys, GFP_ATOMIC);
+ if (rb->d.rx == NULL)
+ {
+ kfree (rb);
+ return NULL;
+ }
- memset(rb->d.rx, 0, rb->dsize);
- memset(rb->sk, 0, dcount * sizeof(struct sk_buff*));
-
- /* sk_buff array */
- for(i=0; i<dcount; i++){
- struct syeth_dma_rx_t * d = rb->d.rx + i;
- struct sk_buff * skb = dev_alloc_skb(rb->skblength);
- if (skb == NULL) {
- printk(KERN_ERR "synop3504: DMA: RX: "
- "Could not allocate skb. Stop!!!\n");
- syeth_dma_rx_rb_free (rb);
- return NULL;
- }
- rb->sk[i] = skb;
- d->addr1 = dma_map_single(NULL, skb->data, rb->skblength,
- DMA_FROM_DEVICE);
- d->state.bf.length1 = rb->skblength;
-
- d->status.bf.dma_own = 1;
+ memset (rb->d.rx, 0, rb->dsize);
+ memset (rb->sk, 0, dcount * sizeof (struct sk_buff *));
+
+ /* sk_buff array */
+ for (i = 0; i < dcount; i++)
+ {
+ struct syeth_dma_rx_t *d = rb->d.rx + i;
+ struct sk_buff *skb = dev_alloc_skb (rb->skblength);
+ if (skb == NULL)
+ {
+ printk (KERN_ERR "synop3504: DMA: RX: "
+ "Could not allocate skb. Stop!!!\n");
+ syeth_dma_rx_rb_free (rb);
+ return NULL;
}
- /* set EndOfRing bit */
- (rb->d.rx+(dcount-1))->state.bf.eor = 1;
+ rb->sk[i] = skb;
+ d->addr1 = dma_map_single (NULL, skb->data, rb->skblength,
+ DMA_FROM_DEVICE);
+ d->state.bf.length1 = rb->skblength;
+
+ d->status.bf.dma_own = 1;
+ }
+ /* set EndOfRing bit */
+ (rb->d.rx + (dcount - 1))->state.bf.eor = 1;
- return rb;
+ return rb;
}
#else
-struct syeth_dma_rb * syeth_dma_rx_rb_static_alloc (struct net_device_stats * st)
+struct syeth_dma_rb *
+syeth_dma_rx_rb_static_alloc (struct net_device_stats *st)
{
- struct syeth_dma_rb * rb;
- int i;
-
- rb = (struct syeth_dma_rb*)ETH_DMA_RX_BASE;
-
- /* dma descriptors */
- rb->cur = 0;
- rb->dcount = ETH_RX_RING_SIZE;
- rb->skblength = PKT_BUF_SZ;
- rb->dsize = ETH_RX_DMAREG_SIZE;
- rb->stats = st;
- rb->d.rx = (struct syeth_dma_rx_t*)ETH_RX_DMAREG_BASE;
- rb->phys = virt_to_phys(rb->d.rx);
-
- memset(rb->d.rx, 0, rb->dsize);
- memset(rb->sk, 0, ETH_RX_RING_SIZE*sizeof(void*));
-
- /* sk_buff array */
- for(i=0; i<ETH_RX_RING_SIZE; i++){
- struct syeth_dma_rx_t * d = rb->d.rx + i;
- struct sk_buff * skb = dev_alloc_skb(rb->skblength);
- if (skb == NULL) {
- printk(KERN_ERR "synop3504: DMA: RX: "
- "Could not allocate skb. Stop!!!\n");
- syeth_dma_rx_rb_free (rb);
- return NULL;
- }
- rb->sk[i] = skb;
- consistent_sync(skb->data, rb->skblength, DMA_FROM_DEVICE);
- d->addr1 = virt_to_dma(NULL, (unsigned long)skb->data);
- d->state.bf.length1 = rb->skblength;
-
- d->status.bf.dma_own = 1;
+ struct syeth_dma_rb *rb;
+ int i;
+
+ rb = (struct syeth_dma_rb *) ETH_DMA_RX_BASE;
+
+ /* dma descriptors */
+ rb->cur = 0;
+ rb->dcount = ETH_RX_RING_SIZE;
+ rb->skblength = PKT_BUF_SZ;
+ rb->dsize = ETH_RX_DMAREG_SIZE;
+ rb->stats = st;
+ rb->d.rx = (struct syeth_dma_rx_t *) ETH_RX_DMAREG_BASE;
+ rb->phys = virt_to_phys (rb->d.rx);
+
+ memset (rb->d.rx, 0, rb->dsize);
+ memset (rb->sk, 0, ETH_RX_RING_SIZE * sizeof (void *));
+
+ /* sk_buff array */
+ for (i = 0; i < ETH_RX_RING_SIZE; i++)
+ {
+ struct syeth_dma_rx_t *d = rb->d.rx + i;
+ struct sk_buff *skb = dev_alloc_skb (rb->skblength);
+ if (skb == NULL)
+ {
+ printk (KERN_ERR "synop3504: DMA: RX: "
+ "Could not allocate skb. Stop!!!\n");
+ syeth_dma_rx_rb_free (rb);
+ return NULL;
}
- /* set EndOfRing bit */
- (rb->d.rx+(ETH_RX_RING_SIZE-1))->state.bf.eor = 1;
+ rb->sk[i] = skb;
+ consistent_sync (skb->data, rb->skblength, DMA_FROM_DEVICE);
+ d->addr1 = virt_to_dma (NULL, (unsigned long) skb->data);
+ d->state.bf.length1 = rb->skblength;
+
+ d->status.bf.dma_own = 1;
+ }
+ /* set EndOfRing bit */
+ (rb->d.rx + (ETH_RX_RING_SIZE - 1))->state.bf.eor = 1;
- return rb;
+ return rb;
}
#endif
-void syeth_dma_rx_rb_reset (struct syeth_dma_rb * rb)
+void
+syeth_dma_rx_rb_reset (struct syeth_dma_rb *rb)
{
- int i;
- volatile struct syeth_dma_rx_t * d = rb->d.rx;
+ int i;
+ volatile struct syeth_dma_rx_t *d = rb->d.rx;
#if defined USE_PRESYNC && defined CONFIG_MMU
- dma_sync_single_for_cpu(NULL, rb->phys,
- rb->dsize, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu (NULL, rb->phys, rb->dsize, DMA_FROM_DEVICE);
#endif
- for(i=0; i<rb->dcount; i++){
- d->status.val = 0;
- d->status.bf.dma_own = 1;
- d++;
- }
+ for (i = 0; i < rb->dcount; i++)
+ {
+ d->status.val = 0;
+ d->status.bf.dma_own = 1;
+ d++;
+ }
#if defined USE_PRESYNC && defined CONFIG_MMU
- dma_sync_single_for_cpu(NULL, rb->phys,
- rb->dsize, DMA_TO_DEVICE);
+ dma_sync_single_for_cpu (NULL, rb->phys, rb->dsize, DMA_TO_DEVICE);
#endif
- rb->cur = 0;
+ rb->cur = 0;
}
-void syeth_dma_rx_rb_free (struct syeth_dma_rb * rb)
+void
+syeth_dma_rx_rb_free (struct syeth_dma_rb *rb)
{
- int i;
- for(i=0; i<rb->dcount; i++){
- if(rb->sk[i]){
- dev_kfree_skb_any(rb->sk[i]);
- }
+ int i;
+ for (i = 0; i < rb->dcount; i++)
+ {
+ if (rb->sk[i])
+ {
+ dev_kfree_skb_any (rb->sk[i]);
}
+ }
#ifdef CONFIG_MMU
- dma_free_coherent(NULL, rb->dcount * sizeof(struct syeth_dma_rx_t),
- rb->d.rx, rb->phys);
- kfree(rb);
+ dma_free_coherent (NULL, rb->dcount * sizeof (struct syeth_dma_rx_t),
+ rb->d.rx, rb->phys);
+ kfree (rb);
#endif
}
-struct sk_buff * syeth_dma_rx_rb_get (struct syeth_dma_rb * rb)
+struct sk_buff *
+syeth_dma_rx_rb_get (struct syeth_dma_rb *rb)
{
- struct sk_buff *retskb = NULL;
- struct sk_buff *nskb;
- volatile struct syeth_dma_rx_t * d = rb->d.rx + rb->cur;
- int trycnt = MAX_CHECK_RX;
+ struct sk_buff *retskb = NULL;
+ struct sk_buff *nskb;
+ volatile struct syeth_dma_rx_t *d = rb->d.rx + rb->cur;
+ int trycnt = MAX_CHECK_RX;
- while(retskb == NULL) {
+ while (retskb == NULL)
+ {
#if defined USE_PRESYNC && defined CONFIG_MMU
- dma_sync_single_for_cpu(NULL, rb->phys,
- rb->dsize, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu (NULL, rb->phys, rb->dsize, DMA_FROM_DEVICE);
#endif
- if(d->status.bf.dma_own){
- if(--trycnt){
- udelay(50);
- continue;
- }
- break;
- }
+ if (d->status.bf.dma_own)
+ {
+ if (--trycnt)
+ {
+ udelay (50);
+ continue;
+ }
+ break;
+ }
#ifdef CONFIG_SYNOP3504_RMII
- /* Dribble error is not affected on RMII */
- if(d->status.bf.err_drib)
- d->status.bf.err_drib = 0;
+ /* Dribble error is not affected on RMII */
+ if (d->status.bf.err_drib)
+ d->status.bf.err_drib = 0;
#endif
- if((d->status.val&RX_ERROR_MASK) || /* some error occured. skip this descriptor */
- 0==d->status.bf.first || /* packetizing is not supported now */
- 0==d->status.bf.last ||
- 4>=d->status.bf.length /* Zero-length is not supported */
- ){
- rb->stats->rx_errors++;
- rb->stats->collisions +=
- (d->status.bf.err_over|
- d->status.bf.err_length);
-
- rb->stats->rx_crc_errors +=
- d->status.bf.err_crc;
-
- rb->stats->rx_frame_errors +=
- d->status.bf.err_drib;
-
- rb->stats->rx_length_errors +=
- d->status.bf.err_length;
+      if ((d->status.val & RX_ERROR_MASK) ||	/* some error occurred. skip this descriptor */
+ 0 == d->status.bf.first || /* packetizing is not supported now */
+ 0 == d->status.bf.last || 4 >= d->status.bf.length /* Zero-length is not supported */
+ )
+ {
+ rb->stats->rx_errors++;
+ rb->stats->collisions +=
+ (d->status.bf.err_over | d->status.bf.err_length);
+
+ rb->stats->rx_crc_errors += d->status.bf.err_crc;
+
+ rb->stats->rx_frame_errors += d->status.bf.err_drib;
+
+ rb->stats->rx_length_errors += d->status.bf.err_length;
#ifdef SYNOP_ETH_DEBUG_BAD
- {
- char prefix[64];
- sprintf(prefix, "RX BAD: %s%s%s%s%s%s%s%s%s",
- d->status.bf.err_mac ? "EMAC " :"",
- d->status.bf.err_recv ? "ERECV " :"",
- d->status.bf.err_crc ? "ECRC " :"",
- d->status.bf.err_drib ? "EDRIB " :"",
- d->status.bf.watchdog ? "EWD " :"",
- d->status.bf.err_length ? "ELEN " :"",
- 4>=d->status.bf.length ? "ZLEN " :"",
- 0==d->status.bf.first ? "NOFIRST ":"",
- 0==d->status.bf.last ? "NOLAST " :""
- );
- __print_packet(prefix, rb->cur,
- d->status.bf.length-4,
- rb->sk[rb->cur]);
- }
+ {
+ char prefix[64];
+ sprintf (prefix, "RX BAD: %s%s%s%s%s%s%s%s%s",
+ d->status.bf.err_mac ? "EMAC " : "",
+ d->status.bf.err_recv ? "ERECV " : "",
+ d->status.bf.err_crc ? "ECRC " : "",
+ d->status.bf.err_drib ? "EDRIB " : "",
+ d->status.bf.watchdog ? "EWD " : "",
+ d->status.bf.err_length ? "ELEN " : "",
+ 4 >= d->status.bf.length ? "ZLEN " : "",
+ 0 == d->status.bf.first ? "NOFIRST " : "",
+ 0 == d->status.bf.last ? "NOLAST " : "");
+ __print_packet (prefix, rb->cur,
+ d->status.bf.length - 4, rb->sk[rb->cur]);
+ }
#endif
- goto d_skip;
- }
+ goto d_skip;
+ }
- retskb = rb->sk[rb->cur];
- BUG_ON(retskb == NULL);
+ retskb = rb->sk[rb->cur];
+ BUG_ON (retskb == NULL);
- nskb = dev_alloc_skb(rb->skblength);
- if(nskb){
- int len = d->status.bf.length-4;
- skb_put(retskb, len);
+ nskb = dev_alloc_skb (rb->skblength);
+ if (nskb)
+ {
+ int len = d->status.bf.length - 4;
+ skb_put (retskb, len);
- print_packet("RECV", rb->cur, len, retskb);
+ print_packet ("RECV", rb->cur, len, retskb);
#ifdef CONFIG_MMU
- dma_unmap_single(NULL, d->addr1, d->state.bf.length1,
- DMA_FROM_DEVICE);
+ dma_unmap_single (NULL, d->addr1, d->state.bf.length1,
+ DMA_FROM_DEVICE);
#endif
- rb->sk[rb->cur] = nskb;
- d->state.bf.length1 = rb->skblength;
+ rb->sk[rb->cur] = nskb;
+ d->state.bf.length1 = rb->skblength;
#ifdef CONFIG_MMU
- d->addr1 = dma_map_single(NULL, nskb->data,
- rb->skblength,
- DMA_FROM_DEVICE);
+ d->addr1 = dma_map_single (NULL, nskb->data,
+ rb->skblength, DMA_FROM_DEVICE);
#else
- consistent_sync(nskb->data, rb->skblength, DMA_FROM_DEVICE);
- d->addr1 = virt_to_dma(NULL,nskb->data);
+ consistent_sync (nskb->data, rb->skblength, DMA_FROM_DEVICE);
+ d->addr1 = virt_to_dma (NULL, nskb->data);
#endif
- if(rb->stats){
- rb->stats->rx_packets++;
- rb->stats->rx_bytes += len;
- }
-
- }else{
- retskb = NULL;
- if(rb->stats){
- rb->stats->rx_dropped++;
- }
- }
- d_skip:
- d->status.val = 0;
- d->status.bf.dma_own = 1;
+ if (rb->stats)
+ {
+ rb->stats->rx_packets++;
+ rb->stats->rx_bytes += len;
+ }
+
+ }
+ else
+ {
+ retskb = NULL;
+ if (rb->stats)
+ {
+ rb->stats->rx_dropped++;
+ }
+ }
+d_skip:
+ d->status.val = 0;
+ d->status.bf.dma_own = 1;
#if defined USE_PRESYNC && defined CONFIG_MMU
- dma_sync_single_for_cpu(NULL, rb->phys, rb->dsize,
- DMA_TO_DEVICE);
+ dma_sync_single_for_cpu (NULL, rb->phys, rb->dsize, DMA_TO_DEVICE);
#endif
- rb->cur = d->state.bf.eor ? 0 : rb->cur + 1;
- d = rb->d.rx + rb->cur;
- }
- return retskb;
+ rb->cur = d->state.bf.eor ? 0 : rb->cur + 1;
+ d = rb->d.rx + rb->cur;
+ }
+ return retskb;
}
-struct sk_buff * syeth_dma_rx_rb_scan (struct syeth_dma_rb * rb)
+struct sk_buff *
+syeth_dma_rx_rb_scan (struct syeth_dma_rb *rb)
{
- volatile struct syeth_dma_rx_t * d;
- struct sk_buff * rscb = NULL;
- u32 i = rb->cur;
+ volatile struct syeth_dma_rx_t *d;
+ struct sk_buff *rscb = NULL;
+ u32 i = rb->cur;
#if defined USE_PRESYNC && defined CONFIG_MMU
- dma_sync_single_for_cpu(NULL, rb->phys, rb->dsize, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu (NULL, rb->phys, rb->dsize, DMA_FROM_DEVICE);
#endif
- do{
- d = rb->d.rx + i;
- if(0==d->status.bf.dma_own){
- debugp(KERN_INFO "synop3504: DMA: RX: "
- "Resynced (%d==>%d)\n", rb->cur, i);
- rb->cur = i;
- rscb = syeth_dma_rx_rb_get(rb);
- break;
- }
- if(d->state.bf.eor){
- i = 0;
- }else{
- i ++;
- }
- }while(i != rb->cur);
-
- return rscb;
+ do
+ {
+ d = rb->d.rx + i;
+ if (0 == d->status.bf.dma_own)
+ {
+ debugp (KERN_INFO "synop3504: DMA: RX: "
+ "Resynced (%d==>%d)\n", rb->cur, i);
+ rb->cur = i;
+ rscb = syeth_dma_rx_rb_get (rb);
+ break;
+ }
+ if (d->state.bf.eor)
+ {
+ i = 0;
+ }
+ else
+ {
+ i++;
+ }
+ }
+ while (i != rb->cur);
+
+ return rscb;
}
#ifdef CONFIG_MMU
-struct syeth_dma_rb * syeth_dma_tx_rb_alloc (int dcount,
- struct net_device_stats * st)
+struct syeth_dma_rb *
+syeth_dma_tx_rb_alloc (int dcount, struct net_device_stats *st)
{
- struct syeth_dma_rb *rb;
-
- rb = kmalloc (sizeof (*rb) + (dcount*sizeof(void*)), GFP_ATOMIC);
- if(NULL == rb) return NULL;
-
- /* dma descriptors */
- rb->cur = 0;
- rb->dcount = dcount;
- rb->dsize = dcount * 16;
- rb->stats = st;
- rb->skblength = 0;
-
- rb->d.tx = dma_alloc_coherent(NULL, rb->dsize, &rb->phys, GFP_ATOMIC);
- if(rb->d.tx == NULL){
- kfree(rb);
- return NULL;
- }
+ struct syeth_dma_rb *rb;
+
+ rb = kmalloc (sizeof (*rb) + (dcount * sizeof (void *)), GFP_ATOMIC);
+ if (NULL == rb)
+ return NULL;
+
+ /* dma descriptors */
+ rb->cur = 0;
+ rb->dcount = dcount;
+ rb->dsize = dcount * 16;
+ rb->stats = st;
+ rb->skblength = 0;
+
+ rb->d.tx = dma_alloc_coherent (NULL, rb->dsize, &rb->phys, GFP_ATOMIC);
+ if (rb->d.tx == NULL)
+ {
+ kfree (rb);
+ return NULL;
+ }
- memset(rb->d.tx, 0, rb->dsize);
- memset(rb->sk, 0, dcount * sizeof(void*));
+ memset (rb->d.tx, 0, rb->dsize);
+ memset (rb->sk, 0, dcount * sizeof (void *));
- /* set EndOfRing bit */
- (rb->d.tx+(dcount-1))->state.bf.eor = 1;
- return rb;
+ /* set EndOfRing bit */
+ (rb->d.tx + (dcount - 1))->state.bf.eor = 1;
+ return rb;
}
#else
-struct syeth_dma_rb * syeth_dma_tx_rb_static_alloc (struct net_device_stats * st)
+struct syeth_dma_rb *
+syeth_dma_tx_rb_static_alloc (struct net_device_stats *st)
{
- struct syeth_dma_rb *rb;
+ struct syeth_dma_rb *rb;
- rb = (struct syeth_dma_rb*)ETH_DMA_TX_BASE;
+ rb = (struct syeth_dma_rb *) ETH_DMA_TX_BASE;
- /* dma descriptors */
- rb->cur = 0;
- rb->dcount = ETH_TX_RING_SIZE;
- rb->dsize = ETH_RX_DMAREG_SIZE;
- rb->stats = st;
- rb->skblength = 0;
- rb->d.tx = (struct syeth_dma_tx_t*)ETH_TX_DMAREG_BASE;
- rb->phys = virt_to_phys(rb->d.tx);
+ /* dma descriptors */
+ rb->cur = 0;
+ rb->dcount = ETH_TX_RING_SIZE;
+ rb->dsize = ETH_RX_DMAREG_SIZE;
+ rb->stats = st;
+ rb->skblength = 0;
+ rb->d.tx = (struct syeth_dma_tx_t *) ETH_TX_DMAREG_BASE;
+ rb->phys = virt_to_phys (rb->d.tx);
- memset(rb->d.tx, 0, rb->dsize);
- memset(rb->sk, 0, ETH_TX_RING_SIZE*sizeof(void*));
+ memset (rb->d.tx, 0, rb->dsize);
+ memset (rb->sk, 0, ETH_TX_RING_SIZE * sizeof (void *));
- /* set EndOfRing bit */
- (rb->d.tx+(ETH_TX_RING_SIZE-1))->state.bf.eor = 1;
- return rb;
+ /* set EndOfRing bit */
+ (rb->d.tx + (ETH_TX_RING_SIZE - 1))->state.bf.eor = 1;
+ return rb;
}
#endif
-static void syeth_dma_tx_skb_complete(struct syeth_dma_rb * rb, int i)
+static void
+syeth_dma_tx_skb_complete (struct syeth_dma_rb *rb, int i)
{
- struct syeth_dma_tx_t * d = rb->d.tx + i;
- struct sk_buff * old = rb->sk[i];
- /* last transmitted frame found.
- * cleanup */
- if(d->status.bf.dma_own || d->status.bf.err){
- rb->stats->tx_errors++;
-
- rb->stats->tx_aborted_errors +=
- d->status.bf.err_lcoll+
- d->status.bf.err_ecoll;
-
- rb->stats->tx_carrier_errors +=
- d->status.bf.err_clost+
- d->status.bf.err_nocar;
+ struct syeth_dma_tx_t *d = rb->d.tx + i;
+ struct sk_buff *old = rb->sk[i];
+ /* last transmitted frame found.
+ * cleanup */
+ if (d->status.bf.dma_own || d->status.bf.err)
+ {
+ rb->stats->tx_errors++;
+
+ rb->stats->tx_aborted_errors +=
+ d->status.bf.err_lcoll + d->status.bf.err_ecoll;
+
+ rb->stats->tx_carrier_errors +=
+ d->status.bf.err_clost + d->status.bf.err_nocar;
#ifdef SYNOP_ETH_DEBUG_BAD
- if(d->status.bf.err){
- char prefix[64];
- sprintf(prefix, "TX BAD: %s%s%s%s%s%s%s%s",
- d->status.bf.err_ip ? "EIPH " :"",
- d->status.bf.flushed ? "FLUSH " :"",
- d->status.bf.err_plcs ? "EPLCS " :"",
- d->status.bf.err_clost ? "ECLOST ":"",
- d->status.bf.err_nocar ? "ENOCAR ":"",
- d->status.bf.err_ecoll ? "EECOLL ":"",
- d->status.bf.err_lcoll ? "ELCOLL ":"",
- d->status.bf.exc_deff ? "EEDEF " :""
- );
- __print_packet(prefix, rb->cur, d->state.bf.length1, old);
- }
+ if (d->status.bf.err)
+ {
+ char prefix[64];
+ sprintf (prefix, "TX BAD: %s%s%s%s%s%s%s%s",
+ d->status.bf.err_ip ? "EIPH " : "",
+ d->status.bf.flushed ? "FLUSH " : "",
+ d->status.bf.err_plcs ? "EPLCS " : "",
+ d->status.bf.err_clost ? "ECLOST " : "",
+ d->status.bf.err_nocar ? "ENOCAR " : "",
+ d->status.bf.err_ecoll ? "EECOLL " : "",
+ d->status.bf.err_lcoll ? "ELCOLL " : "",
+ d->status.bf.exc_def ? "EEDEF " : "");
+ __print_packet (prefix, rb->cur, d->state.bf.length1, old);
+ }
#endif
- }else{
- rb->stats->tx_bytes += d->state.bf.length1;
- rb->stats->tx_packets++;
- }
- rb->stats->collisions += d->status.bf.coll_cnt;
+ }
+ else
+ {
+ rb->stats->tx_bytes += d->state.bf.length1;
+ rb->stats->tx_packets++;
+ }
+ rb->stats->collisions += d->status.bf.coll_cnt;
#ifdef CONFIG_MMU
- dma_unmap_single(NULL, d->addr1, d->state.bf.length1,
- DMA_TO_DEVICE);
+ dma_unmap_single (NULL, d->addr1, d->state.bf.length1, DMA_TO_DEVICE);
#endif
- dev_kfree_skb_any(old);
- rb->sk[i] = NULL;
+ dev_kfree_skb_any (old);
+ rb->sk[i] = NULL;
}
-void syeth_dma_tx_rb_reset (struct syeth_dma_rb * rb)
+void
+syeth_dma_tx_rb_reset (struct syeth_dma_rb *rb)
{
- int i;
- memset(rb->d.tx, 0, rb->dsize);
- /* set EndOfRing bit */
- (rb->d.tx+(rb->dcount-1))->state.bf.eor = 1;
- rb->cur = 0;
-
- /* delete all sk_bufs */
- for(i=0; i<rb->dcount; i++){
- if(rb->sk[i]){
- dev_kfree_skb_any(rb->sk[i]);
- rb->sk[i] = NULL;
- }
+ int i;
+ memset (rb->d.tx, 0, rb->dsize);
+ /* set EndOfRing bit */
+ (rb->d.tx + (rb->dcount - 1))->state.bf.eor = 1;
+ rb->cur = 0;
+
+ /* delete all sk_bufs */
+ for (i = 0; i < rb->dcount; i++)
+ {
+ if (rb->sk[i])
+ {
+ dev_kfree_skb_any (rb->sk[i]);
+ rb->sk[i] = NULL;
}
+ }
}
-void syeth_dma_tx_rb_free (struct syeth_dma_rb * rb)
+
+void
+syeth_dma_tx_rb_free (struct syeth_dma_rb *rb)
{
- syeth_dma_tx_rb_reset(rb);
+ syeth_dma_tx_rb_reset (rb);
#ifdef CONFIG_MMU
- dma_free_coherent(NULL, rb->dsize, rb->d.tx, rb->phys);
- kfree(rb);
+ dma_free_coherent (NULL, rb->dsize, rb->d.tx, rb->phys);
+ kfree (rb);
#endif
}
-int syeth_dma_tx_rb_put (struct syeth_dma_rb * rb, struct sk_buff * skb)
+int
+syeth_dma_tx_rb_put (struct syeth_dma_rb *rb, struct sk_buff *skb)
{
- volatile struct syeth_dma_tx_t * d = rb->d.tx + rb->cur;
- int eor;
+ volatile struct syeth_dma_tx_t *d = rb->d.tx + rb->cur;
+ int eor;
#if defined USE_PRESYNC && defined CONFIG_MMU
- dma_sync_single_for_cpu(NULL, rb->phys, rb->dsize, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu (NULL, rb->phys, rb->dsize, DMA_FROM_DEVICE);
#endif
- if(d->status.bf.dma_own){
- printk(KERN_ERR "synop3504: TX DESCRIPTOR %d is owned by DMA.\n", rb->cur);
- return -1;
- }
+ if (d->status.bf.dma_own)
+ {
+ printk (KERN_ERR "synop3504: TX DESCRIPTOR %d is owned by DMA.\n",
+ rb->cur);
+ return -1;
+ }
- if(rb->sk[rb->cur]){
- syeth_dma_tx_skb_complete(rb, rb->cur);
- }
+ if (rb->sk[rb->cur])
+ {
+ syeth_dma_tx_skb_complete (rb, rb->cur);
+ }
- rb->sk[rb->cur] = skb;
- eor = d->state.bf.eor;
- d->state.val = 0;
- d->status.val = 0;
+ rb->sk[rb->cur] = skb;
+ eor = d->state.bf.eor;
+ d->state.val = 0;
+ d->status.val = 0;
#ifdef CONFIG_MMU
- d->addr1 = dma_map_single(NULL, skb->data, skb->len,
- DMA_TO_DEVICE);
+ d->addr1 = dma_map_single (NULL, skb->data, skb->len, DMA_TO_DEVICE);
#else
- consistent_sync(skb->data, skb->len, DMA_TO_DEVICE);
- d->addr1 = virt_to_dma(NULL, (unsigned long)skb->data);
+ consistent_sync (skb->data, skb->len, DMA_TO_DEVICE);
+ d->addr1 = virt_to_dma (NULL, (unsigned long) skb->data);
#endif
- d->addr2 = 0;
- d->state.bf.eor = eor;
- d->state.bf.first = 1;
- d->state.bf.last = 1;
- d->state.bf.int_oncomp = 1;
- d->state.bf.length1 = skb->len;
+ d->addr2 = 0;
+ d->state.bf.eor = eor;
+ d->state.bf.first = 1;
+ d->state.bf.last = 1;
+ d->state.bf.int_oncomp = 1;
+ d->state.bf.length1 = skb->len;
- d->status.val = 0;
- d->status.bf.dma_own = 1;
+ d->status.val = 0;
+ d->status.bf.dma_own = 1;
#if defined USE_PRESYNC && defined CONFIG_MMU
- dma_sync_single_for_cpu(NULL, rb->phys, rb->dsize, DMA_TO_DEVICE);
+ dma_sync_single_for_cpu (NULL, rb->phys, rb->dsize, DMA_TO_DEVICE);
#endif
- print_packet("SEND", rb->cur, skb->len, skb);
-
- rb->cur = eor ? 0 : rb->cur + 1;
-
- /* check next and next+1 descriptor for Host ownership */
- d = rb->d.tx + rb->cur;
- if(d->status.bf.dma_own){
- return rb->cur+1;
- }else{
- int n = rb->cur + 1;
- if(n == rb->dcount) n = 0;
- d = rb->d.tx + n;
- if(d->status.bf.dma_own){
- return n+1;
- }
+ print_packet ("SEND", rb->cur, skb->len, skb);
+
+ rb->cur = eor ? 0 : rb->cur + 1;
+
+ /* check next and next+1 descriptor for Host ownership */
+ d = rb->d.tx + rb->cur;
+ if (d->status.bf.dma_own)
+ {
+ return rb->cur + 1;
+ }
+ else
+ {
+ int n = rb->cur + 1;
+ if (n == rb->dcount)
+ n = 0;
+ d = rb->d.tx + n;
+ if (d->status.bf.dma_own)
+ {
+ return n + 1;
}
- return 0;
+ }
+ return 0;
}
-int syeth_dma_tx_rb_complete(struct syeth_dma_rb * rb)
+int
+syeth_dma_tx_rb_complete (struct syeth_dma_rb *rb)
{
- int cnt = 0;
- struct syeth_dma_tx_t * d = rb->d.tx + rb->skblength;
- while ( rb->skblength != rb->cur
- && 0==d->status.bf.dma_own
- && rb->sk[rb->skblength] ) {
- syeth_dma_tx_skb_complete(rb, rb->skblength);
- rb->skblength = (d->state.bf.eor) ? 0 : rb->skblength + 1;
- d = rb->d.tx + rb->skblength;
- cnt++;
- }
- return cnt;
+ int cnt = 0;
+ struct syeth_dma_tx_t *d = rb->d.tx + rb->skblength;
+ while (rb->skblength != rb->cur
+ && 0 == d->status.bf.dma_own && rb->sk[rb->skblength])
+ {
+ syeth_dma_tx_skb_complete (rb, rb->skblength);
+ rb->skblength = (d->state.bf.eor) ? 0 : rb->skblength + 1;
+ d = rb->d.tx + rb->skblength;
+ cnt++;
+ }
+ return cnt;
}
-int syeth_dma_tx_rb_complete_next(struct syeth_dma_rb * rb)
+int
+syeth_dma_tx_rb_complete_next (struct syeth_dma_rb *rb)
{
- int i, cnt = 0;
- struct syeth_dma_tx_t * d;
- i = rb->cur;
-
- do{
- d = rb->d.tx + i;
- if(d->status.bf.dma_own){
- break;
- }
- if(rb->sk[i]){
- syeth_dma_tx_skb_complete(rb, i);
- cnt++;
- }
- i = (d->state.bf.eor) ? 0 : i+1;
- }while(i != rb->cur);
- return cnt;
+ int i, cnt = 0;
+ struct syeth_dma_tx_t *d;
+ i = rb->cur;
+
+ do
+ {
+ d = rb->d.tx + i;
+ if (d->status.bf.dma_own)
+ {
+ break;
+ }
+ if (rb->sk[i])
+ {
+ syeth_dma_tx_skb_complete (rb, i);
+ cnt++;
+ }
+ i = (d->state.bf.eor) ? 0 : i + 1;
+ }
+ while (i != rb->cur);
+ return cnt;
}