@@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		struct sk_buff *skb = tx_buff->skb;
 		unsigned int info = le32_to_cpu(txbd->info);
 
-		if ((info & FOR_EMAC) || !txbd->data)
+		if ((info & FOR_EMAC) || !txbd->data || !skb)
 			break;
 
 		if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
@@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 
 		txbd->data = 0;
 		txbd->info = 0;
+		tx_buff->skb = NULL;
 
 		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
 	}
@@ -610,7 +611,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
 	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
 
-	priv->tx_buff[*txbd_curr].skb = skb;
 	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
 
 	/* Make sure pointer to data buffer is set */
@@ -620,6 +620,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 
+	/* Make sure info word is set */
+	wmb();
+
+	priv->tx_buff[*txbd_curr].skb = skb;
+
 	/* Increment index to point to the next BD */
 	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
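
The patch reorders the transmit path so that priv->tx_buff[*txbd_curr].skb is written only after the descriptor's info word has been published (with a wmb() in between), has arc_emac_tx_clean() skip any slot whose skb pointer is still NULL, and clears the pointer once the slot has been cleaned. The sketch below is a minimal, standalone illustration of that publish/consume ordering, not the driver's code: it uses C11 release/acquire atomics in place of the kernel's wmb(), and the names ring_slot, publish_tx and clean_tx are made up for the example; ring-full handling and the hardware-ownership (FOR_EMAC) check are omitted.

/*
 * Sketch of the ordering enforced by the patch: the producer fills the
 * descriptor first and publishes the skb pointer last; the consumer
 * treats a NULL skb as "not ready yet" and clears it when recycling.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

#define TX_RING_LEN 8

struct ring_slot {
	uint32_t info;			/* descriptor word handed to "hardware" */
	_Atomic(void *) skb;		/* published last, cleared by the cleaner */
};

static struct ring_slot ring[TX_RING_LEN];
static unsigned int tx_curr, tx_dirty;

/* Producer: write the descriptor, then make the skb pointer visible. */
static void publish_tx(void *skb, uint32_t info)
{
	struct ring_slot *slot = &ring[tx_curr];

	slot->info = info;
	/*
	 * The release store plays the role of the wmb(): the info write
	 * above is guaranteed to be visible before skb becomes non-NULL.
	 */
	atomic_store_explicit(&slot->skb, skb, memory_order_release);

	tx_curr = (tx_curr + 1) % TX_RING_LEN;
}

/* Consumer: skip slots whose skb has not been published yet. */
static void *clean_tx(void)
{
	struct ring_slot *slot = &ring[tx_dirty];
	/*
	 * The acquire load pairs with the release store above, so a
	 * non-NULL skb implies the info word is also visible here.
	 */
	void *skb = atomic_load_explicit(&slot->skb, memory_order_acquire);

	if (!skb)			/* mirrors the new "|| !skb" bail-out */
		return NULL;

	slot->info = 0;			/* mirrors txbd->info = 0 */
	atomic_store_explicit(&slot->skb, NULL,	/* mirrors tx_buff->skb = NULL */
			      memory_order_relaxed);

	tx_dirty = (tx_dirty + 1) % TX_RING_LEN;
	return skb;			/* caller would free it */
}

With the pre-patch order (skb stored before the info word), a cleaner running concurrently could see a published skb for a descriptor that was not yet fully set up and free it prematurely; publishing the skb last and checking it for NULL closes that window.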