
Commit 716fb91

Weinn Jheng authored and Felipe Balbi committed
usb: gadget: u_ether: move hardware transmit to RX NAPI
In order to reduce the number of interrupts on embedded systems, receive processing is moved into a NAPI context. This modification also improves overall throughput as a benefit of the reduced interrupt rate.

This work was derived from the previous work "u_ether: move hardware transmit to RX workqueue", which in turn should be based on codeaurora's work. However, the benchmark on my platform shows that throughput with the workqueue is slightly better than with NAPI.

Signed-off-by: Weinn Jheng <clanlab.proj@gmail.com>
Cc: David Brownell <dbrownell@users.sourceforge.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Manu Gautam <mgautam@codeaurora.org>
Signed-off-by: Felipe Balbi <balbi@ti.com>
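For context, the heart of the change is the standard Linux NAPI receive pattern: the USB request completion callback only queues the frame and schedules NAPI, and the poll callback later drains the queue outside hard-interrupt context. Below is a minimal, self-contained sketch of that pattern, not the patch itself; the struct my_dev, my_rx_complete and my_poll names are hypothetical, and it hands frames to the stack with netif_receive_skb() where the actual patch uses netif_rx_ni().

/* Minimal NAPI RX sketch; names prefixed "my_" are hypothetical. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

#define MY_NAPI_WEIGHT	32	/* same weight the patch picks for u_ether */

struct my_dev {
	struct net_device	*net;
	struct napi_struct	rx_napi;
	struct sk_buff_head	rx_frames;	/* filled by the completion handler */
};

/* Completion/IRQ path: queue the frame and kick NAPI, nothing more. */
static void my_rx_complete(struct my_dev *dev, struct sk_buff *skb)
{
	skb_queue_tail(&dev->rx_frames, skb);
	if (likely(napi_schedule_prep(&dev->rx_napi)))
		__napi_schedule(&dev->rx_napi);
}

/* NAPI poll: drain queued frames up to budget, then stop polling. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_dev *dev = container_of(napi, struct my_dev, rx_napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget &&
	       (skb = skb_dequeue(&dev->rx_frames)) != NULL) {
		skb->protocol = eth_type_trans(skb, dev->net);
		netif_receive_skb(skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete(&dev->rx_napi);	/* re-enable NAPI scheduling */

	return work_done;
}

/* Typically called from the setup/open and stop paths respectively. */
static void my_rx_init(struct my_dev *dev)
{
	skb_queue_head_init(&dev->rx_frames);
	netif_napi_add(dev->net, &dev->rx_napi, my_poll, MY_NAPI_WEIGHT);
	napi_enable(&dev->rx_napi);
}

static void my_rx_stop(struct my_dev *dev)
{
	napi_disable(&dev->rx_napi);
	skb_queue_purge(&dev->rx_frames);	/* drop anything still queued */
}

The patch follows this shape in gether_poll() below, with the extra twist that its poll routine also refills the RX request queue via rx_fill() and counts that refill, rather than the dequeued frames, against the budget.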
1 parent 3f89204 commit 716fb91

1 file changed: 66 additions (+), 35 deletions (-)

drivers/usb/gadget/u_ether.c

@@ -48,6 +48,8 @@
 
 #define UETH__VERSION	"29-May-2008"
 
+#define GETHER_NAPI_WEIGHT	32
+
 struct eth_dev {
 	/* lock is held while accessing port_usb
 	 */
@@ -72,6 +74,7 @@ struct eth_dev {
 					struct sk_buff_head *list);
 
 	struct work_struct	work;
+	struct napi_struct	rx_napi;
 
 	unsigned long		todo;
 #define	WORK_RX_MEMORY		0
@@ -253,18 +256,16 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
 		DBG(dev, "rx submit --> %d\n", retval);
 		if (skb)
 			dev_kfree_skb_any(skb);
-		spin_lock_irqsave(&dev->req_lock, flags);
-		list_add(&req->list, &dev->rx_reqs);
-		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
 	return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-	struct sk_buff	*skb = req->context, *skb2;
+	struct sk_buff	*skb = req->context;
 	struct eth_dev	*dev = ep->driver_data;
 	int		status = req->status;
+	bool		rx_queue = 0;
 
 	switch (status) {
 
@@ -288,30 +289,8 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 		} else {
 			skb_queue_tail(&dev->rx_frames, skb);
 		}
-		skb = NULL;
-
-		skb2 = skb_dequeue(&dev->rx_frames);
-		while (skb2) {
-			if (status < 0
-					|| ETH_HLEN > skb2->len
-					|| skb2->len > VLAN_ETH_FRAME_LEN) {
-				dev->net->stats.rx_errors++;
-				dev->net->stats.rx_length_errors++;
-				DBG(dev, "rx length %d\n", skb2->len);
-				dev_kfree_skb_any(skb2);
-				goto next_frame;
-			}
-			skb2->protocol = eth_type_trans(skb2, dev->net);
-			dev->net->stats.rx_packets++;
-			dev->net->stats.rx_bytes += skb2->len;
-
-			/* no buffer copies needed, unless hardware can't
-			 * use skb buffers.
-			 */
-			status = netif_rx(skb2);
-next_frame:
-			skb2 = skb_dequeue(&dev->rx_frames);
-		}
+		if (!status)
+			rx_queue = 1;
 		break;
 
 	/* software-driven interface shutdown */
@@ -334,22 +313,20 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 		/* FALLTHROUGH */
 
 	default:
+		rx_queue = 1;
+		dev_kfree_skb_any(skb);
 		dev->net->stats.rx_errors++;
 		DBG(dev, "rx status %d\n", status);
 		break;
 	}
 
-	if (skb)
-		dev_kfree_skb_any(skb);
-	if (!netif_running(dev->net)) {
 clean:
 		spin_lock(&dev->req_lock);
 		list_add(&req->list, &dev->rx_reqs);
 		spin_unlock(&dev->req_lock);
-		req = NULL;
-	}
-	if (req)
-		rx_submit(dev, req, GFP_ATOMIC);
+
+	if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
+		__napi_schedule(&dev->rx_napi);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -414,16 +391,24 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
 	struct usb_request	*req;
 	unsigned long		flags;
+	int			rx_counts = 0;
 
 	/* fill unused rxq slots with some skb */
 	spin_lock_irqsave(&dev->req_lock, flags);
 	while (!list_empty(&dev->rx_reqs)) {
+
+		if (++rx_counts > qlen(dev->gadget, dev->qmult))
+			break;
+
 		req = container_of(dev->rx_reqs.next,
 				struct usb_request, list);
 		list_del_init(&req->list);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 
 		if (rx_submit(dev, req, gfp_flags) < 0) {
+			spin_lock_irqsave(&dev->req_lock, flags);
+			list_add(&req->list, &dev->rx_reqs);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
 			defer_kevent(dev, WORK_RX_MEMORY);
 			return;
 		}
@@ -433,6 +418,41 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 	spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
+static int gether_poll(struct napi_struct *napi, int budget)
+{
+	struct eth_dev	*dev = container_of(napi, struct eth_dev, rx_napi);
+	struct sk_buff *skb;
+	unsigned int work_done = 0;
+	int status = 0;
+
+	while ((skb = skb_dequeue(&dev->rx_frames))) {
+		if (status < 0
+				|| ETH_HLEN > skb->len
+				|| skb->len > VLAN_ETH_FRAME_LEN) {
+			dev->net->stats.rx_errors++;
+			dev->net->stats.rx_length_errors++;
+			DBG(dev, "rx length %d\n", skb->len);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+		skb->protocol = eth_type_trans(skb, dev->net);
+		dev->net->stats.rx_packets++;
+		dev->net->stats.rx_bytes += skb->len;
+
+		status = netif_rx_ni(skb);
+	}
+
+	if (netif_running(dev->net)) {
+		rx_fill(dev, GFP_KERNEL);
+		work_done++;
+	}
+
+	if (work_done < budget)
+		napi_complete(&dev->rx_napi);
+
+	return work_done;
+}
+
 static void eth_work(struct work_struct *work)
 {
 	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
@@ -625,6 +645,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
 	/* and open the tx floodgates */
 	atomic_set(&dev->tx_qlen, 0);
 	netif_wake_queue(dev->net);
+	napi_enable(&dev->rx_napi);
 }
 
 static int eth_open(struct net_device *net)
@@ -651,6 +672,7 @@ static int eth_stop(struct net_device *net)
 	unsigned long	flags;
 
 	VDBG(dev, "%s\n", __func__);
+	napi_disable(&dev->rx_napi);
 	netif_stop_queue(net);
 
 	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -768,6 +790,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
 		return ERR_PTR(-ENOMEM);
 
 	dev = netdev_priv(net);
+	netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->req_lock);
 	INIT_WORK(&dev->work, eth_work);
@@ -830,6 +853,7 @@ struct net_device *gether_setup_name_default(const char *netname)
 		return ERR_PTR(-ENOMEM);
 
 	dev = netdev_priv(net);
+	netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->req_lock);
 	INIT_WORK(&dev->work, eth_work);
@@ -1113,6 +1137,7 @@ void gether_disconnect(struct gether *link)
 {
 	struct eth_dev		*dev = link->ioport;
 	struct usb_request	*req;
+	struct sk_buff		*skb;
 
 	WARN_ON(!dev);
 	if (!dev)
@@ -1139,6 +1164,12 @@ void gether_disconnect(struct gether *link)
 		spin_lock(&dev->req_lock);
 	}
 	spin_unlock(&dev->req_lock);
+
+	spin_lock(&dev->rx_frames.lock);
+	while ((skb = __skb_dequeue(&dev->rx_frames)))
+		dev_kfree_skb_any(skb);
+	spin_unlock(&dev->rx_frames.lock);
+
 	link->in_ep->driver_data = NULL;
 	link->in_ep->desc = NULL;
 