netpoll: Respect NETIF_F_LLTX
Stop taking the transmit lock when a network device has specified NETIF_F_LLTX. If no locks are needed to transmit a packet, this is the ideal scenario for netpoll, as all packets can be transmitted immediately. Even if some locks are needed in ndo_start_xmit, skipping any unnecessary serialization is desirable for netpoll, as it makes it more likely that a debugging packet can be transmitted immediately instead of being deferred until later.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
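The macros this commit touches boil down to conditional lock elision: take the per-queue transmit lock only when the driver has not set NETIF_F_LLTX. Below is a minimal userspace C sketch of that pattern, not kernel code; the names fake_netdev, hard_tx_trylock and hard_tx_unlock are hypothetical stand-ins for net_device, HARD_TX_TRYLOCK and HARD_TX_UNLOCK, and a pthread mutex stands in for the per-queue _xmit_lock.

/* Userspace sketch of the NETIF_F_LLTX lock-elision pattern.
 * Build with: cc sketch.c -lpthread
 */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

#define NETIF_F_LLTX (1 << 0)	/* driver does its own tx locking */

struct fake_netdev {
	unsigned int features;
	pthread_mutex_t tx_lock;
};

/* Mirrors HARD_TX_TRYLOCK: contend for the queue lock only when the
 * driver has NOT declared NETIF_F_LLTX; otherwise report success
 * immediately so the caller can try to transmit right away. */
static bool hard_tx_trylock(struct fake_netdev *dev)
{
	if ((dev->features & NETIF_F_LLTX) == 0)
		return pthread_mutex_trylock(&dev->tx_lock) == 0;
	return true;
}

/* Mirrors HARD_TX_UNLOCK: only drop the lock if we actually took it. */
static void hard_tx_unlock(struct fake_netdev *dev)
{
	if ((dev->features & NETIF_F_LLTX) == 0)
		pthread_mutex_unlock(&dev->tx_lock);
}

int main(void)
{
	struct fake_netdev dev = {
		.features = NETIF_F_LLTX,
		.tx_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	if (hard_tx_trylock(&dev)) {
		printf("lock elided, packet could go out immediately\n");
		hard_tx_unlock(&dev);
	}
	return 0;
}

The point of the pattern is that an LLTX driver promises to do its own serialization inside ndo_start_xmit, so the core can report "lock acquired" unconditionally and let netpoll proceed without waiting.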
This commit is contained in:
parent 080b3c19a4
commit 5efeac44cf

2 changed files with 10 additions and 5 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
@@ -2909,6 +2909,11 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 	}						\
 }
 
+#define HARD_TX_TRYLOCK(dev, txq)			\
+	(((dev->features & NETIF_F_LLTX) == 0) ?	\
+		__netif_tx_trylock(txq) :		\
+		true )
+
 #define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 		__netif_tx_unlock(txq);			\
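For context (not part of this diff), the pre-existing HARD_TX_LOCK in the same header follows the same shape the new trylock adopts; as best as it can be reconstructed for this kernel era, it reads roughly:

/* Reconstruction of the surrounding macro, shown for context only;
 * the two trailing context lines in the hunk above are its tail. */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}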
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
@@ -119,17 +119,17 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save(flags);
-		__netif_tx_lock(txq, smp_processor_id());
+		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			__netif_tx_unlock(txq);
+			HARD_TX_UNLOCK(dev, txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		__netif_tx_unlock(txq);
+		HARD_TX_UNLOCK(dev, txq);
 		local_irq_restore(flags);
 	}
 }
@@ -345,11 +345,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (__netif_tx_trylock(txq)) {
+			if (HARD_TX_TRYLOCK(dev, txq)) {
 				if (!netif_xmit_stopped(txq))
 					status = netpoll_start_xmit(skb, dev, txq);
 
-				__netif_tx_unlock(txq);
+				HARD_TX_UNLOCK(dev, txq);
 
 				if (status == NETDEV_TX_OK)
 					break;
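A rough worked example of the retry budget in the loop above, assuming HZ=1000 and a USEC_PER_POLL of 50 (the value netpoll.c is believed to use in this era): jiffies_to_usecs(1) is 1000 us, so tries starts at 1000/50 = 20, giving up to 20 transmit attempts within one clock tick, with the loop delaying by udelay(USEC_PER_POLL) between failed attempts.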