author    Johannes Berg <[email protected]>  2025-03-18 08:45:23 +0000
committer Johannes Berg <[email protected]>  2025-03-18 08:46:36 +0000
commit    c924c5e9b8c65b3a479a90e5e37d74cc8cd9fe0a (patch)
tree      9626c13418dd3770f2772980a7dacdc110c442b6  /net/core/netpoll.c
parent    wifi: iwlwifi: Fix uninitialized variable with __free() (diff)
parent    Merge branch 'net-phy-rework-linkmodes-handling-in-a-dedicated-file' (diff)
Merge net-next/main to resolve conflicts
There are a few conflicts between the work that went into wireless and what is here now; resolve them.

Signed-off-by: Johannes Berg <[email protected]>
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--  net/core/netpoll.c  24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 62b4041aae1a..3cc3eae9def3 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -284,12 +284,13 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
 	struct sk_buff *skb;
 
 	zap_completion_queue();
-	refill_skbs(np);
 repeat:
 
 	skb = alloc_skb(len, GFP_ATOMIC);
-	if (!skb)
+	if (!skb) {
 		skb = skb_dequeue(&np->skb_pool);
+		schedule_work(&np->refill_wq);
+	}
 
 	if (!skb) {
 		if (++count < 10) {
@@ -319,6 +320,7 @@ static int netpoll_owner_active(struct net_device *dev)
 static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	netdev_tx_t status = NETDEV_TX_BUSY;
+	netdev_tx_t ret = NET_XMIT_DROP;
 	struct net_device *dev;
 	unsigned long tries;
 	/* It is up to the caller to keep npinfo alive. */
@@ -327,11 +329,12 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	lockdep_assert_irqs_disabled();
 
 	dev = np->dev;
+	rcu_read_lock();
 	npinfo = rcu_dereference_bh(dev->npinfo);
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		dev_kfree_skb_irq(skb);
-		return NET_XMIT_DROP;
+		goto out;
 	}
@@ -370,7 +373,10 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
-	return NETDEV_TX_OK;
+	ret = NETDEV_TX_OK;
+out:
+	rcu_read_unlock();
+	return ret;
 }
 
 netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
@@ -535,6 +541,7 @@ static void skb_pool_flush(struct netpoll *np)
 {
 	struct sk_buff_head *skb_pool;
 
+	cancel_work_sync(&np->refill_wq);
 	skb_pool = &np->skb_pool;
 	skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
 }
@@ -621,6 +628,14 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
+static void refill_skbs_work_handler(struct work_struct *work)
+{
+	struct netpoll *np =
+		container_of(work, struct netpoll, refill_wq);
+
+	refill_skbs(np);
+}
+
 int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 {
 	struct netpoll_info *npinfo;
@@ -666,6 +681,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 
 	/* fill up the skb queue */
 	refill_skbs(np);
+	INIT_WORK(&np->refill_wq, refill_skbs_work_handler);
 
 	/* last thing to do is link it to the net device structure */
 	rcu_assign_pointer(ndev->npinfo, npinfo);
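
Taken together, the pool hunks above implement a deferred-refill pattern: the atomic hot path only allocates or dequeues from a pre-filled pool and kicks a work item, and the pool is topped back up later in process context. Below is a minimal free-standing sketch of that pattern, not the kernel's verbatim code: the pared-down struct netpoll, pool_get_skb(), pool_setup(), pool_flush(), and the POOL_* constants are illustrative stand-ins for the real netpoll.c entry points.

/* Sketch of the deferred skb-pool refill pattern; only the refill_wq
 * mechanics are mirrored, the real code lives in net/core/netpoll.c. */
#include <linux/workqueue.h>
#include <linux/skbuff.h>

#define POOL_MAX_SKBS	32	/* illustrative pool depth */
#define POOL_SKB_SIZE	1500	/* illustrative buffer size */

struct netpoll {			/* pared-down stand-in */
	struct sk_buff_head skb_pool;
	struct work_struct refill_wq;
};

static void refill_skbs(struct netpoll *np)
{
	struct sk_buff *skb;

	while (skb_queue_len(&np->skb_pool) < POOL_MAX_SKBS) {
		skb = alloc_skb(POOL_SKB_SIZE, GFP_KERNEL);
		if (!skb)
			break;
		skb_queue_tail(&np->skb_pool, skb);
	}
}

/* Runs from the system workqueue, i.e. in process context. */
static void refill_skbs_work_handler(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, refill_wq);

	refill_skbs(np);
}

/* Hot path: atomic context, so never refill inline; fall back to the
 * pool and ask the work item to top it up later. */
static struct sk_buff *pool_get_skb(struct netpoll *np, int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb) {
		skb = skb_dequeue(&np->skb_pool);
		schedule_work(&np->refill_wq);
	}
	return skb;
}

static void pool_setup(struct netpoll *np)
{
	skb_queue_head_init(&np->skb_pool);
	refill_skbs(np);	/* initial fill, as __netpoll_setup() does */
	INIT_WORK(&np->refill_wq, refill_skbs_work_handler);
}

static void pool_flush(struct netpoll *np)
{
	/* Make sure no refill can race with, or repopulate after, the purge;
	 * the real skb_pool_flush() uses skb_queue_purge_reason(). */
	cancel_work_sync(&np->refill_wq);
	skb_queue_purge(&np->skb_pool);
}

Deferring the refill is what allows the first hunk to drop the unconditional refill_skbs() call from find_skb(), so the allocation fast path no longer pays for pool maintenance.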
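
The __netpoll_send_skb() hunks are logically separate: they wrap the npinfo dereference in an explicit RCU read-side critical section and funnel every exit through a single unlock. A minimal sketch of that control flow, with the transmit/retry logic elided (send_skb_sketch() is a hypothetical name, and CONFIG_NETPOLL is assumed so that dev->npinfo exists):

/* Shape of the locking change in __netpoll_send_skb(); the transmit
 * loop and busy-poll fallback are elided. */
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>

static netdev_tx_t send_skb_sketch(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t ret = NET_XMIT_DROP;	/* pessimistic default */
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo;

	rcu_read_lock();
	/* The real function runs with IRQs disabled (lockdep asserts it),
	 * which is what satisfies the rcu_dereference_bh() check here. */
	npinfo = rcu_dereference_bh(dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		goto out;	/* was a bare return NET_XMIT_DROP */
	}

	/* ... queue or transmit the skb, retrying a bounded number of times ... */

	ret = NETDEV_TX_OK;
out:
	rcu_read_unlock();	/* single unlock for every exit path */
	return ret;
}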