| author | Kuniyuki Iwashima <[email protected]> | 2025-04-11 20:52:32 +0000 |
|---|---|---|
| committer | Jakub Kicinski <[email protected]> | 2025-04-15 00:08:41 +0000 |
| commit | 7a60d91c690bf73c2c78e763efa29f294e217c3a | |
| tree | 74869ee0d779bae24cb1894e1d62b68674241bd1 /net/core/net_namespace.c | |
| parent | net: Add ops_undo_single for module load/unload. | |
net: Add ->exit_rtnl() hook to struct pernet_operations.
struct pernet_operations provides two batching hooks: ->exit_batch()
and ->exit_batch_rtnl().
The batching variant is beneficial if ->exit() meets any of the
following conditions:
1) ->exit() repeatedly acquires a global lock for each netns
   (see the sketch after this list)
2) ->exit() has a time-consuming operation that can be factored
   out (e.g. synchronize_rcu(), smp_mb(), etc.)
3) ->exit() does not need to repeat the same iterations for each
netns (e.g. inet_twsk_purge())
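As a minimal sketch of condition 1 (hypothetical; the foo_* names and foo_global_lock are made up and not part of this patch), an ->exit() that needs a global lock takes it once per dying netns, while an ->exit_batch() can take it once for the whole exit list:

```c
#include <linux/mutex.h>
#include <net/net_namespace.h>

static DEFINE_MUTEX(foo_global_lock);	/* assumed global state */

/* Per-netns variant: the global lock is taken once per dying netns. */
static void foo_exit_net(struct net *net)
{
	mutex_lock(&foo_global_lock);
	/* ... tear down this netns's foo state ... */
	mutex_unlock(&foo_global_lock);
}

/* Batched variant: the global lock is taken once for the whole list. */
static void foo_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	mutex_lock(&foo_global_lock);
	list_for_each_entry(net, net_exit_list, exit_list) {
		/* ... tear down this netns's foo state ... */
	}
	mutex_unlock(&foo_global_lock);
}

static struct pernet_operations foo_net_ops = {
	.exit_batch = foo_exit_batch,	/* preferred here over .exit = foo_exit_net */
};
```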
Currently, none of the ->exit_batch_rtnl() functions satisfy any of
the above conditions because RTNL is factored out and held by the
caller, and all of these functions iterate over the dying netns list.
Also, we want to hold per-netns RTNL there but avoid spreading
__rtnl_net_lock() across multiple locations.
Let's add an ->exit_rtnl() hook and run it under __rtnl_net_lock().
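For reference, the declaration side of the hook lives in struct pernet_operations (include/net/net_namespace.h, which this diffstat filter does not show); judging from the ops->exit_rtnl(net, &dev_kill_list) call site added below, the new member would look roughly like this sketch:

```c
/* Sketch only; the exact layout of struct pernet_operations is not
 * shown in this diff.  The signature is inferred from the call site
 * in ops_exit_rtnl_list() below.
 */
struct pernet_operations {
	/* ... existing members: ->init(), ->exit(), ->exit_batch(),
	 * ->exit_batch_rtnl(), ... */

	/* Called once per dying netns, with rtnl_lock() and the
	 * per-netns __rtnl_net_lock(net) held by the caller.
	 */
	void (*exit_rtnl)(struct net *net,
			  struct list_head *dev_kill_list);
};
```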
The following patches will convert all ->exit_batch_rtnl() users
to ->exit_rtnl().
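A rough sketch of such a conversion (the bar_* subsystem and its bar_destroy_devices() helper are hypothetical, not taken from this series):

```c
/* Hypothetical helper: moves a netns's devices onto dev_kill_list so
 * the core can batch them into one unregister_netdevice_many() call.
 */
static void bar_destroy_devices(struct net *net,
				struct list_head *dev_kill_list);

/* Before: ->exit_batch_rtnl() walks the dying netns list itself, under
 * the plain rtnl_lock() taken by the caller.
 */
static void bar_exit_batch_rtnl(struct list_head *net_exit_list,
				struct list_head *dev_kill_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		bar_destroy_devices(net, dev_kill_list);
}

/* After: ->exit_rtnl() is invoked once per netns by ops_exit_rtnl_list(),
 * with rtnl_lock() and __rtnl_net_lock(net) held by the caller.
 */
static void bar_exit_rtnl(struct net *net, struct list_head *dev_kill_list)
{
	bar_destroy_devices(net, dev_kill_list);
}

static struct pernet_operations bar_net_ops = {
	.exit_rtnl = bar_exit_rtnl,	/* replaces .exit_batch_rtnl */
};
```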
Signed-off-by: Kuniyuki Iwashima <[email protected]>
Reviewed-by: Sabrina Dubroca <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
Diffstat (limited to 'net/core/net_namespace.c')
| -rw-r--r-- | net/core/net_namespace.c | 53 |
1 file changed, 39 insertions, 14 deletions
```diff
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 37026776ae4e..afaa3d1bda8d 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -163,16 +163,51 @@ static void ops_pre_exit_list(const struct pernet_operations *ops,
 	}
 }
 
+static void ops_exit_rtnl_list(const struct list_head *ops_list,
+			       const struct pernet_operations *ops,
+			       struct list_head *net_exit_list)
+{
+	const struct pernet_operations *saved_ops = ops;
+	LIST_HEAD(dev_kill_list);
+	struct net *net;
+
+	rtnl_lock();
+
+	list_for_each_entry(net, net_exit_list, exit_list) {
+		__rtnl_net_lock(net);
+
+		ops = saved_ops;
+		list_for_each_entry_continue_reverse(ops, ops_list, list) {
+			if (ops->exit_rtnl)
+				ops->exit_rtnl(net, &dev_kill_list);
+		}
+
+		__rtnl_net_unlock(net);
+	}
+
+	ops = saved_ops;
+	list_for_each_entry_continue_reverse(ops, ops_list, list) {
+		if (ops->exit_batch_rtnl)
+			ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
+	}
+
+	unregister_netdevice_many(&dev_kill_list);
+
+	rtnl_unlock();
+}
+
 static void ops_exit_list(const struct pernet_operations *ops,
 			  struct list_head *net_exit_list)
 {
-	struct net *net;
 	if (ops->exit) {
+		struct net *net;
+
 		list_for_each_entry(net, net_exit_list, exit_list) {
 			ops->exit(net);
 			cond_resched();
 		}
 	}
+
 	if (ops->exit_batch)
 		ops->exit_batch(net_exit_list);
 }
@@ -213,18 +248,8 @@ static void ops_undo_list(const struct list_head *ops_list,
 	else
 		synchronize_rcu();
 
-	if (hold_rtnl) {
-		LIST_HEAD(dev_kill_list);
-
-		ops = saved_ops;
-		rtnl_lock();
-		list_for_each_entry_continue_reverse(ops, ops_list, list) {
-			if (ops->exit_batch_rtnl)
-				ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
-		}
-		unregister_netdevice_many(&dev_kill_list);
-		rtnl_unlock();
-	}
+	if (hold_rtnl)
+		ops_exit_rtnl_list(ops_list, saved_ops, net_exit_list);
 
 	ops = saved_ops;
 	list_for_each_entry_continue_reverse(ops, ops_list, list)
@@ -238,7 +263,7 @@ static void ops_undo_list(const struct list_head *ops_list,
 static void ops_undo_single(struct pernet_operations *ops,
 			    struct list_head *net_exit_list)
 {
-	bool hold_rtnl = !!ops->exit_batch_rtnl;
+	bool hold_rtnl = ops->exit_rtnl || ops->exit_batch_rtnl;
 	LIST_HEAD(ops_list);
 
 	list_add(&ops->list, &ops_list);
```
