net: Convert atomic_t net::count to refcount_t
Since a net can be obtained from RCU lists and there is a race with net destruction, convert net::count to refcount_t. This provides sanity checks for the case of incrementing the counter of an already dead net, where maybe_get_net() has to be used instead of get_net().

Drivers: allyesconfig and allmodconfig are OK.

Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 594831a8ab
commit 273c28bc57
5 changed files with 14 additions and 14 deletions
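The race the commit message describes is the usual RCU-lookup problem: a reader can still find a struct net whose last reference has already been dropped, so an unconditional get_net() would revive an object that is being torn down, while maybe_get_net() only takes a reference if the count is still non-zero. Below is a minimal userspace sketch of that distinction using plain C11 atomics; it is not kernel code, and the fake_net type and fake_*() helpers are invented purely for illustration:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for struct net; only the reference count matters here. */
struct fake_net {
	atomic_uint count;
};

/* Like get_net(): only valid when the caller already holds a reference. */
static struct fake_net *fake_get_net(struct fake_net *net)
{
	atomic_fetch_add(&net->count, 1);
	return net;
}

/* Like maybe_get_net()/refcount_inc_not_zero(): refuses to take a
 * reference on an object whose count has already dropped to zero. */
static struct fake_net *fake_maybe_get_net(struct fake_net *net)
{
	unsigned int old = atomic_load(&net->count);

	do {
		if (old == 0)
			return NULL;	/* destruction already started */
	} while (!atomic_compare_exchange_weak(&net->count, &old, old + 1));

	return net;
}

int main(void)
{
	struct fake_net live = { .count = 1 };	/* still referenced */
	struct fake_net dead = { .count = 0 };	/* last reference dropped */

	printf("live: %s\n", fake_maybe_get_net(&live) ? "got reference" : "NULL");
	printf("dead: %s\n", fake_maybe_get_net(&dead) ? "got reference" : "NULL");

	fake_get_net(&live);	/* fine: we already hold a reference */
	/* fake_get_net(&dead) would silently revive a dying object; that is
	 * the misuse refcount_t is meant to catch. */
	return 0;
}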
include/net/net_namespace.h

@@ -51,7 +51,7 @@ struct net {
 	refcount_t		passive;	/* To decided when the network
 						 * namespace should be freed.
 						 */
-	atomic_t		count;		/* To decided when the network
+	refcount_t		count;		/* To decided when the network
 						 * namespace should be shut down.
 						 */
 	spinlock_t		rules_mod_lock;
@@ -195,7 +195,7 @@ void __put_net(struct net *net);
 
 static inline struct net *get_net(struct net *net)
 {
-	atomic_inc(&net->count);
+	refcount_inc(&net->count);
 	return net;
 }
 
@@ -206,14 +206,14 @@ static inline struct net *maybe_get_net(struct net *net)
 	 * exists. If the reference count is zero this
 	 * function fails and returns NULL.
 	 */
-	if (!atomic_inc_not_zero(&net->count))
+	if (!refcount_inc_not_zero(&net->count))
		net = NULL;
 	return net;
 }
 
 static inline void put_net(struct net *net)
 {
-	if (atomic_dec_and_test(&net->count))
+	if (refcount_dec_and_test(&net->count))
		__put_net(net);
 }
 
net/core/net-sysfs.c

@@ -961,7 +961,7 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
 	while (--i >= new_num) {
 		struct kobject *kobj = &dev->_rx[i].kobj;
 
-		if (!atomic_read(&dev_net(dev)->count))
+		if (!refcount_read(&dev_net(dev)->count))
			kobj->uevent_suppress = 1;
 		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
@@ -1367,7 +1367,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
 	while (--i >= new_num) {
 		struct netdev_queue *queue = dev->_tx + i;
 
-		if (!atomic_read(&dev_net(dev)->count))
+		if (!refcount_read(&dev_net(dev)->count))
			queue->kobj.uevent_suppress = 1;
 #ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1558,7 +1558,7 @@ void netdev_unregister_kobject(struct net_device *ndev)
 {
 	struct device *dev = &ndev->dev;
 
-	if (!atomic_read(&dev_net(ndev)->count))
+	if (!refcount_read(&dev_net(ndev)->count))
		dev_set_uevent_suppress(dev, 1);
 
 	kobject_get(&dev->kobj);
net/core/net_namespace.c

@@ -35,7 +35,7 @@ LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
 
 struct net init_net = {
-	.count		= ATOMIC_INIT(1),
+	.count		= REFCOUNT_INIT(1),
 	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
 };
 EXPORT_SYMBOL(init_net);
@@ -224,10 +224,10 @@ int peernet2id_alloc(struct net *net, struct net *peer)
 	bool alloc;
 	int id;
 
-	if (atomic_read(&net->count) == 0)
+	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
 	spin_lock_bh(&net->nsid_lock);
-	alloc = atomic_read(&peer->count) == 0 ? false : true;
+	alloc = refcount_read(&peer->count) == 0 ? false : true;
 	id = __peernet2id_alloc(net, peer, &alloc);
 	spin_unlock_bh(&net->nsid_lock);
 	if (alloc && id >= 0)
@@ -284,7 +284,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 	int error = 0;
 	LIST_HEAD(net_exit_list);
 
-	atomic_set(&net->count, 1);
+	refcount_set(&net->count, 1);
 	refcount_set(&net->passive, 1);
 	net->dev_base_seq = 1;
 	net->user_ns = user_ns;
net/ipv4/inet_timewait_sock.c

@@ -270,14 +270,14 @@ restart:
			continue;
 		tw = inet_twsk(sk);
 		if ((tw->tw_family != family) ||
-		    atomic_read(&twsk_net(tw)->count))
+		    refcount_read(&twsk_net(tw)->count))
			continue;
 
 		if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
			continue;
 
 		if (unlikely((tw->tw_family != family) ||
-			     atomic_read(&twsk_net(tw)->count))) {
+			     refcount_read(&twsk_net(tw)->count))) {
			inet_twsk_put(tw);
			goto restart;
 		}
net/ipv4/tcp_metrics.c

@@ -892,7 +892,7 @@ static void tcp_metrics_flush_all(struct net *net)
 		pp = &hb->chain;
 		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
-				!atomic_read(&tm_net(tm)->count);
+				!refcount_read(&tm_net(tm)->count);
			if (match) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
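Taken together, the hunks are a mechanical substitution of the refcount_t API for the atomic_t one on net::count: ATOMIC_INIT() becomes REFCOUNT_INIT(), atomic_set()/atomic_read() become refcount_set()/refcount_read(), atomic_inc() becomes refcount_inc(), atomic_inc_not_zero() becomes refcount_inc_not_zero(), and atomic_dec_and_test() becomes refcount_dec_and_test(). The gain, as the commit message says, is the sanity checking: with refcount checking enabled, incrementing a counter that has already dropped to zero (or overflowing it) is caught and warned about instead of silently corrupting the reference count.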