mirror of
https://github.com/fail0verflow/switch-linux.git
synced 2025-05-04 02:34:21 -04:00
Staging: batman-adv: fix rogue packets on shutdown
On module shutdown, batman-adv would purge the internal packet queue by sending all remaining packets, which could confuse other nodes. Now the packets are silently discarded.

Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Signed-off-by: Sven Eckelmann <sven.eckelmann@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
parent
9d20015391
commit
5f411a90ee
1 changed file with 15 additions and 9 deletions
|
@ -440,6 +440,9 @@ void send_outstanding_bcast_packet(struct work_struct *work)
|
||||||
hlist_del(&forw_packet->list);
|
hlist_del(&forw_packet->list);
|
||||||
spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
|
spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
|
||||||
|
|
||||||
|
if (atomic_read(&module_state) == MODULE_DEACTIVATING)
|
||||||
|
goto out;
|
||||||
|
|
||||||
/* rebroadcast packet */
|
/* rebroadcast packet */
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
list_for_each_entry_rcu(batman_if, &if_list, list) {
|
list_for_each_entry_rcu(batman_if, &if_list, list) {
|
||||||
|
@ -453,15 +456,15 @@ void send_outstanding_bcast_packet(struct work_struct *work)
|
||||||
|
|
||||||
forw_packet->num_packets++;
|
forw_packet->num_packets++;
|
||||||
|
|
||||||
/* if we still have some more bcasts to send and we are not shutting
|
/* if we still have some more bcasts to send */
|
||||||
* down */
|
if (forw_packet->num_packets < 3) {
|
||||||
if ((forw_packet->num_packets < 3) &&
|
|
||||||
(atomic_read(&module_state) != MODULE_DEACTIVATING))
|
|
||||||
_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
|
_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
|
||||||
else {
|
return;
|
||||||
forw_packet_free(forw_packet);
|
|
||||||
atomic_inc(&bcast_queue_left);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
out:
|
||||||
|
forw_packet_free(forw_packet);
|
||||||
|
atomic_inc(&bcast_queue_left);
|
||||||
}
|
}
|
||||||
|
|
||||||
void send_outstanding_bat_packet(struct work_struct *work)
|
void send_outstanding_bat_packet(struct work_struct *work)
|
||||||
|
@ -476,6 +479,9 @@ void send_outstanding_bat_packet(struct work_struct *work)
|
||||||
hlist_del(&forw_packet->list);
|
hlist_del(&forw_packet->list);
|
||||||
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
|
spin_unlock_irqrestore(&forw_bat_list_lock, flags);
|
||||||
|
|
||||||
|
if (atomic_read(&module_state) == MODULE_DEACTIVATING)
|
||||||
|
goto out;
|
||||||
|
|
||||||
send_packet(forw_packet);
|
send_packet(forw_packet);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -483,10 +489,10 @@ void send_outstanding_bat_packet(struct work_struct *work)
|
||||||
* to determine the queues wake up time unless we are
|
* to determine the queues wake up time unless we are
|
||||||
* shutting down
|
* shutting down
|
||||||
*/
|
*/
|
||||||
if ((forw_packet->own) &&
|
if (forw_packet->own)
|
||||||
(atomic_read(&module_state) != MODULE_DEACTIVATING))
|
|
||||||
schedule_own_packet(forw_packet->if_incoming);
|
schedule_own_packet(forw_packet->if_incoming);
|
||||||
|
|
||||||
|
out:
|
||||||
/* don't count own packet */
|
/* don't count own packet */
|
||||||
if (!forw_packet->own)
|
if (!forw_packet->own)
|
||||||
atomic_inc(&batman_queue_left);
|
atomic_inc(&batman_queue_left);
|
||||||
|
|
Loading…
Add table
Reference in a new issue