From 52de457b3cfd1e94a52df1c3dfcd9dbf3511fa0d Mon Sep 17 00:00:00 2001 From: fgao Date: Mon, 20 Oct 2014 07:18:05 -0700 Subject: [PATCH 1/1] netfilter: Fix wasteful cleanup check for unconfirmed conn in get_next_corpse The function get_next_corpse is used to iterate the conntracks. It will check the per-cpu unconfirmed list of every cpu too. Now it is only invoked by nf_ct_iterate_cleanup in one while loop. Actually the unconfirmed list could be accessed completely by one call, then the others are wasteful. So move the unconfirmed list check outside the function get_next_corpse and create one new function. Let nf_ct_iterate_cleanup invoke the new function clean_up_unconfirmed_conntracks once after the loops. Signed-off-by: fgao --- net/netfilter/nf_conntrack_core.c | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 5016a69..ace7c2c2 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1348,6 +1348,28 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb) nf_conntrack_get(nskb->nfct); } +static void clean_up_unconfirmed_conntracks(struct net *net, + int (*iter)(struct nf_conn *i, void *data), + void *data) +{ + struct nf_conntrack_tuple_hash *h; + struct nf_conn *ct; + struct hlist_nulls_node *n; + int cpu; + + for_each_possible_cpu(cpu) { + struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); + + spin_lock_bh(&pcpu->lock); + hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { + ct = nf_ct_tuplehash_to_ctrack(h); + if (iter(ct, data)) + set_bit(IPS_DYING_BIT, &ct->status); + } + spin_unlock_bh(&pcpu->lock); + } +} + /* Bring out ya dead! 
*/ static struct nf_conn * get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), @@ -1356,7 +1378,6 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct hlist_nulls_node *n; - int cpu; spinlock_t *lockp; for (; *bucket < net->ct.htable_size; (*bucket)++) { @@ -1376,17 +1397,6 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), local_bh_enable(); } - for_each_possible_cpu(cpu) { - struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); - - spin_lock_bh(&pcpu->lock); - hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { - ct = nf_ct_tuplehash_to_ctrack(h); - if (iter(ct, data)) - set_bit(IPS_DYING_BIT, &ct->status); - } - spin_unlock_bh(&pcpu->lock); - } return NULL; found: atomic_inc(&ct->ct_general.use); @@ -1411,6 +1421,8 @@ void nf_ct_iterate_cleanup(struct net *net, nf_ct_put(ct); } + + clean_up_unconfirmed_conntracks(net, iter, data); } EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); -- 1.9.1