Patch attached to kernel.org Bugzilla bug 196821: revert the nf_nat bysource table from an rhashtable back to a plain hlist-based hash table, with insertions and removals serialized by nf_nat_lock and lookups done under RCU.

--- a/include/net/netfilter/nf_conntrack.h	(-2 / +1 lines)
+++ b/include/net/netfilter/nf_conntrack.h
@@ -17,7 +17,6 @@
 #include <linux/bitops.h>
 #include <linux/compiler.h>
 #include <linux/atomic.h>
-#include <linux/rhashtable.h>
 
 #include <linux/netfilter/nf_conntrack_tcp.h>
 #include <linux/netfilter/nf_conntrack_dccp.h>
@@ -118,7 +117,7 @@ struct nf_conn {
 	struct nf_ct_ext *ext;
 
 #if IS_ENABLED(CONFIG_NF_NAT)
-	struct rhash_head	nat_bysource;
+	struct hlist_node	nat_bysource;
 #endif
 	/* Storage reserved for other modules, must be the last member */
 	union nf_conntrack_proto proto;
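
The key change above: nat_bysource becomes an intrusive hlist_node again. The conntrack object itself is the chain link, so inserting it into the bysource hash allocates nothing. A minimal userspace sketch of that embedding, assuming invented names (fake_conn, the two-field hlist types); the kernel's real hlist and container_of live in <linux/list.h> and <linux/kernel.h>:

    #include <stddef.h>
    #include <stdio.h>

    struct hlist_node { struct hlist_node *next; };
    struct hlist_head { struct hlist_node *first; };

    struct fake_conn {
            int id;                         /* stand-in for conntrack state */
            struct hlist_node nat_bysource; /* embedded link, as in struct nf_conn */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
    {
            n->next = h->first;             /* push onto the bucket's chain */
            h->first = n;
    }

    int main(void)
    {
            struct hlist_head bucket = { NULL };
            struct fake_conn a = { .id = 1 }, b = { .id = 2 };

            hlist_add_head(&a.nat_bysource, &bucket);
            hlist_add_head(&b.nat_bysource, &bucket);

            /* Walk the chain and recover each containing object. */
            for (struct hlist_node *n = bucket.first; n; n = n->next) {
                    struct fake_conn *c =
                            container_of(n, struct fake_conn, nat_bysource);
                    printf("conn id=%d\n", c->id);
            }
            return 0;
    }
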
--- a/include/net/netfilter/nf_nat.h	(-1 lines)
+++ b/include/net/netfilter/nf_nat.h
@@ -1,6 +1,5 @@
 #ifndef _NF_NAT_H
 #define _NF_NAT_H
-#include <linux/rhashtable.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter/nf_nat.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
--- a/net/netfilter/nf_nat_core.c	(-68 / +59 lines)
+++ b/net/netfilter/nf_nat_core.c
@@ -30,19 +30,17 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <linux/netfilter/nf_nat.h>
 
+static DEFINE_SPINLOCK(nf_nat_lock);
+
 static DEFINE_MUTEX(nf_nat_proto_mutex);
 static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
 						__read_mostly;
 static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
 						__read_mostly;
 
-struct nf_nat_conn_key {
-	const struct net *net;
-	const struct nf_conntrack_tuple *tuple;
-	const struct nf_conntrack_zone *zone;
-};
-
-static struct rhashtable nf_nat_bysource_table;
+static struct hlist_head *nf_nat_bysource __read_mostly;
+static unsigned int nf_nat_htable_size __read_mostly;
+static unsigned int nf_nat_hash_rnd __read_mostly;
 
 inline const struct nf_nat_l3proto *
 __nf_nat_l3proto_find(u8 family)
@@ -121,17 +119,19 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 EXPORT_SYMBOL(nf_xfrm_me_harder);
 #endif /* CONFIG_XFRM */
 
-static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
+/* We keep an extra hash for each conntrack, for fast searching. */
+static inline unsigned int
+hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
 {
-	const struct nf_conntrack_tuple *t;
-	const struct nf_conn *ct = data;
+	unsigned int hash;
+
+	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
 
-	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
 	/* Original src, to ensure we map it consistently if poss. */
+	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
+		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
 
-	seed ^= net_hash_mix(nf_ct_net(ct));
-	return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
-		      t->dst.protonum ^ seed);
+	return reciprocal_scale(hash, nf_nat_htable_size);
 }
 
 /* Is this tuple already taken? (not by us) */
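
hash_by_src() above folds the original source tuple, a boot-time random seed, and the netns mix into one 32-bit jhash2() value, then maps it onto a bucket index with reciprocal_scale(), which replaces a modulo with one multiply and shift. A small userspace sketch of that last step; the helper body mirrors the kernel's include/linux/kernel.h definition, while the hash value and table size here are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Same multiply-and-shift as the kernel's reciprocal_scale():
     * maps any 32-bit value uniformly onto [0, ep_ro). */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            uint32_t htable_size = 16384;   /* like nf_nat_htable_size */
            uint32_t hash = 0xdeadbeef;     /* stand-in for the jhash2() result */

            printf("bucket %u of %u\n",
                   reciprocal_scale(hash, htable_size), htable_size);
            return 0;
    }
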
@@ -187,26 +187,6 @@ same_src(const struct nf_conn *ct,
 		t->src.u.all == tuple->src.u.all);
 }
 
-static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
-			       const void *obj)
-{
-	const struct nf_nat_conn_key *key = arg->key;
-	const struct nf_conn *ct = obj;
-
-	return same_src(ct, key->tuple) &&
-	       net_eq(nf_ct_net(ct), key->net) &&
-	       nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL);
-}
-
-static struct rhashtable_params nf_nat_bysource_params = {
-	.head_offset = offsetof(struct nf_conn, nat_bysource),
-	.obj_hashfn = nf_nat_bysource_hash,
-	.obj_cmpfn = nf_nat_bysource_cmp,
-	.nelem_hint = 256,
-	.min_size = 1024,
-	.nulls_base = (1U << RHT_BASE_SHIFT),
-};
-
 /* Only called for SRC manip */
 static int
 find_appropriate_src(struct net *net,
@@ -217,23 +197,23 @@ find_appropriate_src(struct net *net,
 		     struct nf_conntrack_tuple *result,
 		     const struct nf_nat_range *range)
 {
+	unsigned int h = hash_by_src(net, tuple);
 	const struct nf_conn *ct;
-	struct nf_nat_conn_key key = {
-		.net = net,
-		.tuple = tuple,
-		.zone = zone
-	};
-
-	ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key,
-				    nf_nat_bysource_params);
-	if (!ct)
-		return 0;
-
-	nf_ct_invert_tuplepr(result,
-			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-	result->dst = tuple->dst;
 
-	return in_range(l3proto, l4proto, result, range);
+	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
+		if (same_src(ct, tuple) &&
+		    net_eq(net, nf_ct_net(ct)) &&
+		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
+			/* Copy source part from reply tuple. */
+			nf_ct_invert_tuplepr(result,
+				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+			result->dst = tuple->dst;
+
+			if (in_range(l3proto, l4proto, result, range))
+				return 1;
+		}
+	}
+	return 0;
 }
 
 /* For [FUTURE] fragmentation handling, we want the least-used
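
With the rhashtable gone, find_appropriate_src() walks one hash bucket and filters candidates by source tuple (plus netns and zone in the kernel). A toy sketch of the same bucket walk, reusing the intrusive-link idea from the first example; the tuple/conn types and helper names are invented, and the real walk runs under rcu_read_lock() via hlist_for_each_entry_rcu():

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct tuple { unsigned int src_ip, src_port; };
    struct node  { struct node *next; };

    struct conn {
            struct tuple orig;  /* simplified IP_CT_DIR_ORIGINAL tuple */
            struct node  link;  /* plays the role of nat_bysource */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static bool same_src(const struct conn *c, const struct tuple *t)
    {
            return c->orig.src_ip == t->src_ip &&
                   c->orig.src_port == t->src_port;
    }

    /* Walk one bucket; return the first entry with a matching source. */
    static struct conn *lookup_bysource(struct node *bucket,
                                        const struct tuple *t)
    {
            for (struct node *n = bucket; n; n = n->next) {
                    struct conn *c = container_of(n, struct conn, link);
                    if (same_src(c, t))
                            return c;
            }
            return NULL;
    }

    int main(void)
    {
            struct conn a = { .orig = { 1, 100 } };
            struct conn b = { .orig = { 2, 200 } };
            struct tuple want = { 2, 200 };

            a.link.next = &b.link;  /* two entries hashed to the same bucket */
            b.link.next = NULL;

            printf("%s\n", lookup_bysource(&a.link, &want) ? "found"
                                                           : "not found");
            return 0;
    }
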
@@ -405,6 +385,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype)
 {
+	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_tuple curr_tuple, new_tuple;
 	struct nf_conn_nat *nat;
 
@@ -446,13 +427,16 @@ nf_nat_setup_info(struct nf_conn *ct,
 	}
 
 	if (maniptype == NF_NAT_MANIP_SRC) {
-		int err;
+		unsigned int srchash;
 
-		err = rhashtable_insert_fast(&nf_nat_bysource_table,
-					     &ct->nat_bysource,
-					     nf_nat_bysource_params);
-		if (err)
-			return NF_DROP;
+		srchash = hash_by_src(net,
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		spin_lock_bh(&nf_nat_lock);
+		/* nf_conntrack_alter_reply might re-allocate extension aera */
+		nat = nfct_nat(ct);
+		hlist_add_head_rcu(&ct->nat_bysource,
+				   &nf_nat_bysource[srchash]);
+		spin_unlock_bh(&nf_nat_lock);
 	}
 
 	/* It's done. */
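
Note the shape of the insert path: the bucket index is computed outside the lock, then the conntrack is linked in under nf_nat_lock, and insertion can no longer fail (the rhashtable version had to handle an error and return NF_DROP). A pthread-based userspace analogue of that pattern; a mutex stands in for the BH-disabling spinlock, the kernel pairs the locked write with RCU readers instead, and HSIZE and all names here are invented:

    #include <pthread.h>
    #include <stdio.h>

    #define HSIZE 8  /* invented; the kernel sizes via nf_ct_alloc_hashtable() */

    struct node { struct node *next; };

    static struct node *table[HSIZE];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void insert_bysource(struct node *n, unsigned int srchash)
    {
            unsigned int b = srchash % HSIZE;

            pthread_mutex_lock(&table_lock);    /* spin_lock_bh(&nf_nat_lock) */
            n->next = table[b];                 /* hlist_add_head_rcu() */
            table[b] = n;
            pthread_mutex_unlock(&table_lock);  /* spin_unlock_bh(&nf_nat_lock) */
    }

    int main(void)
    {
            struct node a = { 0 }, b = { 0 };

            insert_bysource(&a, 3);
            insert_bysource(&b, 3);
            printf("bucket 3 holds %s\n",
                   table[3] == &b ? "b then a" : "?");
            return 0;
    }
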
@@ -566,9 +550,11 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
 	 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
 	 * will delete entry from already-freed table.
 	 */
+
+	spin_lock_bh(&nf_nat_lock);
+	hlist_del_rcu(&ct->nat_bysource);
 	ct->status &= ~IPS_NAT_DONE_MASK;
-	rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
-			       nf_nat_bysource_params);
+	spin_unlock_bh(&nf_nat_lock);
 
 	/* don't delete conntrack.  Although that would make things a lot
 	 * simpler, we'd end up flushing all conntracks on nat rmmod.
@@ -698,8 +684,11 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
 	if (!nat)
 		return;
 
-	rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
-			       nf_nat_bysource_params);
+	NF_CT_ASSERT(ct->status & IPS_SRC_NAT_DONE);
+
+	spin_lock_bh(&nf_nat_lock);
+	hlist_del_rcu(&ct->nat_bysource);
+	spin_unlock_bh(&nf_nat_lock);
 }
 
 static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -834,13 +823,16 @@ static int __init nf_nat_init(void)
 {
 	int ret;
 
-	ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
-	if (ret)
-		return ret;
+	/* Leave them the same for the moment. */
+	nf_nat_htable_size = nf_conntrack_htable_size;
+
+	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
+	if (!nf_nat_bysource)
+		return -ENOMEM;
 
 	ret = nf_ct_extend_register(&nat_extend);
 	if (ret < 0) {
-		rhashtable_destroy(&nf_nat_bysource_table);
+		nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
 		return ret;
 	}
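
The init path now sizes the bysource table from nf_conntrack_htable_size, and every error path after a successful allocation must free it again. A sketch of that allocate-or-fail shape, with calloc() standing in for nf_ct_alloc_hashtable() and the size merely illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    int main(void)
    {
            /* "Leave them the same for the moment": sized like the
             * conntrack table in the real code. */
            unsigned int htable_size = 16384;
            struct node **bysource = calloc(htable_size, sizeof(*bysource));

            if (!bysource)
                    return 1;  /* kernel path: return -ENOMEM */

            /* On a later registration failure the kernel frees the table
             * again (nf_ct_free_hashtable) before bailing out. */
            printf("allocated %u buckets\n", htable_size);
            free(bysource);
            return 0;
    }
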
@@ -864,7 +856,7 @@ static int __init nf_nat_init(void)
 	return 0;
 
  cleanup_extend:
-	rhashtable_destroy(&nf_nat_bysource_table);
+	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 	nf_ct_extend_unregister(&nat_extend);
 	return ret;
 }
@@ -882,8 +874,8 @@ static void __exit nf_nat_cleanup(void)
 #endif
 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
 		kfree(nf_nat_l4protos[i]);
-
-	rhashtable_destroy(&nf_nat_bysource_table);
+	synchronize_net();
+	nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
 }
 
 MODULE_LICENSE("GPL");