/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)		WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : "");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : "");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
#undef FWINV

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
}

/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

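/* Emit a "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet, identifying which rule of which chain it matched.
 */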
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct nf_hook_state *state,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports.  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu        = smp_processor_id();
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries[cpu];
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, state->in, state->out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (*stackptr <= origptr) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This is a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}

static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

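/* Base chain policies ("underflow" entries) must be unconditional
 * STANDARD verdicts of ACCEPT or DROP; anything else is rejected when
 * the table is loaded.
 */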
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}

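/* Validate one entry's alignment and size bounds, and record the hook
 * entry points and underflows whose offsets land exactly on it.
 */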
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

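/* Sum the per-cpu counters into the caller-supplied array, using the
 * xt_recseq sequence counter to retry if a packet updates a counter
 * pair while we are reading it.
 */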
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}

static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

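/* Work out how much smaller this entry is in the 32-bit compat layout
 * and record the per-entry delta, so that hook entry points and jump
 * offsets can be adjusted when the table is reported to userspace.
 */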
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

static int get_info(struct net *net, void __user *user,
                    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

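/* Common tail of the REPLACE path: swap the new table in under the
 * xt table lock, snapshot the old counters for userspace, then tear
 * down and free the old entries.
 */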
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

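/* IPT_SO_SET_ADD_COUNTERS handler (native and compat): add the
 * user-supplied byte/packet deltas to this CPU's copy of the rule
 * counters inside a recseq write section.
 */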
static int
do_add_counters(struct net *net, const void __user *user,
                unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}

static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

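/* Convert a 32-bit userspace ruleset into the native layout: first
 * walk the compat entries to validate them and accumulate size
 * offsets (under the compat lock), then copy each entry into a newly
 * allocated native table and run the usual checkentry pass on it.
 */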
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
1992
1993static int
1994do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1995{
1996	int ret;
1997
1998	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1999		return -EPERM;
2000
2001	switch (cmd) {
2002	case IPT_SO_SET_REPLACE:
2003		ret = do_replace(sock_net(sk), user, len);
2004		break;
2005
2006	case IPT_SO_SET_ADD_COUNTERS:
2007		ret = do_add_counters(sock_net(sk), user, len, 0);
2008		break;
2009
2010	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

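/*
 * Register a table in one network namespace.  A minimal usage sketch,
 * roughly what a per-table module such as iptable_filter does in its
 * pernet init hook (variable and table names are illustrative):
 *
 *	struct ipt_replace *repl = ipt_alloc_initial_table(&packet_filter);
 *	if (repl == NULL)
 *		return -ENOMEM;
 *	net->ipv4.iptable_filter =
 *		ipt_register_table(net, &packet_filter, repl);
 *	kfree(repl);
 *	if (IS_ERR(net->ipv4.iptable_filter))
 *		return PTR_ERR(net->ipv4.iptable_filter);
 *	return 0;
 *
 * The matching pernet exit hook then calls ipt_unregister_table().
 */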
struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but don't care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

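/* Tear-down counterpart of ipt_register_table(): detach the table from the
 * xt core, run cleanup_entry() on every rule so match/target destructors
 * run and their module references are dropped, then free the table info.
 */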
void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Returns true if the type and code are matched by the range, false
 * otherwise; a test_type of 0xFF acts as a wildcard and matches any type.
 */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
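
/* Illustrative encoding (assuming the usual iptables userspace behaviour):
 * "-p icmp --icmp-type echo-request" arrives here as test_type == 8 with a
 * code range of [0, 0xFF], so any echo request matches regardless of its
 * code field.
 */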

static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}

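/* Built-in targets.  The standard target carries only an int verdict
 * (an absolute verdict or a jump offset within the table); the error
 * target carries a name string and is used to delimit user-defined
 * chains, with ipt_error() catching rules that should never be hit.
 */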
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};

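/* The get/setsockopt interface iptables(8) talks to: commands in the ranges
 * [IPT_BASE_CTL, IPT_SO_SET_MAX] and [IPT_BASE_CTL, IPT_SO_GET_MAX] on IPv4
 * sockets are routed to the handlers above, with the compat_* variants
 * covering 32-bit userspace on a 64-bit kernel.
 */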
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

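/* Module init order: per-netns state first, then the built-in target and
 * match, and the sockopt interface last so nothing is exposed to userspace
 * until the rest is in place; errors unwind in reverse order below.
 */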
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);