/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick McHardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */
17
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/rculist.h>
22#include <linux/rculist_nulls.h>
23#include <linux/types.h>
24#include <linux/timer.h>
25#include <linux/security.h>
26#include <linux/skbuff.h>
27#include <linux/errno.h>
28#include <linux/netlink.h>
29#include <linux/spinlock.h>
30#include <linux/interrupt.h>
31#include <linux/slab.h>
32
33#include <linux/netfilter.h>
34#include <net/netlink.h>
35#include <net/sock.h>
36#include <net/netfilter/nf_conntrack.h>
37#include <net/netfilter/nf_conntrack_core.h>
38#include <net/netfilter/nf_conntrack_expect.h>
39#include <net/netfilter/nf_conntrack_helper.h>
40#include <net/netfilter/nf_conntrack_seqadj.h>
41#include <net/netfilter/nf_conntrack_l3proto.h>
42#include <net/netfilter/nf_conntrack_l4proto.h>
43#include <net/netfilter/nf_conntrack_tuple.h>
44#include <net/netfilter/nf_conntrack_acct.h>
45#include <net/netfilter/nf_conntrack_zones.h>
46#include <net/netfilter/nf_conntrack_timestamp.h>
47#include <net/netfilter/nf_conntrack_labels.h>
48#ifdef CONFIG_NF_NAT_NEEDED
49#include <net/netfilter/nf_nat_core.h>
50#include <net/netfilter/nf_nat_l4proto.h>
51#include <net/netfilter/nf_nat_helper.h>
52#endif
53
54#include <linux/netfilter/nfnetlink.h>
55#include <linux/netfilter/nfnetlink_conntrack.h>
56
57MODULE_LICENSE("GPL");
58
59static char __initdata version[] = "0.93";
60
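/* Each CTA_TUPLE_ORIG/REPLY/MASTER nest carries a CTA_TUPLE_IP and a
 * CTA_TUPLE_PROTO sub-nest.  This helper emits the protocol part:
 * CTA_PROTO_NUM plus whatever per-protocol attributes (ports, ICMP
 * id/type/code, ...) the l4proto handler wants to add.
 */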
61static inline int
62ctnetlink_dump_tuples_proto(struct sk_buff *skb,
63			    const struct nf_conntrack_tuple *tuple,
64			    struct nf_conntrack_l4proto *l4proto)
65{
66	int ret = 0;
67	struct nlattr *nest_parms;
68
69	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
70	if (!nest_parms)
71		goto nla_put_failure;
72	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
73		goto nla_put_failure;
74
75	if (likely(l4proto->tuple_to_nlattr))
76		ret = l4proto->tuple_to_nlattr(skb, tuple);
77
78	nla_nest_end(skb, nest_parms);
79
80	return ret;
81
82nla_put_failure:
83	return -1;
84}
85
86static inline int
87ctnetlink_dump_tuples_ip(struct sk_buff *skb,
88			 const struct nf_conntrack_tuple *tuple,
89			 struct nf_conntrack_l3proto *l3proto)
90{
91	int ret = 0;
92	struct nlattr *nest_parms;
93
94	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
95	if (!nest_parms)
96		goto nla_put_failure;
97
98	if (likely(l3proto->tuple_to_nlattr))
99		ret = l3proto->tuple_to_nlattr(skb, tuple);
100
101	nla_nest_end(skb, nest_parms);
102
103	return ret;
104
105nla_put_failure:
106	return -1;
107}
108
109static int
110ctnetlink_dump_tuples(struct sk_buff *skb,
111		      const struct nf_conntrack_tuple *tuple)
112{
113	int ret;
114	struct nf_conntrack_l3proto *l3proto;
115	struct nf_conntrack_l4proto *l4proto;
116
117	rcu_read_lock();
118	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
119	ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
120
121	if (ret >= 0) {
122		l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
123					       tuple->dst.protonum);
124		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
125	}
126	rcu_read_unlock();
127	return ret;
128}
129
130static inline int
131ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
132{
133	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
134		goto nla_put_failure;
135	return 0;
136
137nla_put_failure:
138	return -1;
139}
140
141static inline int
142ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
143{
144	long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
145
146	if (timeout < 0)
147		timeout = 0;
148
149	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
150		goto nla_put_failure;
151	return 0;
152
153nla_put_failure:
154	return -1;
155}
156
157static inline int
158ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
159{
160	struct nf_conntrack_l4proto *l4proto;
161	struct nlattr *nest_proto;
162	int ret;
163
164	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
165	if (!l4proto->to_nlattr)
166		return 0;
167
168	nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
169	if (!nest_proto)
170		goto nla_put_failure;
171
172	ret = l4proto->to_nlattr(skb, nest_proto, ct);
173
174	nla_nest_end(skb, nest_proto);
175
176	return ret;
177
178nla_put_failure:
179	return -1;
180}
181
182static inline int
183ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
184{
185	struct nlattr *nest_helper;
186	const struct nf_conn_help *help = nfct_help(ct);
187	struct nf_conntrack_helper *helper;
188
189	if (!help)
190		return 0;
191
192	helper = rcu_dereference(help->helper);
193	if (!helper)
194		goto out;
195
196	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
197	if (!nest_helper)
198		goto nla_put_failure;
199	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
200		goto nla_put_failure;
201
202	if (helper->to_nlattr)
203		helper->to_nlattr(skb, ct);
204
205	nla_nest_end(skb, nest_helper);
206out:
207	return 0;
208
209nla_put_failure:
210	return -1;
211}
212
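/* Emit one CTA_COUNTERS_ORIG or CTA_COUNTERS_REPLY nest.  For
 * IPCTNL_MSG_CT_GET_CTRZERO the counters are read and reset atomically
 * via atomic64_xchg(), so concurrent updates are never lost.
 */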
213static int
214dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
215	      enum ip_conntrack_dir dir, int type)
216{
	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
218	struct nf_conn_counter *counter = acct->counter;
219	struct nlattr *nest_count;
220	u64 pkts, bytes;
221
222	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
223		pkts = atomic64_xchg(&counter[dir].packets, 0);
224		bytes = atomic64_xchg(&counter[dir].bytes, 0);
225	} else {
226		pkts = atomic64_read(&counter[dir].packets);
227		bytes = atomic64_read(&counter[dir].bytes);
228	}
229
230	nest_count = nla_nest_start(skb, attr | NLA_F_NESTED);
231	if (!nest_count)
232		goto nla_put_failure;
233
234	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
235	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
236		goto nla_put_failure;
237
238	nla_nest_end(skb, nest_count);
239
240	return 0;
241
242nla_put_failure:
243	return -1;
244}
245
246static int
247ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
248{
249	struct nf_conn_acct *acct = nf_conn_acct_find(ct);
250
251	if (!acct)
252		return 0;
253
254	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
255		return -1;
256	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
257		return -1;
258
259	return 0;
260}
261
262static int
263ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
264{
265	struct nlattr *nest_count;
266	const struct nf_conn_tstamp *tstamp;
267
268	tstamp = nf_conn_tstamp_find(ct);
269	if (!tstamp)
270		return 0;
271
272	nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
273	if (!nest_count)
274		goto nla_put_failure;
275
276	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
277	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
278					       cpu_to_be64(tstamp->stop))))
279		goto nla_put_failure;
280	nla_nest_end(skb, nest_count);
281
282	return 0;
283
284nla_put_failure:
285	return -1;
286}
287
288#ifdef CONFIG_NF_CONNTRACK_MARK
289static inline int
290ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
291{
292	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
293		goto nla_put_failure;
294	return 0;
295
296nla_put_failure:
297	return -1;
298}
299#else
300#define ctnetlink_dump_mark(a, b) (0)
301#endif
302
303#ifdef CONFIG_NF_CONNTRACK_SECMARK
304static inline int
305ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
306{
307	struct nlattr *nest_secctx;
308	int len, ret;
309	char *secctx;
310
311	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
312	if (ret)
313		return 0;
314
315	ret = -1;
316	nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
317	if (!nest_secctx)
318		goto nla_put_failure;
319
320	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
321		goto nla_put_failure;
322	nla_nest_end(skb, nest_secctx);
323
324	ret = 0;
325nla_put_failure:
326	security_release_secctx(secctx, len);
327	return ret;
328}
329#else
330#define ctnetlink_dump_secctx(a, b) (0)
331#endif
332
333#ifdef CONFIG_NF_CONNTRACK_LABELS
334static int ctnetlink_label_size(const struct nf_conn *ct)
335{
336	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
337
338	if (!labels)
339		return 0;
340	return nla_total_size(labels->words * sizeof(long));
341}
342
343static int
344ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
345{
346	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
347	unsigned int len, i;
348
349	if (!labels)
350		return 0;
351
352	len = labels->words * sizeof(long);
353	i = 0;
354	do {
355		if (labels->bits[i] != 0)
356			return nla_put(skb, CTA_LABELS, len, labels->bits);
357		i++;
358	} while (i < labels->words);
359
360	return 0;
361}
362#else
363#define ctnetlink_dump_labels(a, b) (0)
364#define ctnetlink_label_size(a)	(0)
365#endif
366
367#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
368
369static inline int
370ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
371{
372	struct nlattr *nest_parms;
373
374	if (!(ct->status & IPS_EXPECTED))
375		return 0;
376
377	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
378	if (!nest_parms)
379		goto nla_put_failure;
380	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
381		goto nla_put_failure;
382	nla_nest_end(skb, nest_parms);
383
384	return 0;
385
386nla_put_failure:
387	return -1;
388}
389
390static int
391dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
392{
393	struct nlattr *nest_parms;
394
395	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
396	if (!nest_parms)
397		goto nla_put_failure;
398
399	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
400			 htonl(seq->correction_pos)) ||
401	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
402			 htonl(seq->offset_before)) ||
403	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
404			 htonl(seq->offset_after)))
405		goto nla_put_failure;
406
407	nla_nest_end(skb, nest_parms);
408
409	return 0;
410
411nla_put_failure:
412	return -1;
413}
414
415static inline int
416ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
417{
418	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
419	struct nf_ct_seqadj *seq;
420
421	if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
422		return 0;
423
424	seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
425	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
426		return -1;
427
428	seq = &seqadj->seq[IP_CT_DIR_REPLY];
429	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
430		return -1;
431
432	return 0;
433}
434
435static inline int
436ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
437{
438	if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
439		goto nla_put_failure;
440	return 0;
441
442nla_put_failure:
443	return -1;
444}
445
446static inline int
447ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
448{
449	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
450		goto nla_put_failure;
451	return 0;
452
453nla_put_failure:
454	return -1;
455}
456
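/* Build a complete IPCTNL_MSG_CT_NEW message for one conntrack entry.
 * Used both for GET replies and for table dumps (portid != 0 adds
 * NLM_F_MULTI).  Any nla_put failure cancels the partially built
 * message and returns -1 so the caller can retry with a bigger skb.
 */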
457static int
458ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
459		    struct nf_conn *ct)
460{
461	struct nlmsghdr *nlh;
462	struct nfgenmsg *nfmsg;
463	struct nlattr *nest_parms;
464	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
465
466	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
467	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
468	if (nlh == NULL)
469		goto nlmsg_failure;
470
471	nfmsg = nlmsg_data(nlh);
472	nfmsg->nfgen_family = nf_ct_l3num(ct);
473	nfmsg->version      = NFNETLINK_V0;
474	nfmsg->res_id	    = 0;
475
476	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
477	if (!nest_parms)
478		goto nla_put_failure;
479	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
480		goto nla_put_failure;
481	nla_nest_end(skb, nest_parms);
482
483	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
484	if (!nest_parms)
485		goto nla_put_failure;
486	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
487		goto nla_put_failure;
488	nla_nest_end(skb, nest_parms);
489
490	if (nf_ct_zone(ct) &&
491	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
492		goto nla_put_failure;
493
494	if (ctnetlink_dump_status(skb, ct) < 0 ||
495	    ctnetlink_dump_timeout(skb, ct) < 0 ||
496	    ctnetlink_dump_acct(skb, ct, type) < 0 ||
497	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
498	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
499	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
500	    ctnetlink_dump_mark(skb, ct) < 0 ||
501	    ctnetlink_dump_secctx(skb, ct) < 0 ||
502	    ctnetlink_dump_labels(skb, ct) < 0 ||
503	    ctnetlink_dump_id(skb, ct) < 0 ||
504	    ctnetlink_dump_use(skb, ct) < 0 ||
505	    ctnetlink_dump_master(skb, ct) < 0 ||
506	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
507		goto nla_put_failure;
508
509	nlmsg_end(skb, nlh);
510	return skb->len;
511
512nlmsg_failure:
513nla_put_failure:
514	nlmsg_cancel(skb, nlh);
515	return -1;
516}
517
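/* The ctnetlink_*_size() helpers below compute an upper bound on the
 * attribute payload of one entry; ctnetlink_nlmsg_size() is used by
 * the event path to size the skb handed to nlmsg_new().
 */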
518static inline size_t
519ctnetlink_proto_size(const struct nf_conn *ct)
520{
521	struct nf_conntrack_l3proto *l3proto;
522	struct nf_conntrack_l4proto *l4proto;
523	size_t len = 0;
524
525	rcu_read_lock();
526	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
527	len += l3proto->nla_size;
528
529	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
530	len += l4proto->nla_size;
531	rcu_read_unlock();
532
533	return len;
534}
535
536static inline size_t
537ctnetlink_acct_size(const struct nf_conn *ct)
538{
539	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
540		return 0;
541	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
542	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
543	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
544	       ;
545}
546
547static inline int
548ctnetlink_secctx_size(const struct nf_conn *ct)
549{
550#ifdef CONFIG_NF_CONNTRACK_SECMARK
551	int len, ret;
552
553	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
554	if (ret)
555		return 0;
556
557	return nla_total_size(0) /* CTA_SECCTX */
558	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
559#else
560	return 0;
561#endif
562}
563
564static inline size_t
565ctnetlink_timestamp_size(const struct nf_conn *ct)
566{
567#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
568	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
569		return 0;
570	return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
571#else
572	return 0;
573#endif
574}
575
576static inline size_t
577ctnetlink_nlmsg_size(const struct nf_conn *ct)
578{
579	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
580	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
581	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
582	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
583	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
584	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
585	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
586	       + ctnetlink_acct_size(ct)
587	       + ctnetlink_timestamp_size(ct)
588	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
589	       + nla_total_size(0) /* CTA_PROTOINFO */
590	       + nla_total_size(0) /* CTA_HELP */
591	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
592	       + ctnetlink_secctx_size(ct)
593#ifdef CONFIG_NF_NAT_NEEDED
594	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
595	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
596#endif
597#ifdef CONFIG_NF_CONNTRACK_MARK
598	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
599#endif
600#ifdef CONFIG_NF_CONNTRACK_ZONES
601	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
602#endif
603	       + ctnetlink_proto_size(ct)
604	       + ctnetlink_label_size(ct)
605	       ;
606}
607
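/* Event path: translate the IPCT_* event bits delivered by the
 * conntrack core into a ctnetlink message and multicast it.  DESTROY
 * events go to NFNLGRP_CONNTRACK_DESTROY, NEW/RELATED to
 * NFNLGRP_CONNTRACK_NEW, everything else to NFNLGRP_CONNTRACK_UPDATE.
 * Returning -ENOBUFS signals delivery failure to the event cache.
 */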
608#ifdef CONFIG_NF_CONNTRACK_EVENTS
609static int
610ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
611{
612	struct net *net;
613	struct nlmsghdr *nlh;
614	struct nfgenmsg *nfmsg;
615	struct nlattr *nest_parms;
616	struct nf_conn *ct = item->ct;
617	struct sk_buff *skb;
618	unsigned int type;
619	unsigned int flags = 0, group;
620	int err;
621
622	/* ignore our fake conntrack entry */
623	if (nf_ct_is_untracked(ct))
624		return 0;
625
626	if (events & (1 << IPCT_DESTROY)) {
627		type = IPCTNL_MSG_CT_DELETE;
628		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
630		type = IPCTNL_MSG_CT_NEW;
631		flags = NLM_F_CREATE|NLM_F_EXCL;
632		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
634		type = IPCTNL_MSG_CT_NEW;
635		group = NFNLGRP_CONNTRACK_UPDATE;
636	} else
637		return 0;
638
639	net = nf_ct_net(ct);
640	if (!item->report && !nfnetlink_has_listeners(net, group))
641		return 0;
642
643	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
644	if (skb == NULL)
645		goto errout;
646
647	type |= NFNL_SUBSYS_CTNETLINK << 8;
648	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
649	if (nlh == NULL)
650		goto nlmsg_failure;
651
652	nfmsg = nlmsg_data(nlh);
653	nfmsg->nfgen_family = nf_ct_l3num(ct);
654	nfmsg->version	= NFNETLINK_V0;
655	nfmsg->res_id	= 0;
656
657	rcu_read_lock();
658	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
659	if (!nest_parms)
660		goto nla_put_failure;
661	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
662		goto nla_put_failure;
663	nla_nest_end(skb, nest_parms);
664
665	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
666	if (!nest_parms)
667		goto nla_put_failure;
668	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
669		goto nla_put_failure;
670	nla_nest_end(skb, nest_parms);
671
672	if (nf_ct_zone(ct) &&
673	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
674		goto nla_put_failure;
675
676	if (ctnetlink_dump_id(skb, ct) < 0)
677		goto nla_put_failure;
678
679	if (ctnetlink_dump_status(skb, ct) < 0)
680		goto nla_put_failure;
681
682	if (events & (1 << IPCT_DESTROY)) {
683		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
684		    ctnetlink_dump_timestamp(skb, ct) < 0)
685			goto nla_put_failure;
686	} else {
687		if (ctnetlink_dump_timeout(skb, ct) < 0)
688			goto nla_put_failure;
689
690		if (events & (1 << IPCT_PROTOINFO)
691		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
692			goto nla_put_failure;
693
694		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
695		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
696			goto nla_put_failure;
697
698#ifdef CONFIG_NF_CONNTRACK_SECMARK
699		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
700		    && ctnetlink_dump_secctx(skb, ct) < 0)
701			goto nla_put_failure;
702#endif
703		if (events & (1 << IPCT_LABEL) &&
704		     ctnetlink_dump_labels(skb, ct) < 0)
705			goto nla_put_failure;
706
707		if (events & (1 << IPCT_RELATED) &&
708		    ctnetlink_dump_master(skb, ct) < 0)
709			goto nla_put_failure;
710
711		if (events & (1 << IPCT_SEQADJ) &&
712		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
713			goto nla_put_failure;
714	}
715
716#ifdef CONFIG_NF_CONNTRACK_MARK
717	if ((events & (1 << IPCT_MARK) || ct->mark)
718	    && ctnetlink_dump_mark(skb, ct) < 0)
719		goto nla_put_failure;
720#endif
721	rcu_read_unlock();
722
723	nlmsg_end(skb, nlh);
724	err = nfnetlink_send(skb, net, item->portid, group, item->report,
725			     GFP_ATOMIC);
726	if (err == -ENOBUFS || err == -EAGAIN)
727		return -ENOBUFS;
728
729	return 0;
730
731nla_put_failure:
732	rcu_read_unlock();
733	nlmsg_cancel(skb, nlh);
734nlmsg_failure:
735	kfree_skb(skb);
736errout:
737	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
738		return -ENOBUFS;
739
740	return 0;
741}
742#endif /* CONFIG_NF_CONNTRACK_EVENTS */
743
744static int ctnetlink_done(struct netlink_callback *cb)
745{
746	if (cb->args[1])
747		nf_ct_put((struct nf_conn *)cb->args[1]);
748	kfree(cb->data);
749	return 0;
750}
751
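/* Optional mark/mask filter for dump and flush requests, built from
 * CTA_MARK and CTA_MARK_MASK.  Without CONFIG_NF_CONNTRACK_MARK the
 * allocation fails with -EOPNOTSUPP.
 */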
752struct ctnetlink_filter {
753	struct {
754		u_int32_t val;
755		u_int32_t mask;
756	} mark;
757};
758
759static struct ctnetlink_filter *
760ctnetlink_alloc_filter(const struct nlattr * const cda[])
761{
762#ifdef CONFIG_NF_CONNTRACK_MARK
763	struct ctnetlink_filter *filter;
764
765	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
766	if (filter == NULL)
767		return ERR_PTR(-ENOMEM);
768
769	filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
770	filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
771
772	return filter;
773#else
774	return ERR_PTR(-EOPNOTSUPP);
775#endif
776}
777
778static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
779{
780	struct ctnetlink_filter *filter = data;
781
782	if (filter == NULL)
783		return 1;
784
785#ifdef CONFIG_NF_CONNTRACK_MARK
786	if ((ct->mark & filter->mark.mask) == filter->mark.val)
787		return 1;
788#endif
789
790	return 0;
791}
792
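/* Walk the conntrack hash table under the bucket locks and emit one
 * message per ORIGINAL-direction entry.  cb->args[0] holds the current
 * bucket, cb->args[1] a referenced entry to resume from when the
 * previous skb filled up.  Entries may optionally be restricted by L3
 * protocol family and by the mark filter in cb->data.
 */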
793static int
794ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
795{
796	struct net *net = sock_net(skb->sk);
797	struct nf_conn *ct, *last;
798	struct nf_conntrack_tuple_hash *h;
799	struct hlist_nulls_node *n;
800	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
801	u_int8_t l3proto = nfmsg->nfgen_family;
802	int res;
803	spinlock_t *lockp;
804
805	last = (struct nf_conn *)cb->args[1];
806
807	local_bh_disable();
808	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
809restart:
810		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
811		spin_lock(lockp);
812		if (cb->args[0] >= net->ct.htable_size) {
813			spin_unlock(lockp);
814			goto out;
815		}
816		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
817					 hnnode) {
818			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
819				continue;
820			ct = nf_ct_tuplehash_to_ctrack(h);
			/* Dump entries of a given L3 protocol number.
			 * If it is not specified, i.e. l3proto == 0,
			 * then dump everything. */
824			if (l3proto && nf_ct_l3num(ct) != l3proto)
825				continue;
826			if (cb->args[1]) {
827				if (ct != last)
828					continue;
829				cb->args[1] = 0;
830			}
831			if (!ctnetlink_filter_match(ct, cb->data))
832				continue;
833
834			rcu_read_lock();
			res = ctnetlink_fill_info(skb,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
						  ct);
840			rcu_read_unlock();
841			if (res < 0) {
842				nf_conntrack_get(&ct->ct_general);
843				cb->args[1] = (unsigned long)ct;
844				spin_unlock(lockp);
845				goto out;
846			}
847		}
848		spin_unlock(lockp);
849		if (cb->args[1]) {
850			cb->args[1] = 0;
851			goto restart;
852		}
853	}
854out:
855	local_bh_enable();
856	if (last)
857		nf_ct_put(last);
858
859	return skb->len;
860}
861
862static inline int
863ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
864{
865	struct nlattr *tb[CTA_IP_MAX+1];
866	struct nf_conntrack_l3proto *l3proto;
867	int ret = 0;
868
869	ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
870	if (ret < 0)
871		return ret;
872
873	rcu_read_lock();
874	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
875
876	if (likely(l3proto->nlattr_to_tuple)) {
877		ret = nla_validate_nested(attr, CTA_IP_MAX,
878					  l3proto->nla_policy);
879		if (ret == 0)
880			ret = l3proto->nlattr_to_tuple(tb, tuple);
881	}
882
883	rcu_read_unlock();
884
885	return ret;
886}
887
888static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
889	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
890};
891
892static inline int
893ctnetlink_parse_tuple_proto(struct nlattr *attr,
894			    struct nf_conntrack_tuple *tuple)
895{
896	struct nlattr *tb[CTA_PROTO_MAX+1];
897	struct nf_conntrack_l4proto *l4proto;
898	int ret = 0;
899
900	ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
901	if (ret < 0)
902		return ret;
903
904	if (!tb[CTA_PROTO_NUM])
905		return -EINVAL;
906	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
907
908	rcu_read_lock();
909	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
910
911	if (likely(l4proto->nlattr_to_tuple)) {
912		ret = nla_validate_nested(attr, CTA_PROTO_MAX,
913					  l4proto->nla_policy);
914		if (ret == 0)
915			ret = l4proto->nlattr_to_tuple(tb, tuple);
916	}
917
918	rcu_read_unlock();
919
920	return ret;
921}
922
923static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
924	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
925	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
926};
927
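/* Parse a CTA_TUPLE_ORIG/REPLY/MASTER nest into a nf_conntrack_tuple.
 * Both CTA_TUPLE_IP and CTA_TUPLE_PROTO are mandatory; the l3num comes
 * from the nfgenmsg family and the direction from the attribute type.
 * A request for an IPv4/TCP entry would typically look like:
 *
 *   CTA_TUPLE_ORIG
 *     CTA_TUPLE_IP    { CTA_IP_V4_SRC, CTA_IP_V4_DST }
 *     CTA_TUPLE_PROTO { CTA_PROTO_NUM, CTA_PROTO_SRC_PORT, CTA_PROTO_DST_PORT }
 */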
928static int
929ctnetlink_parse_tuple(const struct nlattr * const cda[],
930		      struct nf_conntrack_tuple *tuple,
931		      enum ctattr_type type, u_int8_t l3num)
932{
933	struct nlattr *tb[CTA_TUPLE_MAX+1];
934	int err;
935
936	memset(tuple, 0, sizeof(*tuple));
937
938	err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
939	if (err < 0)
940		return err;
941
942	if (!tb[CTA_TUPLE_IP])
943		return -EINVAL;
944
945	tuple->src.l3num = l3num;
946
947	err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
948	if (err < 0)
949		return err;
950
951	if (!tb[CTA_TUPLE_PROTO])
952		return -EINVAL;
953
954	err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
955	if (err < 0)
956		return err;
957
958	/* orig and expect tuples get DIR_ORIGINAL */
959	if (type == CTA_TUPLE_REPLY)
960		tuple->dst.dir = IP_CT_DIR_REPLY;
961	else
962		tuple->dst.dir = IP_CT_DIR_ORIGINAL;
963
964	return 0;
965}
966
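/* CTA_ZONE is only honoured with CONFIG_NF_CONNTRACK_ZONES; otherwise
 * its mere presence is rejected with -EOPNOTSUPP.
 */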
967static int
968ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
969{
970	if (attr)
971#ifdef CONFIG_NF_CONNTRACK_ZONES
972		*zone = ntohs(nla_get_be16(attr));
973#else
974		return -EOPNOTSUPP;
975#endif
976	else
977		*zone = 0;
978
979	return 0;
980}
981
982static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
983	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
984				    .len = NF_CT_HELPER_NAME_LEN - 1 },
985};
986
987static inline int
988ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
989		     struct nlattr **helpinfo)
990{
991	int err;
992	struct nlattr *tb[CTA_HELP_MAX+1];
993
994	err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
995	if (err < 0)
996		return err;
997
998	if (!tb[CTA_HELP_NAME])
999		return -EINVAL;
1000
1001	*helper_name = nla_data(tb[CTA_HELP_NAME]);
1002
1003	if (tb[CTA_HELP_INFO])
1004		*helpinfo = tb[CTA_HELP_INFO];
1005
1006	return 0;
1007}
1008
1009static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
1010	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
1011	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
1012	[CTA_STATUS] 		= { .type = NLA_U32 },
1013	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
1014	[CTA_HELP]		= { .type = NLA_NESTED },
1015	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
1016	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
1017	[CTA_MARK]		= { .type = NLA_U32 },
1018	[CTA_ID]		= { .type = NLA_U32 },
1019	[CTA_NAT_DST]		= { .type = NLA_NESTED },
1020	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
1021	[CTA_NAT_SEQ_ADJ_ORIG]  = { .type = NLA_NESTED },
1022	[CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
1023	[CTA_ZONE]		= { .type = NLA_U16 },
1024	[CTA_MARK_MASK]		= { .type = NLA_U32 },
1025	[CTA_LABELS]		= { .type = NLA_BINARY,
1026				    .len = NF_CT_LABELS_MAX_SIZE },
1027	[CTA_LABELS_MASK]	= { .type = NLA_BINARY,
1028				    .len = NF_CT_LABELS_MAX_SIZE },
1029};
1030
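/* Flush the table via nf_ct_iterate_cleanup(), optionally restricted
 * to entries matching a CTA_MARK/CTA_MARK_MASK filter.
 */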
1031static int ctnetlink_flush_conntrack(struct net *net,
1032				     const struct nlattr * const cda[],
1033				     u32 portid, int report)
1034{
1035	struct ctnetlink_filter *filter = NULL;
1036
1037	if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1038		filter = ctnetlink_alloc_filter(cda);
1039		if (IS_ERR(filter))
1040			return PTR_ERR(filter);
1041	}
1042
1043	nf_ct_iterate_cleanup(net, ctnetlink_filter_match, filter,
1044			      portid, report);
1045	kfree(filter);
1046
1047	return 0;
1048}
1049
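/* Delete a single entry looked up by its original or reply tuple (a
 * request without any tuple flushes the whole table).  If CTA_ID is
 * present it must match the entry's identity cookie.  The entry is
 * removed by stopping its timer and calling nf_ct_delete().
 */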
1050static int
1051ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
1052			const struct nlmsghdr *nlh,
1053			const struct nlattr * const cda[])
1054{
1055	struct net *net = sock_net(ctnl);
1056	struct nf_conntrack_tuple_hash *h;
1057	struct nf_conntrack_tuple tuple;
1058	struct nf_conn *ct;
1059	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1060	u_int8_t u3 = nfmsg->nfgen_family;
1061	u16 zone;
1062	int err;
1063
1064	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1065	if (err < 0)
1066		return err;
1067
1068	if (cda[CTA_TUPLE_ORIG])
1069		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1070	else if (cda[CTA_TUPLE_REPLY])
1071		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1072	else {
1073		return ctnetlink_flush_conntrack(net, cda,
1074						 NETLINK_CB(skb).portid,
1075						 nlmsg_report(nlh));
1076	}
1077
1078	if (err < 0)
1079		return err;
1080
1081	h = nf_conntrack_find_get(net, zone, &tuple);
1082	if (!h)
1083		return -ENOENT;
1084
1085	ct = nf_ct_tuplehash_to_ctrack(h);
1086
1087	if (cda[CTA_ID]) {
1088		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
1089		if (id != (u32)(unsigned long)ct) {
1090			nf_ct_put(ct);
1091			return -ENOENT;
1092		}
1093	}
1094
1095	if (del_timer(&ct->timeout))
1096		nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
1097
1098	nf_ct_put(ct);
1099
1100	return 0;
1101}
1102
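/* GET handler: with NLM_F_DUMP start a table dump, otherwise look up
 * one entry by tuple and unicast the reply.  -EAGAIN is turned into
 * -ENOBUFS to avoid a replay loop in nfnetlink.
 */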
1103static int
1104ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
1105			const struct nlmsghdr *nlh,
1106			const struct nlattr * const cda[])
1107{
1108	struct net *net = sock_net(ctnl);
1109	struct nf_conntrack_tuple_hash *h;
1110	struct nf_conntrack_tuple tuple;
1111	struct nf_conn *ct;
1112	struct sk_buff *skb2 = NULL;
1113	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1114	u_int8_t u3 = nfmsg->nfgen_family;
1115	u16 zone;
1116	int err;
1117
1118	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1119		struct netlink_dump_control c = {
1120			.dump = ctnetlink_dump_table,
1121			.done = ctnetlink_done,
1122		};
1123
1124		if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1125			struct ctnetlink_filter *filter;
1126
1127			filter = ctnetlink_alloc_filter(cda);
1128			if (IS_ERR(filter))
1129				return PTR_ERR(filter);
1130
1131			c.data = filter;
1132		}
1133		return netlink_dump_start(ctnl, skb, nlh, &c);
1134	}
1135
1136	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1137	if (err < 0)
1138		return err;
1139
1140	if (cda[CTA_TUPLE_ORIG])
1141		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1142	else if (cda[CTA_TUPLE_REPLY])
1143		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1144	else
1145		return -EINVAL;
1146
1147	if (err < 0)
1148		return err;
1149
1150	h = nf_conntrack_find_get(net, zone, &tuple);
1151	if (!h)
1152		return -ENOENT;
1153
1154	ct = nf_ct_tuplehash_to_ctrack(h);
1155
1156	err = -ENOMEM;
1157	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1158	if (skb2 == NULL) {
1159		nf_ct_put(ct);
1160		return -ENOMEM;
1161	}
1162
1163	rcu_read_lock();
1164	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1165				  NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
1166	rcu_read_unlock();
1167	nf_ct_put(ct);
1168	if (err <= 0)
1169		goto free;
1170
1171	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1172	if (err < 0)
1173		goto out;
1174
1175	return 0;
1176
1177free:
1178	kfree_skb(skb2);
1179out:
1180	/* this avoids a loop in nfnetlink. */
1181	return err == -EAGAIN ? -ENOBUFS : err;
1182}
1183
1184static int ctnetlink_done_list(struct netlink_callback *cb)
1185{
1186	if (cb->args[1])
1187		nf_ct_put((struct nf_conn *)cb->args[1]);
1188	return 0;
1189}
1190
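/* Dump the per-cpu dying or unconfirmed lists.  cb->args[0] tracks the
 * cpu, cb->args[1] a referenced entry to resume from, and cb->args[2]
 * flags completion so a follow-up dump call returns 0 immediately.
 */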
1191static int
1192ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1193{
1194	struct nf_conn *ct, *last;
1195	struct nf_conntrack_tuple_hash *h;
1196	struct hlist_nulls_node *n;
1197	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1198	u_int8_t l3proto = nfmsg->nfgen_family;
1199	int res;
1200	int cpu;
1201	struct hlist_nulls_head *list;
1202	struct net *net = sock_net(skb->sk);
1203
1204	if (cb->args[2])
1205		return 0;
1206
1207	last = (struct nf_conn *)cb->args[1];
1208
1209	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1210		struct ct_pcpu *pcpu;
1211
1212		if (!cpu_possible(cpu))
1213			continue;
1214
1215		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1216		spin_lock_bh(&pcpu->lock);
1217		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1218restart:
1219		hlist_nulls_for_each_entry(h, n, list, hnnode) {
1220			ct = nf_ct_tuplehash_to_ctrack(h);
1221			if (l3proto && nf_ct_l3num(ct) != l3proto)
1222				continue;
1223			if (cb->args[1]) {
1224				if (ct != last)
1225					continue;
1226				cb->args[1] = 0;
1227			}
1228			rcu_read_lock();
1229			res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1230						  cb->nlh->nlmsg_seq,
1231						  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1232						  ct);
1233			rcu_read_unlock();
1234			if (res < 0) {
1235				if (!atomic_inc_not_zero(&ct->ct_general.use))
1236					continue;
1237				cb->args[0] = cpu;
1238				cb->args[1] = (unsigned long)ct;
1239				spin_unlock_bh(&pcpu->lock);
1240				goto out;
1241			}
1242		}
1243		if (cb->args[1]) {
1244			cb->args[1] = 0;
1245			goto restart;
1246		}
1247		spin_unlock_bh(&pcpu->lock);
1248	}
1249	cb->args[2] = 1;
1250out:
1251	if (last)
1252		nf_ct_put(last);
1253
1254	return skb->len;
1255}
1256
1257static int
1258ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1259{
1260	return ctnetlink_dump_list(skb, cb, true);
1261}
1262
1263static int
1264ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
1265		       const struct nlmsghdr *nlh,
1266		       const struct nlattr * const cda[])
1267{
1268	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1269		struct netlink_dump_control c = {
1270			.dump = ctnetlink_dump_dying,
1271			.done = ctnetlink_done_list,
1272		};
1273		return netlink_dump_start(ctnl, skb, nlh, &c);
1274	}
1275
1276	return -EOPNOTSUPP;
1277}
1278
1279static int
1280ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1281{
1282	return ctnetlink_dump_list(skb, cb, false);
1283}
1284
1285static int
1286ctnetlink_get_ct_unconfirmed(struct sock *ctnl, struct sk_buff *skb,
1287			     const struct nlmsghdr *nlh,
1288			     const struct nlattr * const cda[])
1289{
1290	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1291		struct netlink_dump_control c = {
1292			.dump = ctnetlink_dump_unconfirmed,
1293			.done = ctnetlink_done_list,
1294		};
1295		return netlink_dump_start(ctnl, skb, nlh, &c);
1296	}
1297
1298	return -EOPNOTSUPP;
1299}
1300
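/* NAT setup is delegated to the nf_nat module through
 * nfnetlink_parse_nat_setup_hook.  If the hook (or the per-family NAT
 * module) is missing, the nfnl lock and RCU are dropped around
 * request_module() and -EAGAIN is returned so nfnetlink replays the
 * whole request once the module has been loaded.
 */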
1301#ifdef CONFIG_NF_NAT_NEEDED
1302static int
1303ctnetlink_parse_nat_setup(struct nf_conn *ct,
1304			  enum nf_nat_manip_type manip,
1305			  const struct nlattr *attr)
1306{
1307	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
1308	int err;
1309
1310	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
1311	if (!parse_nat_setup) {
1312#ifdef CONFIG_MODULES
1313		rcu_read_unlock();
1314		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1315		if (request_module("nf-nat") < 0) {
1316			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1317			rcu_read_lock();
1318			return -EOPNOTSUPP;
1319		}
1320		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1321		rcu_read_lock();
1322		if (nfnetlink_parse_nat_setup_hook)
1323			return -EAGAIN;
1324#endif
1325		return -EOPNOTSUPP;
1326	}
1327
1328	err = parse_nat_setup(ct, manip, attr);
1329	if (err == -EAGAIN) {
1330#ifdef CONFIG_MODULES
1331		rcu_read_unlock();
1332		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1333		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1334			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1335			rcu_read_lock();
1336			return -EOPNOTSUPP;
1337		}
1338		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1339		rcu_read_lock();
1340#else
1341		err = -EOPNOTSUPP;
1342#endif
1343	}
1344	return err;
1345}
1346#endif
1347
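/* Status changes from userspace are restricted: IPS_EXPECTED,
 * IPS_CONFIRMED and IPS_DYING cannot be changed at all, IPS_SEEN_REPLY
 * and IPS_ASSURED can only be set, and the NAT bits are always masked
 * out (NAT must be configured through CTA_NAT_SRC/DST instead).
 */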
1348static int
1349ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1350{
1351	unsigned long d;
1352	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1353	d = ct->status ^ status;
1354
1355	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1356		/* unchangeable */
1357		return -EBUSY;
1358
1359	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1360		/* SEEN_REPLY bit can only be set */
1361		return -EBUSY;
1362
1363	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1364		/* ASSURED bit can only be set */
1365		return -EBUSY;
1366
1367	/* Be careful here, modifying NAT bits can screw up things,
1368	 * so don't let users modify them directly if they don't pass
1369	 * nf_nat_range. */
1370	ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
1371	return 0;
1372}
1373
1374static int
1375ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1376{
1377#ifdef CONFIG_NF_NAT_NEEDED
1378	int ret;
1379
1380	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1381		return 0;
1382
1383	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
1384					cda[CTA_NAT_DST]);
1385	if (ret < 0)
1386		return ret;
1387
1388	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
1389					cda[CTA_NAT_SRC]);
1390	return ret;
1391#else
1392	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
1393		return 0;
1394	return -EOPNOTSUPP;
1395#endif
1396}
1397
1398static inline int
1399ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1400{
1401	struct nf_conntrack_helper *helper;
1402	struct nf_conn_help *help = nfct_help(ct);
1403	char *helpname = NULL;
1404	struct nlattr *helpinfo = NULL;
1405	int err;
1406
1407	/* don't change helper of sibling connections */
1408	if (ct->master)
1409		return -EBUSY;
1410
1411	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1412	if (err < 0)
1413		return err;
1414
1415	if (!strcmp(helpname, "")) {
1416		if (help && help->helper) {
1417			/* we had a helper before ... */
1418			nf_ct_remove_expectations(ct);
1419			RCU_INIT_POINTER(help->helper, NULL);
1420		}
1421
1422		return 0;
1423	}
1424
1425	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1426					    nf_ct_protonum(ct));
1427	if (helper == NULL) {
1428#ifdef CONFIG_MODULES
1429		spin_unlock_bh(&nf_conntrack_expect_lock);
1430
1431		if (request_module("nfct-helper-%s", helpname) < 0) {
1432			spin_lock_bh(&nf_conntrack_expect_lock);
1433			return -EOPNOTSUPP;
1434		}
1435
1436		spin_lock_bh(&nf_conntrack_expect_lock);
1437		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1438						    nf_ct_protonum(ct));
1439		if (helper)
1440			return -EAGAIN;
1441#endif
1442		return -EOPNOTSUPP;
1443	}
1444
1445	if (help) {
1446		if (help->helper == helper) {
1447			/* update private helper data if allowed. */
1448			if (helper->from_nlattr)
1449				helper->from_nlattr(helpinfo, ct);
1450			return 0;
1451		} else
1452			return -EBUSY;
1453	}
1454
1455	/* we cannot set a helper for an existing conntrack */
1456	return -EOPNOTSUPP;
1457}
1458
1459static inline int
1460ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1461{
1462	u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1463
1464	if (!del_timer(&ct->timeout))
1465		return -ETIME;
1466
1467	ct->timeout.expires = jiffies + timeout * HZ;
1468	add_timer(&ct->timeout);
1469
1470	return 0;
1471}
1472
1473static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1474	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
1475	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
1476	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
1477};
1478
1479static inline int
1480ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1481{
1482	const struct nlattr *attr = cda[CTA_PROTOINFO];
1483	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1484	struct nf_conntrack_l4proto *l4proto;
1485	int err = 0;
1486
1487	err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1488	if (err < 0)
1489		return err;
1490
1491	rcu_read_lock();
1492	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
1493	if (l4proto->from_nlattr)
1494		err = l4proto->from_nlattr(tb, ct);
1495	rcu_read_unlock();
1496
1497	return err;
1498}
1499
1500static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
1501	[CTA_SEQADJ_CORRECTION_POS]	= { .type = NLA_U32 },
1502	[CTA_SEQADJ_OFFSET_BEFORE]	= { .type = NLA_U32 },
1503	[CTA_SEQADJ_OFFSET_AFTER]	= { .type = NLA_U32 },
1504};
1505
1506static inline int
1507change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr)
1508{
1509	int err;
1510	struct nlattr *cda[CTA_SEQADJ_MAX+1];
1511
1512	err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy);
1513	if (err < 0)
1514		return err;
1515
1516	if (!cda[CTA_SEQADJ_CORRECTION_POS])
1517		return -EINVAL;
1518
1519	seq->correction_pos =
1520		ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
1521
1522	if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
1523		return -EINVAL;
1524
1525	seq->offset_before =
1526		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
1527
1528	if (!cda[CTA_SEQADJ_OFFSET_AFTER])
1529		return -EINVAL;
1530
1531	seq->offset_after =
1532		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
1533
1534	return 0;
1535}
1536
1537static int
1538ctnetlink_change_seq_adj(struct nf_conn *ct,
1539			 const struct nlattr * const cda[])
1540{
1541	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
1542	int ret = 0;
1543
1544	if (!seqadj)
1545		return 0;
1546
1547	if (cda[CTA_SEQ_ADJ_ORIG]) {
1548		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
1549				     cda[CTA_SEQ_ADJ_ORIG]);
1550		if (ret < 0)
1551			return ret;
1552
1553		ct->status |= IPS_SEQ_ADJUST;
1554	}
1555
1556	if (cda[CTA_SEQ_ADJ_REPLY]) {
1557		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
1558				     cda[CTA_SEQ_ADJ_REPLY]);
1559		if (ret < 0)
1560			return ret;
1561
1562		ct->status |= IPS_SEQ_ADJUST;
1563	}
1564
1565	return 0;
1566}
1567
1568static int
1569ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
1570{
1571#ifdef CONFIG_NF_CONNTRACK_LABELS
1572	size_t len = nla_len(cda[CTA_LABELS]);
1573	const void *mask = cda[CTA_LABELS_MASK];
1574
1575	if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
1576		return -EINVAL;
1577
1578	if (mask) {
1579		if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
1580		    nla_len(cda[CTA_LABELS_MASK]) != len)
1581			return -EINVAL;
1582		mask = nla_data(cda[CTA_LABELS_MASK]);
1583	}
1584
1585	len /= sizeof(u32);
1586
1587	return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
1588#else
1589	return -EOPNOTSUPP;
1590#endif
1591}
1592
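/* Update path for an existing entry: NAT and master tuple changes are
 * only allowed at creation time, everything else (helper, timeout,
 * status, protoinfo, mark, seqadj, labels) is applied in place.
 */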
1593static int
1594ctnetlink_change_conntrack(struct nf_conn *ct,
1595			   const struct nlattr * const cda[])
1596{
1597	int err;
1598
	/* only allow NAT changes and master assignment for new conntracks */
1600	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1601		return -EOPNOTSUPP;
1602
1603	if (cda[CTA_HELP]) {
1604		err = ctnetlink_change_helper(ct, cda);
1605		if (err < 0)
1606			return err;
1607	}
1608
1609	if (cda[CTA_TIMEOUT]) {
1610		err = ctnetlink_change_timeout(ct, cda);
1611		if (err < 0)
1612			return err;
1613	}
1614
1615	if (cda[CTA_STATUS]) {
1616		err = ctnetlink_change_status(ct, cda);
1617		if (err < 0)
1618			return err;
1619	}
1620
1621	if (cda[CTA_PROTOINFO]) {
1622		err = ctnetlink_change_protoinfo(ct, cda);
1623		if (err < 0)
1624			return err;
1625	}
1626
1627#if defined(CONFIG_NF_CONNTRACK_MARK)
1628	if (cda[CTA_MARK])
1629		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1630#endif
1631
1632	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
1633		err = ctnetlink_change_seq_adj(ct, cda);
1634		if (err < 0)
1635			return err;
1636	}
1637
1638	if (cda[CTA_LABELS]) {
1639		err = ctnetlink_attach_labels(ct, cda);
1640		if (err < 0)
1641			return err;
1642	}
1643
1644	return 0;
1645}
1646
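/* Create a new entry from a netlink request.  CTA_TIMEOUT is
 * mandatory.  All extensions (helper, acct, tstamp, ecache, labels)
 * are attached before IPS_CONFIRMED is set and the entry is inserted
 * into the hash, so the packet path never sees it half-initialised.
 * A missing helper module triggers request_module() and -EAGAIN so
 * the request is replayed.
 */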
1647static struct nf_conn *
1648ctnetlink_create_conntrack(struct net *net, u16 zone,
1649			   const struct nlattr * const cda[],
1650			   struct nf_conntrack_tuple *otuple,
1651			   struct nf_conntrack_tuple *rtuple,
1652			   u8 u3)
1653{
1654	struct nf_conn *ct;
1655	int err = -EINVAL;
1656	struct nf_conntrack_helper *helper;
1657	struct nf_conn_tstamp *tstamp;
1658
1659	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1660	if (IS_ERR(ct))
1661		return ERR_PTR(-ENOMEM);
1662
1663	if (!cda[CTA_TIMEOUT])
1664		goto err1;
1665	ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1666
1667	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1668
1669	rcu_read_lock();
1670 	if (cda[CTA_HELP]) {
1671		char *helpname = NULL;
1672		struct nlattr *helpinfo = NULL;
1673
1674		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1675 		if (err < 0)
1676			goto err2;
1677
1678		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1679						    nf_ct_protonum(ct));
1680		if (helper == NULL) {
1681			rcu_read_unlock();
1682#ifdef CONFIG_MODULES
1683			if (request_module("nfct-helper-%s", helpname) < 0) {
1684				err = -EOPNOTSUPP;
1685				goto err1;
1686			}
1687
1688			rcu_read_lock();
1689			helper = __nf_conntrack_helper_find(helpname,
1690							    nf_ct_l3num(ct),
1691							    nf_ct_protonum(ct));
1692			if (helper) {
1693				err = -EAGAIN;
1694				goto err2;
1695			}
1696			rcu_read_unlock();
1697#endif
1698			err = -EOPNOTSUPP;
1699			goto err1;
1700		} else {
1701			struct nf_conn_help *help;
1702
1703			help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
1704			if (help == NULL) {
1705				err = -ENOMEM;
1706				goto err2;
1707			}
1708			/* set private helper data if allowed. */
1709			if (helper->from_nlattr)
1710				helper->from_nlattr(helpinfo, ct);
1711
1712			/* not in hash table yet so not strictly necessary */
1713			RCU_INIT_POINTER(help->helper, helper);
1714		}
1715	} else {
		/* try an implicit helper assignment */
1717		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1718		if (err < 0)
1719			goto err2;
1720	}
1721
1722	err = ctnetlink_setup_nat(ct, cda);
1723	if (err < 0)
1724		goto err2;
1725
1726	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1727	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1728	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1729	nf_ct_labels_ext_add(ct);
1730
1731	/* we must add conntrack extensions before confirmation. */
1732	ct->status |= IPS_CONFIRMED;
1733
1734	if (cda[CTA_STATUS]) {
1735		err = ctnetlink_change_status(ct, cda);
1736		if (err < 0)
1737			goto err2;
1738	}
1739
1740	if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
1741		err = ctnetlink_change_seq_adj(ct, cda);
1742		if (err < 0)
1743			goto err2;
1744	}
1745
1746	memset(&ct->proto, 0, sizeof(ct->proto));
1747	if (cda[CTA_PROTOINFO]) {
1748		err = ctnetlink_change_protoinfo(ct, cda);
1749		if (err < 0)
1750			goto err2;
1751	}
1752
1753#if defined(CONFIG_NF_CONNTRACK_MARK)
1754	if (cda[CTA_MARK])
1755		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1756#endif
1757
1758	/* setup master conntrack: this is a confirmed expectation */
1759	if (cda[CTA_TUPLE_MASTER]) {
1760		struct nf_conntrack_tuple master;
1761		struct nf_conntrack_tuple_hash *master_h;
1762		struct nf_conn *master_ct;
1763
1764		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
1765		if (err < 0)
1766			goto err2;
1767
1768		master_h = nf_conntrack_find_get(net, zone, &master);
1769		if (master_h == NULL) {
1770			err = -ENOENT;
1771			goto err2;
1772		}
1773		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
1774		__set_bit(IPS_EXPECTED_BIT, &ct->status);
1775		ct->master = master_ct;
1776	}
1777	tstamp = nf_conn_tstamp_find(ct);
1778	if (tstamp)
1779		tstamp->start = ktime_get_real_ns();
1780
1781	err = nf_conntrack_hash_check_insert(ct);
1782	if (err < 0)
1783		goto err2;
1784
1785	rcu_read_unlock();
1786
1787	return ct;
1788
1789err2:
1790	rcu_read_unlock();
1791err1:
1792	nf_conntrack_free(ct);
1793	return ERR_PTR(err);
1794}
1795
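/* NEW handler: if no matching entry exists and NLM_F_CREATE is set,
 * create one (both tuples required) and report it as a NEW/RELATED
 * event; if the entry exists and NLM_F_EXCL is absent, update it and
 * report the change.  Otherwise return -ENOENT or -EEXIST.
 */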
1796static int
1797ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1798			const struct nlmsghdr *nlh,
1799			const struct nlattr * const cda[])
1800{
1801	struct net *net = sock_net(ctnl);
1802	struct nf_conntrack_tuple otuple, rtuple;
1803	struct nf_conntrack_tuple_hash *h = NULL;
1804	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1805	struct nf_conn *ct;
1806	u_int8_t u3 = nfmsg->nfgen_family;
1807	u16 zone;
1808	int err;
1809
1810	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1811	if (err < 0)
1812		return err;
1813
1814	if (cda[CTA_TUPLE_ORIG]) {
1815		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
1816		if (err < 0)
1817			return err;
1818	}
1819
1820	if (cda[CTA_TUPLE_REPLY]) {
1821		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
1822		if (err < 0)
1823			return err;
1824	}
1825
1826	if (cda[CTA_TUPLE_ORIG])
1827		h = nf_conntrack_find_get(net, zone, &otuple);
1828	else if (cda[CTA_TUPLE_REPLY])
1829		h = nf_conntrack_find_get(net, zone, &rtuple);
1830
1831	if (h == NULL) {
1832		err = -ENOENT;
1833		if (nlh->nlmsg_flags & NLM_F_CREATE) {
1834			enum ip_conntrack_events events;
1835
1836			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
1837				return -EINVAL;
1838
1839			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1840							&rtuple, u3);
1841			if (IS_ERR(ct))
1842				return PTR_ERR(ct);
1843
1844			err = 0;
1845			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1846				events = IPCT_RELATED;
1847			else
1848				events = IPCT_NEW;
1849
1850			if (cda[CTA_LABELS] &&
1851			    ctnetlink_attach_labels(ct, cda) == 0)
1852				events |= (1 << IPCT_LABEL);
1853
1854			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1855						      (1 << IPCT_ASSURED) |
1856						      (1 << IPCT_HELPER) |
1857						      (1 << IPCT_PROTOINFO) |
1858						      (1 << IPCT_SEQADJ) |
1859						      (1 << IPCT_MARK) | events,
1860						      ct, NETLINK_CB(skb).portid,
1861						      nlmsg_report(nlh));
1862			nf_ct_put(ct);
1863		}
1864
1865		return err;
1866	}
1867	/* implicit 'else' */
1868
1869	err = -EEXIST;
1870	ct = nf_ct_tuplehash_to_ctrack(h);
1871	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1872		spin_lock_bh(&nf_conntrack_expect_lock);
1873		err = ctnetlink_change_conntrack(ct, cda);
1874		spin_unlock_bh(&nf_conntrack_expect_lock);
1875		if (err == 0) {
1876			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1877						      (1 << IPCT_ASSURED) |
1878						      (1 << IPCT_HELPER) |
1879						      (1 << IPCT_LABEL) |
1880						      (1 << IPCT_PROTOINFO) |
1881						      (1 << IPCT_SEQADJ) |
1882						      (1 << IPCT_MARK),
1883						      ct, NETLINK_CB(skb).portid,
1884						      nlmsg_report(nlh));
1885		}
1886	}
1887
1888	nf_ct_put(ct);
1889	return err;
1890}
1891
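/* Per-cpu statistics (IPCTNL_MSG_CT_GET_STATS_CPU): one message per
 * possible cpu, with the cpu number carried in nfgenmsg->res_id.
 */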
1892static int
1893ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
1894				__u16 cpu, const struct ip_conntrack_stat *st)
1895{
1896	struct nlmsghdr *nlh;
1897	struct nfgenmsg *nfmsg;
1898	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1899
1900	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
1901	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1902	if (nlh == NULL)
1903		goto nlmsg_failure;
1904
1905	nfmsg = nlmsg_data(nlh);
1906	nfmsg->nfgen_family = AF_UNSPEC;
1907	nfmsg->version      = NFNETLINK_V0;
1908	nfmsg->res_id	    = htons(cpu);
1909
1910	if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
1911	    nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
1912	    nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
1913	    nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
1914	    nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
1915	    nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
1916	    nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
1917	    nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
1918	    nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
1919				htonl(st->insert_failed)) ||
1920	    nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
1921	    nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
1922	    nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
1923	    nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
1924				htonl(st->search_restart)))
1925		goto nla_put_failure;
1926
1927	nlmsg_end(skb, nlh);
1928	return skb->len;
1929
1930nla_put_failure:
1931nlmsg_failure:
1932	nlmsg_cancel(skb, nlh);
1933	return -1;
1934}
1935
1936static int
1937ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
1938{
1939	int cpu;
1940	struct net *net = sock_net(skb->sk);
1941
1942	if (cb->args[0] == nr_cpu_ids)
1943		return 0;
1944
1945	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1946		const struct ip_conntrack_stat *st;
1947
1948		if (!cpu_possible(cpu))
1949			continue;
1950
1951		st = per_cpu_ptr(net->ct.stat, cpu);
1952		if (ctnetlink_ct_stat_cpu_fill_info(skb,
1953						    NETLINK_CB(cb->skb).portid,
1954						    cb->nlh->nlmsg_seq,
1955						    cpu, st) < 0)
1956				break;
1957	}
1958	cb->args[0] = cpu;
1959
1960	return skb->len;
1961}
1962
1963static int
1964ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
1965		      const struct nlmsghdr *nlh,
1966		      const struct nlattr * const cda[])
1967{
1968	if (nlh->nlmsg_flags & NLM_F_DUMP) {
1969		struct netlink_dump_control c = {
1970			.dump = ctnetlink_ct_stat_cpu_dump,
1971		};
1972		return netlink_dump_start(ctnl, skb, nlh, &c);
1973	}
1974
1975	return 0;
1976}
1977
1978static int
1979ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
1980			    struct net *net)
1981{
1982	struct nlmsghdr *nlh;
1983	struct nfgenmsg *nfmsg;
1984	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1985	unsigned int nr_conntracks = atomic_read(&net->ct.count);
1986
1987	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
1988	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1989	if (nlh == NULL)
1990		goto nlmsg_failure;
1991
1992	nfmsg = nlmsg_data(nlh);
1993	nfmsg->nfgen_family = AF_UNSPEC;
1994	nfmsg->version      = NFNETLINK_V0;
1995	nfmsg->res_id	    = 0;
1996
1997	if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
1998		goto nla_put_failure;
1999
2000	nlmsg_end(skb, nlh);
2001	return skb->len;
2002
2003nla_put_failure:
2004nlmsg_failure:
2005	nlmsg_cancel(skb, nlh);
2006	return -1;
2007}
2008
2009static int
2010ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
2011		  const struct nlmsghdr *nlh,
2012		  const struct nlattr * const cda[])
2013{
2014	struct sk_buff *skb2;
2015	int err;
2016
2017	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2018	if (skb2 == NULL)
2019		return -ENOMEM;
2020
2021	err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
2022					  nlh->nlmsg_seq,
2023					  NFNL_MSG_TYPE(nlh->nlmsg_type),
2024					  sock_net(skb->sk));
2025	if (err <= 0)
2026		goto free;
2027
2028	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2029	if (err < 0)
2030		goto out;
2031
2032	return 0;
2033
2034free:
2035	kfree_skb(skb2);
2036out:
2037	/* this avoids a loop in nfnetlink. */
2038	return err == -EAGAIN ? -ENOBUFS : err;
2039}
2040
2041static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2042	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
2043	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
2044	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
2045	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
2046	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
2047	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING,
2048				    .len = NF_CT_HELPER_NAME_LEN - 1 },
2049	[CTA_EXPECT_ZONE]	= { .type = NLA_U16 },
2050	[CTA_EXPECT_FLAGS]	= { .type = NLA_U32 },
2051	[CTA_EXPECT_CLASS]	= { .type = NLA_U32 },
2052	[CTA_EXPECT_NAT]	= { .type = NLA_NESTED },
2053	[CTA_EXPECT_FN]		= { .type = NLA_NUL_STRING },
2054};
2055
2056static struct nf_conntrack_expect *
2057ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2058		       struct nf_conntrack_helper *helper,
2059		       struct nf_conntrack_tuple *tuple,
2060		       struct nf_conntrack_tuple *mask);
2061
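/* Glue for nfnetlink_queue (CONFIG_NETFILTER_NETLINK_QUEUE_CT): build
 * a conntrack attribute set for packets delivered to userspace queues
 * and apply the changes userspace sends back with its verdict.
 */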
2062#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2063static size_t
2064ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
2065{
2066	return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2067	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2068	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2069	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2070	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
2071	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2072	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2073	       + nla_total_size(0) /* CTA_PROTOINFO */
2074	       + nla_total_size(0) /* CTA_HELP */
2075	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2076	       + ctnetlink_secctx_size(ct)
2077#ifdef CONFIG_NF_NAT_NEEDED
2078	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2079	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2080#endif
2081#ifdef CONFIG_NF_CONNTRACK_MARK
2082	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2083#endif
2084#ifdef CONFIG_NF_CONNTRACK_ZONES
2085	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
2086#endif
2087	       + ctnetlink_proto_size(ct)
2088	       ;
2089}
2090
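/* Serialize the conntrack entry as nested CTA_* attributes so that
 * nf_queue can hand the conntrack state to userspace along with the packet.
 */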
2091static int
2092ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
2093{
2094	struct nlattr *nest_parms;
2095
2096	rcu_read_lock();
2097	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
2098	if (!nest_parms)
2099		goto nla_put_failure;
2100	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2101		goto nla_put_failure;
2102	nla_nest_end(skb, nest_parms);
2103
2104	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
2105	if (!nest_parms)
2106		goto nla_put_failure;
2107	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2108		goto nla_put_failure;
2109	nla_nest_end(skb, nest_parms);
2110
2111	if (nf_ct_zone(ct)) {
2112		if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
2113			goto nla_put_failure;
2114	}
2115
2116	if (ctnetlink_dump_id(skb, ct) < 0)
2117		goto nla_put_failure;
2118
2119	if (ctnetlink_dump_status(skb, ct) < 0)
2120		goto nla_put_failure;
2121
2122	if (ctnetlink_dump_timeout(skb, ct) < 0)
2123		goto nla_put_failure;
2124
2125	if (ctnetlink_dump_protoinfo(skb, ct) < 0)
2126		goto nla_put_failure;
2127
2128	if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2129		goto nla_put_failure;
2130
2131#ifdef CONFIG_NF_CONNTRACK_SECMARK
2132	if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2133		goto nla_put_failure;
2134#endif
2135	if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2136		goto nla_put_failure;
2137
2138	if ((ct->status & IPS_SEQ_ADJUST) &&
2139	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
2140		goto nla_put_failure;
2141
2142#ifdef CONFIG_NF_CONNTRACK_MARK
2143	if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
2144		goto nla_put_failure;
2145#endif
2146	if (ctnetlink_dump_labels(skb, ct) < 0)
2147		goto nla_put_failure;
2148	rcu_read_unlock();
2149	return 0;
2150
2151nla_put_failure:
2152	rcu_read_unlock();
2153	return -ENOSPC;
2154}
2155
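/* Apply conntrack changes requested by userspace together with the queue
 * verdict: timeout, status, helper, labels and mark.  The caller holds
 * nf_conntrack_expect_lock.
 */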
2156static int
2157ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2158{
2159	int err;
2160
2161	if (cda[CTA_TIMEOUT]) {
2162		err = ctnetlink_change_timeout(ct, cda);
2163		if (err < 0)
2164			return err;
2165	}
2166	if (cda[CTA_STATUS]) {
2167		err = ctnetlink_change_status(ct, cda);
2168		if (err < 0)
2169			return err;
2170	}
2171	if (cda[CTA_HELP]) {
2172		err = ctnetlink_change_helper(ct, cda);
2173		if (err < 0)
2174			return err;
2175	}
2176	if (cda[CTA_LABELS]) {
2177		err = ctnetlink_attach_labels(ct, cda);
2178		if (err < 0)
2179			return err;
2180	}
2181#if defined(CONFIG_NF_CONNTRACK_MARK)
2182	if (cda[CTA_MARK]) {
2183		u32 mask = 0, mark, newmark;
2184		if (cda[CTA_MARK_MASK])
2185			mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
2186
2187		mark = ntohl(nla_get_be32(cda[CTA_MARK]));
2188		newmark = (ct->mark & mask) ^ mark;
2189		if (newmark != ct->mark)
2190			ct->mark = newmark;
2191	}
2192#endif
2193	return 0;
2194}
2195
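/* Parse the nested conntrack attribute supplied with the verdict against
 * ct_nla_policy and apply it under nf_conntrack_expect_lock.
 */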
2196static int
2197ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
2198{
2199	struct nlattr *cda[CTA_MAX+1];
2200	int ret;
2201
2202	ret = nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
2203	if (ret < 0)
2204		return ret;
2205
2206	spin_lock_bh(&nf_conntrack_expect_lock);
2207	ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
2208	spin_unlock_bh(&nf_conntrack_expect_lock);
2209
2210	return ret;
2211}
2212
2213static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
2214				       const struct nf_conn *ct,
2215				       struct nf_conntrack_tuple *tuple,
2216				       struct nf_conntrack_tuple *mask)
2217{
2218	int err;
2219
2220	err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
2221				    nf_ct_l3num(ct));
2222	if (err < 0)
2223		return err;
2224
2225	return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
2226				     nf_ct_l3num(ct));
2227}
2228
2229static int
2230ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2231				u32 portid, u32 report)
2232{
2233	struct nlattr *cda[CTA_EXPECT_MAX+1];
2234	struct nf_conntrack_tuple tuple, mask;
2235	struct nf_conntrack_helper *helper = NULL;
2236	struct nf_conntrack_expect *exp;
2237	int err;
2238
2239	err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
2240	if (err < 0)
2241		return err;
2242
2243	err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
2244					  ct, &tuple, &mask);
2245	if (err < 0)
2246		return err;
2247
2248	if (cda[CTA_EXPECT_HELP_NAME]) {
2249		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2250
2251		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2252						    nf_ct_protonum(ct));
2253		if (helper == NULL)
2254			return -EOPNOTSUPP;
2255	}
2256
2257	exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
2258				     helper, &tuple, &mask);
2259	if (IS_ERR(exp))
2260		return PTR_ERR(exp);
2261
2262	err = nf_ct_expect_related_report(exp, portid, report);
2263	if (err < 0) {
2264		nf_ct_expect_put(exp);
2265		return err;
2266	}
2267
2268	return 0;
2269}
2270
2271static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
2272	.build_size	= ctnetlink_nfqueue_build_size,
2273	.build		= ctnetlink_nfqueue_build,
2274	.parse		= ctnetlink_nfqueue_parse,
2275	.attach_expect	= ctnetlink_nfqueue_attach_expect,
2276	.seq_adjust	= nf_ct_tcp_seqadj_set,
2277};
2278#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
2279
2280/***********************************************************************
2281 * EXPECT
2282 ***********************************************************************/
2283
2284static inline int
2285ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2286			 const struct nf_conntrack_tuple *tuple,
2287			 enum ctattr_expect type)
2288{
2289	struct nlattr *nest_parms;
2290
2291	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
2292	if (!nest_parms)
2293		goto nla_put_failure;
2294	if (ctnetlink_dump_tuples(skb, tuple) < 0)
2295		goto nla_put_failure;
2296	nla_nest_end(skb, nest_parms);
2297
2298	return 0;
2299
2300nla_put_failure:
2301	return -1;
2302}
2303
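/* The expectation mask only covers the source part of a tuple, so build a
 * scratch tuple prefilled with 0xff, copy in the masked source address and
 * port plus the real protocol number, and reuse the ordinary tuple dumpers.
 */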
2304static inline int
2305ctnetlink_exp_dump_mask(struct sk_buff *skb,
2306			const struct nf_conntrack_tuple *tuple,
2307			const struct nf_conntrack_tuple_mask *mask)
2308{
2309	int ret;
2310	struct nf_conntrack_l3proto *l3proto;
2311	struct nf_conntrack_l4proto *l4proto;
2312	struct nf_conntrack_tuple m;
2313	struct nlattr *nest_parms;
2314
2315	memset(&m, 0xFF, sizeof(m));
2316	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2317	m.src.u.all = mask->src.u.all;
2318	m.dst.protonum = tuple->dst.protonum;
2319
2320	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
2321	if (!nest_parms)
2322		goto nla_put_failure;
2323
2324	rcu_read_lock();
2325	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
2326	ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
2327	if (ret >= 0) {
2328		l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
2329					       tuple->dst.protonum);
2330		ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2331	}
2332	rcu_read_unlock();
2333
2334	if (unlikely(ret < 0))
2335		goto nla_put_failure;
2336
2337	nla_nest_end(skb, nest_parms);
2338
2339	return 0;
2340
2341nla_put_failure:
2342	return -1;
2343}
2344
2345static const union nf_inet_addr any_addr;
2346
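/* Dump a single expectation: its tuple, mask and master tuple, an optional
 * NAT section (direction plus saved source tuple), timeout, id (derived
 * from the kernel address of the expectation), flags, class and, when
 * available, the helper and expectfn names.
 */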
2347static int
2348ctnetlink_exp_dump_expect(struct sk_buff *skb,
2349			  const struct nf_conntrack_expect *exp)
2350{
2351	struct nf_conn *master = exp->master;
2352	long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
2353	struct nf_conn_help *help;
2354#ifdef CONFIG_NF_NAT_NEEDED
2355	struct nlattr *nest_parms;
2356	struct nf_conntrack_tuple nat_tuple = {};
2357#endif
2358	struct nf_ct_helper_expectfn *expfn;
2359
2360	if (timeout < 0)
2361		timeout = 0;
2362
2363	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
2364		goto nla_put_failure;
2365	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
2366		goto nla_put_failure;
2367	if (ctnetlink_exp_dump_tuple(skb,
2368				 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2369				 CTA_EXPECT_MASTER) < 0)
2370		goto nla_put_failure;
2371
2372#ifdef CONFIG_NF_NAT_NEEDED
2373	if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
2374	    exp->saved_proto.all) {
2375		nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
2376		if (!nest_parms)
2377			goto nla_put_failure;
2378
2379		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
2380			goto nla_put_failure;
2381
2382		nat_tuple.src.l3num = nf_ct_l3num(master);
2383		nat_tuple.src.u3 = exp->saved_addr;
2384		nat_tuple.dst.protonum = nf_ct_protonum(master);
2385		nat_tuple.src.u = exp->saved_proto;
2386
2387		if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
2388						CTA_EXPECT_NAT_TUPLE) < 0)
2389			goto nla_put_failure;
2390		nla_nest_end(skb, nest_parms);
2391	}
2392#endif
2393	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
2394	    nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
2395	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
2396	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
2397		goto nla_put_failure;
2398	help = nfct_help(master);
2399	if (help) {
2400		struct nf_conntrack_helper *helper;
2401
2402		helper = rcu_dereference(help->helper);
2403		if (helper &&
2404		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
2405			goto nla_put_failure;
2406	}
2407	expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
2408	if (expfn != NULL &&
2409	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
2410		goto nla_put_failure;
2411
2412	return 0;
2413
2414nla_put_failure:
2415	return -1;
2416}
2417
2418static int
2419ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2420			int event, const struct nf_conntrack_expect *exp)
2421{
2422	struct nlmsghdr *nlh;
2423	struct nfgenmsg *nfmsg;
2424	unsigned int flags = portid ? NLM_F_MULTI : 0;
2425
2426	event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2427	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2428	if (nlh == NULL)
2429		goto nlmsg_failure;
2430
2431	nfmsg = nlmsg_data(nlh);
2432	nfmsg->nfgen_family = exp->tuple.src.l3num;
2433	nfmsg->version	    = NFNETLINK_V0;
2434	nfmsg->res_id	    = 0;
2435
2436	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2437		goto nla_put_failure;
2438
2439	nlmsg_end(skb, nlh);
2440	return skb->len;
2441
2442nlmsg_failure:
2443nla_put_failure:
2444	nlmsg_cancel(skb, nlh);
2445	return -1;
2446}
2447
2448#ifdef CONFIG_NF_CONNTRACK_EVENTS
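/* Expectation event callback: build an EXP_NEW or EXP_DELETE message and
 * send it to the matching NFNLGRP_CONNTRACK_EXP_* group.  On allocation
 * failure an ENOBUFS error is reported via nfnetlink_set_err().
 */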
2449static int
2450ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2451{
2452	struct nf_conntrack_expect *exp = item->exp;
2453	struct net *net = nf_ct_exp_net(exp);
2454	struct nlmsghdr *nlh;
2455	struct nfgenmsg *nfmsg;
2456	struct sk_buff *skb;
2457	unsigned int type, group;
2458	int flags = 0;
2459
2460	if (events & (1 << IPEXP_DESTROY)) {
2461		type = IPCTNL_MSG_EXP_DELETE;
2462		group = NFNLGRP_CONNTRACK_EXP_DESTROY;
2463	} else if (events & (1 << IPEXP_NEW)) {
2464		type = IPCTNL_MSG_EXP_NEW;
2465		flags = NLM_F_CREATE|NLM_F_EXCL;
2466		group = NFNLGRP_CONNTRACK_EXP_NEW;
2467	} else
2468		return 0;
2469
2470	if (!item->report && !nfnetlink_has_listeners(net, group))
2471		return 0;
2472
2473	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2474	if (skb == NULL)
2475		goto errout;
2476
2477	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2478	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
2479	if (nlh == NULL)
2480		goto nlmsg_failure;
2481
2482	nfmsg = nlmsg_data(nlh);
2483	nfmsg->nfgen_family = exp->tuple.src.l3num;
2484	nfmsg->version	    = NFNETLINK_V0;
2485	nfmsg->res_id	    = 0;
2486
2487	rcu_read_lock();
2488	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2489		goto nla_put_failure;
2490	rcu_read_unlock();
2491
2492	nlmsg_end(skb, nlh);
2493	nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
2494	return 0;
2495
2496nla_put_failure:
2497	rcu_read_unlock();
2498	nlmsg_cancel(skb, nlh);
2499nlmsg_failure:
2500	kfree_skb(skb);
2501errout:
2502	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
2503	return 0;
2504}
2505#endif

2506static int ctnetlink_exp_done(struct netlink_callback *cb)
2507{
2508	if (cb->args[1])
2509		nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
2510	return 0;
2511}
2512
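/* Dump the global expectation table.  cb->args[0] tracks the current hash
 * bucket and cb->args[1] pins (via a reference) the last expectation that
 * did not fit into the skb, so the next dump round can resume behind it.
 */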
2513static int
2514ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2515{
2516	struct net *net = sock_net(skb->sk);
2517	struct nf_conntrack_expect *exp, *last;
2518	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2519	u_int8_t l3proto = nfmsg->nfgen_family;
2520
2521	rcu_read_lock();
2522	last = (struct nf_conntrack_expect *)cb->args[1];
2523	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2524restart:
2525		hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
2526				     hnode) {
2527			if (l3proto && exp->tuple.src.l3num != l3proto)
2528				continue;
2529			if (cb->args[1]) {
2530				if (exp != last)
2531					continue;
2532				cb->args[1] = 0;
2533			}
2534			if (ctnetlink_exp_fill_info(skb,
2535						    NETLINK_CB(cb->skb).portid,
2536						    cb->nlh->nlmsg_seq,
2537						    IPCTNL_MSG_EXP_NEW,
2538						    exp) < 0) {
2539				if (!atomic_inc_not_zero(&exp->use))
2540					continue;
2541				cb->args[1] = (unsigned long)exp;
2542				goto out;
2543			}
2544		}
2545		if (cb->args[1]) {
2546			cb->args[1] = 0;
2547			goto restart;
2548		}
2549	}
2550out:
2551	rcu_read_unlock();
2552	if (last)
2553		nf_ct_expect_put(last);
2554
2555	return skb->len;
2556}
2557
2558static int
2559ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2560{
2561	struct nf_conntrack_expect *exp, *last;
2562	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2563	struct nf_conn *ct = cb->data;
2564	struct nf_conn_help *help = nfct_help(ct);
2565	u_int8_t l3proto = nfmsg->nfgen_family;
2566
2567	if (cb->args[0])
2568		return 0;
2569
2570	rcu_read_lock();
2571	last = (struct nf_conntrack_expect *)cb->args[1];
2572restart:
2573	hlist_for_each_entry(exp, &help->expectations, lnode) {
2574		if (l3proto && exp->tuple.src.l3num != l3proto)
2575			continue;
2576		if (cb->args[1]) {
2577			if (exp != last)
2578				continue;
2579			cb->args[1] = 0;
2580		}
2581		if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
2582					    cb->nlh->nlmsg_seq,
2583					    IPCTNL_MSG_EXP_NEW,
2584					    exp) < 0) {
2585			if (!atomic_inc_not_zero(&exp->use))
2586				continue;
2587			cb->args[1] = (unsigned long)exp;
2588			goto out;
2589		}
2590	}
2591	if (cb->args[1]) {
2592		cb->args[1] = 0;
2593		goto restart;
2594	}
2595	cb->args[0] = 1;
2596out:
2597	rcu_read_unlock();
2598	if (last)
2599		nf_ct_expect_put(last);
2600
2601	return skb->len;
2602}
2603
2604static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
2605				 const struct nlmsghdr *nlh,
2606				 const struct nlattr * const cda[])
2607{
2608	int err;
2609	struct net *net = sock_net(ctnl);
2610	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2611	u_int8_t u3 = nfmsg->nfgen_family;
2612	struct nf_conntrack_tuple tuple;
2613	struct nf_conntrack_tuple_hash *h;
2614	struct nf_conn *ct;
2615	u16 zone = 0;
2616	struct netlink_dump_control c = {
2617		.dump = ctnetlink_exp_ct_dump_table,
2618		.done = ctnetlink_exp_done,
2619	};
2620
2621	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2622	if (err < 0)
2623		return err;
2624
2625	if (cda[CTA_EXPECT_ZONE]) {
2626		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2627		if (err < 0)
2628			return err;
2629	}
2630
2631	h = nf_conntrack_find_get(net, zone, &tuple);
2632	if (!h)
2633		return -ENOENT;
2634
2635	ct = nf_ct_tuplehash_to_ctrack(h);
2636	c.data = ct;
2637
2638	err = netlink_dump_start(ctnl, skb, nlh, &c);
2639	nf_ct_put(ct);
2640
2641	return err;
2642}
2643
2644static int
2645ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2646		     const struct nlmsghdr *nlh,
2647		     const struct nlattr * const cda[])
2648{
2649	struct net *net = sock_net(ctnl);
2650	struct nf_conntrack_tuple tuple;
2651	struct nf_conntrack_expect *exp;
2652	struct sk_buff *skb2;
2653	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2654	u_int8_t u3 = nfmsg->nfgen_family;
2655	u16 zone;
2656	int err;
2657
2658	if (nlh->nlmsg_flags & NLM_F_DUMP) {
2659		if (cda[CTA_EXPECT_MASTER])
2660			return ctnetlink_dump_exp_ct(ctnl, skb, nlh, cda);
2661		else {
2662			struct netlink_dump_control c = {
2663				.dump = ctnetlink_exp_dump_table,
2664				.done = ctnetlink_exp_done,
2665			};
2666			return netlink_dump_start(ctnl, skb, nlh, &c);
2667		}
2668	}
2669
2670	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2671	if (err < 0)
2672		return err;
2673
2674	if (cda[CTA_EXPECT_TUPLE])
2675		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2676	else if (cda[CTA_EXPECT_MASTER])
2677		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2678	else
2679		return -EINVAL;
2680
2681	if (err < 0)
2682		return err;
2683
2684	exp = nf_ct_expect_find_get(net, zone, &tuple);
2685	if (!exp)
2686		return -ENOENT;
2687
2688	if (cda[CTA_EXPECT_ID]) {
2689		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2690		if (ntohl(id) != (u32)(unsigned long)exp) {
2691			nf_ct_expect_put(exp);
2692			return -ENOENT;
2693		}
2694	}
2695
2696	err = -ENOMEM;
2697	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2698	if (skb2 == NULL) {
2699		nf_ct_expect_put(exp);
2700		goto out;
2701	}
2702
2703	rcu_read_lock();
2704	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
2705				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
2706	rcu_read_unlock();
2707	nf_ct_expect_put(exp);
2708	if (err <= 0)
2709		goto free;
2710
2711	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2712	if (err < 0)
2713		goto out;
2714
2715	return 0;
2716
2717free:
2718	kfree_skb(skb2);
2719out:
2720	/* translate EAGAIN into ENOBUFS to avoid a replay loop in nfnetlink. */
2721	return err == -EAGAIN ? -ENOBUFS : err;
2722}
2723
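/* IPCTNL_MSG_EXP_DELETE handler with three modes: a CTA_EXPECT_TUPLE
 * removes a single expectation (optionally verified against CTA_EXPECT_ID),
 * a CTA_EXPECT_HELP_NAME removes every expectation whose master uses that
 * helper, and a message without either attribute flushes the whole table.
 */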
2724static int
2725ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2726		     const struct nlmsghdr *nlh,
2727		     const struct nlattr * const cda[])
2728{
2729	struct net *net = sock_net(ctnl);
2730	struct nf_conntrack_expect *exp;
2731	struct nf_conntrack_tuple tuple;
2732	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2733	struct hlist_node *next;
2734	u_int8_t u3 = nfmsg->nfgen_family;
2735	unsigned int i;
2736	u16 zone;
2737	int err;
2738
2739	if (cda[CTA_EXPECT_TUPLE]) {
2740		/* delete a single expect by tuple */
2741		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2742		if (err < 0)
2743			return err;
2744
2745		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2746		if (err < 0)
2747			return err;
2748
2749		/* bump usage count to 2 */
2750		exp = nf_ct_expect_find_get(net, zone, &tuple);
2751		if (!exp)
2752			return -ENOENT;
2753
2754		if (cda[CTA_EXPECT_ID]) {
2755			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2756			if (ntohl(id) != (u32)(unsigned long)exp) {
2757				nf_ct_expect_put(exp);
2758				return -ENOENT;
2759			}
2760		}
2761
2762		/* after list removal, usage count == 1 */
2763		spin_lock_bh(&nf_conntrack_expect_lock);
2764		if (del_timer(&exp->timeout)) {
2765			nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
2766						   nlmsg_report(nlh));
2767			nf_ct_expect_put(exp);
2768		}
2769		spin_unlock_bh(&nf_conntrack_expect_lock);
2770		/* have to put what we 'get' above.
2771		 * after this line usage count == 0 */
2772		nf_ct_expect_put(exp);
2773	} else if (cda[CTA_EXPECT_HELP_NAME]) {
2774		char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2775		struct nf_conn_help *m_help;
2776
2777		/* delete all expectations for this helper */
2778		spin_lock_bh(&nf_conntrack_expect_lock);
2779		for (i = 0; i < nf_ct_expect_hsize; i++) {
2780			hlist_for_each_entry_safe(exp, next,
2781						  &net->ct.expect_hash[i],
2782						  hnode) {
2783				m_help = nfct_help(exp->master);
2784				if (!strcmp(m_help->helper->name, name) &&
2785				    del_timer(&exp->timeout)) {
2786					nf_ct_unlink_expect_report(exp,
2787							NETLINK_CB(skb).portid,
2788							nlmsg_report(nlh));
2789					nf_ct_expect_put(exp);
2790				}
2791			}
2792		}
2793		spin_unlock_bh(&nf_conntrack_expect_lock);
2794	} else {
2795		/* no tuple or helper name given: flush all expectations */
2796		spin_lock_bh(&nf_conntrack_expect_lock);
2797		for (i = 0; i < nf_ct_expect_hsize; i++) {
2798			hlist_for_each_entry_safe(exp, next,
2799						  &net->ct.expect_hash[i],
2800						  hnode) {
2801				if (del_timer(&exp->timeout)) {
2802					nf_ct_unlink_expect_report(exp,
2803							NETLINK_CB(skb).portid,
2804							nlmsg_report(nlh));
2805					nf_ct_expect_put(exp);
2806				}
2807			}
2808		}
2809		spin_unlock_bh(&nf_conntrack_expect_lock);
2810	}
2811
2812	return 0;
2813}

2814static int
2815ctnetlink_change_expect(struct nf_conntrack_expect *x,
2816			const struct nlattr * const cda[])
2817{
2818	if (cda[CTA_EXPECT_TIMEOUT]) {
2819		if (!del_timer(&x->timeout))
2820			return -ETIME;
2821
2822		x->timeout.expires = jiffies +
2823			ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2824		add_timer(&x->timeout);
2825	}
2826	return 0;
2827}
2828
2829static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
2830	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
2831	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
2832};
2833
2834static int
2835ctnetlink_parse_expect_nat(const struct nlattr *attr,
2836			   struct nf_conntrack_expect *exp,
2837			   u_int8_t u3)
2838{
2839#ifdef CONFIG_NF_NAT_NEEDED
2840	struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2841	struct nf_conntrack_tuple nat_tuple = {};
2842	int err;
2843
2844	err = nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2845	if (err < 0)
2846		return err;
2847
2848	if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2849		return -EINVAL;
2850
2851	err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2852					&nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2853	if (err < 0)
2854		return err;
2855
2856	exp->saved_addr = nat_tuple.src.u3;
2857	exp->saved_proto = nat_tuple.src.u;
2858	exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2859
2860	return 0;
2861#else
2862	return -EOPNOTSUPP;
2863#endif
2864}
2865
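/* Allocate and initialise an expectation from netlink attributes.  If the
 * master conntrack has no helper attached this is a userspace-managed
 * expectation: CTA_EXPECT_TIMEOUT is then mandatory and the
 * NF_CT_EXPECT_USERSPACE flag is forced on.
 */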
2866static struct nf_conntrack_expect *
2867ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
2868		       struct nf_conntrack_helper *helper,
2869		       struct nf_conntrack_tuple *tuple,
2870		       struct nf_conntrack_tuple *mask)
2871{
2872	u_int32_t class = 0;
2873	struct nf_conntrack_expect *exp;
2874	struct nf_conn_help *help;
2875	int err;
2876
2877	if (cda[CTA_EXPECT_CLASS] && helper) {
2878		class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2879		if (class > helper->expect_class_max)
2880			return ERR_PTR(-EINVAL);
2881	}
2882	exp = nf_ct_expect_alloc(ct);
2883	if (!exp)
2884		return ERR_PTR(-ENOMEM);
2885
2886	help = nfct_help(ct);
2887	if (!help) {
2888		if (!cda[CTA_EXPECT_TIMEOUT]) {
2889			err = -EINVAL;
2890			goto err_out;
2891		}
2892		exp->timeout.expires =
2893		  jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2894
2895		exp->flags = NF_CT_EXPECT_USERSPACE;
2896		if (cda[CTA_EXPECT_FLAGS]) {
2897			exp->flags |=
2898				ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2899		}
2900	} else {
2901		if (cda[CTA_EXPECT_FLAGS]) {
2902			exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2903			exp->flags &= ~NF_CT_EXPECT_USERSPACE;
2904		} else
2905			exp->flags = 0;
2906	}
2907	if (cda[CTA_EXPECT_FN]) {
2908		const char *name = nla_data(cda[CTA_EXPECT_FN]);
2909		struct nf_ct_helper_expectfn *expfn;
2910
2911		expfn = nf_ct_helper_expectfn_find_by_name(name);
2912		if (expfn == NULL) {
2913			err = -EINVAL;
2914			goto err_out;
2915		}
2916		exp->expectfn = expfn->expectfn;
2917	} else
2918		exp->expectfn = NULL;
2919
2920	exp->class = class;
2921	exp->master = ct;
2922	exp->helper = helper;
2923	exp->tuple = *tuple;
2924	exp->mask.src.u3 = mask->src.u3;
2925	exp->mask.src.u.all = mask->src.u.all;
2926
2927	if (cda[CTA_EXPECT_NAT]) {
2928		err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2929						 exp, nf_ct_l3num(ct));
2930		if (err < 0)
2931			goto err_out;
2932	}
2933	return exp;
2934err_out:
2935	nf_ct_expect_put(exp);
2936	return ERR_PTR(err);
2937}
2938
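/* Create a new expectation: find the master conntrack, optionally resolve
 * the helper (auto-loading "nfct-helper-<name>" if necessary) and register
 * the expectation.  If the module load makes the helper appear, -EAGAIN is
 * returned so that nfnetlink replays the request with the helper in place.
 */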
2939static int
2940ctnetlink_create_expect(struct net *net, u16 zone,
2941			const struct nlattr * const cda[],
2942			u_int8_t u3, u32 portid, int report)
2943{
2944	struct nf_conntrack_tuple tuple, mask, master_tuple;
2945	struct nf_conntrack_tuple_hash *h = NULL;
2946	struct nf_conntrack_helper *helper = NULL;
2947	struct nf_conntrack_expect *exp;
2948	struct nf_conn *ct;
2949	int err;
2950
2951	/* the caller guarantees that these three CTA_EXPECT_* attributes exist */
2952	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2953	if (err < 0)
2954		return err;
2955	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2956	if (err < 0)
2957		return err;
2958	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2959	if (err < 0)
2960		return err;
2961
2962	/* Look for master conntrack of this expectation */
2963	h = nf_conntrack_find_get(net, zone, &master_tuple);
2964	if (!h)
2965		return -ENOENT;
2966	ct = nf_ct_tuplehash_to_ctrack(h);
2967
2968	if (cda[CTA_EXPECT_HELP_NAME]) {
2969		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2970
2971		helper = __nf_conntrack_helper_find(helpname, u3,
2972						    nf_ct_protonum(ct));
2973		if (helper == NULL) {
2974#ifdef CONFIG_MODULES
2975			if (request_module("nfct-helper-%s", helpname) < 0) {
2976				err = -EOPNOTSUPP;
2977				goto err_ct;
2978			}
2979			helper = __nf_conntrack_helper_find(helpname, u3,
2980							    nf_ct_protonum(ct));
2981			if (helper) {
2982				err = -EAGAIN;
2983				goto err_ct;
2984			}
2985#endif
2986			err = -EOPNOTSUPP;
2987			goto err_ct;
2988		}
2989	}
2990
2991	exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
2992	if (IS_ERR(exp)) {
2993		err = PTR_ERR(exp);
2994		goto err_ct;
2995	}
2996
2997	err = nf_ct_expect_related_report(exp, portid, report);
2998	nf_ct_expect_put(exp);
2999err_ct:
3000	nf_ct_put(ct);
3001	return err;
3002}
3003
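/* IPCTNL_MSG_EXP_NEW handler: create the expectation if none matches the
 * given tuple and NLM_F_CREATE is set; otherwise update the existing one,
 * unless NLM_F_EXCL demands exclusive creation (-EEXIST).
 */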
3004static int
3005ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
3006		     const struct nlmsghdr *nlh,
3007		     const struct nlattr * const cda[])
3008{
3009	struct net *net = sock_net(ctnl);
3010	struct nf_conntrack_tuple tuple;
3011	struct nf_conntrack_expect *exp;
3012	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3013	u_int8_t u3 = nfmsg->nfgen_family;
3014	u16 zone;
3015	int err;
3016
3017	if (!cda[CTA_EXPECT_TUPLE] ||
3018	    !cda[CTA_EXPECT_MASK] ||
3019	    !cda[CTA_EXPECT_MASTER])
3020		return -EINVAL;
3021
3022	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
3023	if (err < 0)
3024		return err;
3025
3026	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
3027	if (err < 0)
3028		return err;
3029
3030	spin_lock_bh(&nf_conntrack_expect_lock);
3031	exp = __nf_ct_expect_find(net, zone, &tuple);
3032
3033	if (!exp) {
3034		spin_unlock_bh(&nf_conntrack_expect_lock);
3035		err = -ENOENT;
3036		if (nlh->nlmsg_flags & NLM_F_CREATE) {
3037			err = ctnetlink_create_expect(net, zone, cda,
3038						      u3,
3039						      NETLINK_CB(skb).portid,
3040						      nlmsg_report(nlh));
3041		}
3042		return err;
3043	}
3044
3045	err = -EEXIST;
3046	if (!(nlh->nlmsg_flags & NLM_F_EXCL))
3047		err = ctnetlink_change_expect(exp, cda);
3048	spin_unlock_bh(&nf_conntrack_expect_lock);
3049
3050	return err;
3051}
3052
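/* Per-cpu expectation statistics: the cpu number is carried in res_id and
 * the counters as CTA_STATS_EXP_* attributes.
 */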
3053static int
3054ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
3055			     const struct ip_conntrack_stat *st)
3056{
3057	struct nlmsghdr *nlh;
3058	struct nfgenmsg *nfmsg;
3059	unsigned int flags = portid ? NLM_F_MULTI : 0, event;
3060
3061	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
3062	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
3063	if (nlh == NULL)
3064		goto nlmsg_failure;
3065
3066	nfmsg = nlmsg_data(nlh);
3067	nfmsg->nfgen_family = AF_UNSPEC;
3068	nfmsg->version      = NFNETLINK_V0;
3069	nfmsg->res_id	    = htons(cpu);
3070
3071	if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
3072	    nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
3073	    nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
3074		goto nla_put_failure;
3075
3076	nlmsg_end(skb, nlh);
3077	return skb->len;
3078
3079nla_put_failure:
3080nlmsg_failure:
3081	nlmsg_cancel(skb, nlh);
3082	return -1;
3083}
3084
3085static int
3086ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
3087{
3088	int cpu;
3089	struct net *net = sock_net(skb->sk);
3090
3091	if (cb->args[0] == nr_cpu_ids)
3092		return 0;
3093
3094	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
3095		const struct ip_conntrack_stat *st;
3096
3097		if (!cpu_possible(cpu))
3098			continue;
3099
3100		st = per_cpu_ptr(net->ct.stat, cpu);
3101		if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
3102						 cb->nlh->nlmsg_seq,
3103						 cpu, st) < 0)
3104			break;
3105	}
3106	cb->args[0] = cpu;
3107
3108	return skb->len;
3109}
3110
3111static int
3112ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
3113		       const struct nlmsghdr *nlh,
3114		       const struct nlattr * const cda[])
3115{
3116	if (nlh->nlmsg_flags & NLM_F_DUMP) {
3117		struct netlink_dump_control c = {
3118			.dump = ctnetlink_exp_stat_cpu_dump,
3119		};
3120		return netlink_dump_start(ctnl, skb, nlh, &c);
3121	}
3122
3123	return 0;
3124}
3125
3126#ifdef CONFIG_NF_CONNTRACK_EVENTS
3127static struct nf_ct_event_notifier ctnl_notifier = {
3128	.fcn = ctnetlink_conntrack_event,
3129};
3130
3131static struct nf_exp_event_notifier ctnl_notifier_exp = {
3132	.fcn = ctnetlink_expect_event,
3133};
3134#endif
3135
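/* Dispatch tables: nfnetlink validates the attributes of each message type
 * against .policy/.attr_count before invoking the .call handler.
 */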
3136static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
3137	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
3138					    .attr_count = CTA_MAX,
3139					    .policy = ct_nla_policy },
3140	[IPCTNL_MSG_CT_GET] 		= { .call = ctnetlink_get_conntrack,
3141					    .attr_count = CTA_MAX,
3142					    .policy = ct_nla_policy },
3143	[IPCTNL_MSG_CT_DELETE]  	= { .call = ctnetlink_del_conntrack,
3144					    .attr_count = CTA_MAX,
3145					    .policy = ct_nla_policy },
3146	[IPCTNL_MSG_CT_GET_CTRZERO] 	= { .call = ctnetlink_get_conntrack,
3147					    .attr_count = CTA_MAX,
3148					    .policy = ct_nla_policy },
3149	[IPCTNL_MSG_CT_GET_STATS_CPU]	= { .call = ctnetlink_stat_ct_cpu },
3150	[IPCTNL_MSG_CT_GET_STATS]	= { .call = ctnetlink_stat_ct },
3151	[IPCTNL_MSG_CT_GET_DYING]	= { .call = ctnetlink_get_ct_dying },
3152	[IPCTNL_MSG_CT_GET_UNCONFIRMED]	= { .call = ctnetlink_get_ct_unconfirmed },
3153};
3154
3155static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
3156	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
3157					    .attr_count = CTA_EXPECT_MAX,
3158					    .policy = exp_nla_policy },
3159	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
3160					    .attr_count = CTA_EXPECT_MAX,
3161					    .policy = exp_nla_policy },
3162	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
3163					    .attr_count = CTA_EXPECT_MAX,
3164					    .policy = exp_nla_policy },
3165	[IPCTNL_MSG_EXP_GET_STATS_CPU]	= { .call = ctnetlink_stat_exp_cpu },
3166};
3167
3168static const struct nfnetlink_subsystem ctnl_subsys = {
3169	.name				= "conntrack",
3170	.subsys_id			= NFNL_SUBSYS_CTNETLINK,
3171	.cb_count			= IPCTNL_MSG_MAX,
3172	.cb				= ctnl_cb,
3173};
3174
3175static const struct nfnetlink_subsystem ctnl_exp_subsys = {
3176	.name				= "conntrack_expect",
3177	.subsys_id			= NFNL_SUBSYS_CTNETLINK_EXP,
3178	.cb_count			= IPCTNL_MSG_EXP_MAX,
3179	.cb				= ctnl_exp_cb,
3180};
3181
3182MODULE_ALIAS("ip_conntrack_netlink");
3183MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
3184MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
3185
3186static int __net_init ctnetlink_net_init(struct net *net)
3187{
3188#ifdef CONFIG_NF_CONNTRACK_EVENTS
3189	int ret;
3190
3191	ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
3192	if (ret < 0) {
3193		pr_err("ctnetlink_init: cannot register notifier.\n");
3194		goto err_out;
3195	}
3196
3197	ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
3198	if (ret < 0) {
3199		pr_err("ctnetlink_init: cannot register expect notifier.\n");
3200		goto err_unreg_notifier;
3201	}
3202#endif
3203	return 0;
3204
3205#ifdef CONFIG_NF_CONNTRACK_EVENTS
3206err_unreg_notifier:
3207	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3208err_out:
3209	return ret;
3210#endif
3211}
3212
3213static void ctnetlink_net_exit(struct net *net)
3214{
3215#ifdef CONFIG_NF_CONNTRACK_EVENTS
3216	nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
3217	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3218#endif
3219}
3220
3221static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
3222{
3223	struct net *net;
3224
3225	list_for_each_entry(net, net_exit_list, exit_list)
3226		ctnetlink_net_exit(net);
3227}
3228
3229static struct pernet_operations ctnetlink_net_ops = {
3230	.init		= ctnetlink_net_init,
3231	.exit_batch	= ctnetlink_net_exit_batch,
3232};
3233
3234static int __init ctnetlink_init(void)
3235{
3236	int ret;
3237
3238	pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
3239	ret = nfnetlink_subsys_register(&ctnl_subsys);
3240	if (ret < 0) {
3241		pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3242		goto err_out;
3243	}
3244
3245	ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
3246	if (ret < 0) {
3247		pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3248		goto err_unreg_subsys;
3249	}
3250
3251	ret = register_pernet_subsys(&ctnetlink_net_ops);
3252	if (ret < 0) {
3253		pr_err("ctnetlink_init: cannot register pernet operations.\n");
3254		goto err_unreg_exp_subsys;
3255	}
3256#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3257	/* set up the interaction between nf_queue and nf_conntrack_netlink. */
3258	RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
3259#endif
3260	return 0;
3261
3262err_unreg_exp_subsys:
3263	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3264err_unreg_subsys:
3265	nfnetlink_subsys_unregister(&ctnl_subsys);
3266err_out:
3267	return ret;
3268}
3269
3270static void __exit ctnetlink_exit(void)
3271{
3272	pr_info("ctnetlink: unregistering from nfnetlink.\n");
3273
3274	unregister_pernet_subsys(&ctnetlink_net_ops);
3275	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3276	nfnetlink_subsys_unregister(&ctnl_subsys);
3277#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3278	RCU_INIT_POINTER(nfq_ct_hook, NULL);
3279#endif
3280}
3281
3282module_init(ctnetlink_init);
3283module_exit(ctnetlink_exit);
3284