/*
 *	DCCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on net/dccp6/ipv6.c
 *
 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
				      const struct in6_addr *saddr,
				      const struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
}

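/*
 * Pick the initial sequence number for a passive connection from the
 * address/port 4-tuple of the received Request packet.
 */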
static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}

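/*
 * ICMPv6 error handler: find the socket the offending packet belongs to and
 * act on the error -- update the route on redirects, adjust the MSS on
 * "packet too big", and report other errors to the socket.
 */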
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	sk = __inet6_lookup_established(net, &dccp_hashinfo,
					&hdr->daddr, dh->dccph_dport,
					&hdr->saddr, ntohs(dh->dccph_sport),
					inet6_iif(skb));

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = dccp_hdr_seq(dh);
	if (sk->sk_state == DCCP_NEW_SYN_RECV)
		return dccp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		dst = inet6_csk_update_pmtu(sk, ntohl(info));
		if (!dst)
			goto out;

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, e.g. if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

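/*
 * Send a Response packet for a pending connection request: route the flow,
 * build the Response with dccp_make_response(), fill in the checksum and
 * transmit it via ip6_xmit().
 */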
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = ireq->ir_v6_rmt_addr;
	fl6.saddr = ireq->ir_v6_loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq->ir_iif;
	fl6.fl6_dport = ireq->ir_rmt_port;
	fl6.fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	rcu_read_lock();
	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
	rcu_read_unlock();

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq->ir_v6_loc_addr,
							 &ireq->ir_v6_rmt_addr);
		fl6.daddr = ireq->ir_v6_rmt_addr;
		rcu_read_lock();
		err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	kfree_skb(inet_rsk(req)->pktopts);
}

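/*
 * Send a Reset in reply to the packet in rxskb.  The Reset is built on and
 * transmitted from the per-net control socket, with the addresses and ports
 * of the received packet reversed; nothing is sent in reply to a Reset or
 * to a non-unicast destination.
 */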
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
	struct dst_entry *dst;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = rxip6h->saddr;
	fl6.saddr = rxip6h->daddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
	.syn_ack_timeout = dccp_syn_ack_timeout,
};

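/*
 * For a packet arriving on a listening socket, look for a matching pending
 * request_sock first and then for an already established child socket;
 * fall back to the listening socket itself if neither exists.
 */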
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr,
				   &iph->daddr, inet6_iif(skb));
	if (req) {
		nsk = dccp_check_req(sk, skb, req);
		if (!nsk)
			reqsk_put(req);
		return nsk;
	}
	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}

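/*
 * Handle a Request packet on a listening socket: validate the service code,
 * allocate and initialise a request_sock, parse the options, record the
 * addresses and sequence numbers, send the Response and queue the request.
 * IPv4-mapped packets are handed over to dccp_v4_conn_request().
 */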
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	ireq->ireq_family = AF_INET6;

	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_gsr     = dreq->dreq_isr;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_gss     = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}

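/*
 * Create the child socket once the handshake completes.  IPv4-mapped
 * requests are handed to dccp_v4_request_recv_sock() and then switched to
 * the mapped af_ops; native IPv6 requests get a route, a freshly copied
 * ipv6_pinfo and the listener's IPv6 options.
 */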
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4-mapped
		   socket worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p, final;
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_DCCP;
		fl6.daddr = ireq->ir_v6_rmt_addr;
		final_p = fl6_update_dst(&fl6, np->opt, &final);
		fl6.saddr = ireq->ir_v6_loc_addr;
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.fl6_dport = ireq->ir_rmt_port;
		fl6.fl6_sport = htons(ireq->ir_num);
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
	newnp->saddr		= ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if	= ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping a reference count would be much more clever, but we do
	 * one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	__inet_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6 but an IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always ends up here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affect IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
	/*
	 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
	 *        (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
	 */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just short-circuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions. */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}

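/*
 * Main DCCP/IPv6 receive routine (Steps 1 and 2 of the packet processing
 * described in the comments below): validate the header and checksum, look
 * up the owning socket and hand the packet over, or answer with a Reset
 * when no connection exists.
 */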
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
			        dh->dccph_sport, dh->dccph_dport,
				inet6_iif(skb));
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov  &&  (min_cov == 0 || dh->dccph_cscov < min_cov))  {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}

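/*
 * Connect an IPv6 DCCP socket.  Handles flow labels, scope ids and
 * v4-mapped destinations (which are redirected to dccp_v4_connect()),
 * routes the flow, binds a local port and starts the handshake.
 */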
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;
		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;

	inet->inet_dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      sk->sk_v6_daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

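/*
 *	DCCP over IPv6 (native, non-mapped sockets)
 */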
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	DCCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = inet_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static const struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static const struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};

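/* Per-net init: create the control socket used for sending RSTs and ACKs. */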
static int __net_init dccp_v6_init_net(struct net *net)
{
	if (dccp_hashinfo.bhash == NULL)
		return -ESOCKTNOSUPPORT;

	return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
				    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
	.init   = dccp_v6_init_net,
	.exit   = dccp_v6_exit_net,
};

static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err != 0)
		goto out_destroy_ctl_sock;
out:
	return err;

out_destroy_ctl_sock:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP
 * (33) values directly.  Also cover the case where the protocol is not
 * specified, i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP.
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");