Lines matching references to the identifier sk in net/dccp/ipv6.c
51 static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb) in dccp_v6_send_check() argument
53 struct ipv6_pinfo *np = inet6_sk(sk); in dccp_v6_send_check()
57 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr); in dccp_v6_send_check()
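The three matches above span nearly all of dccp_v6_send_check(); only the DCCP header lookup and the partial-checksum seeding in between are elided. A sketch of the whole helper, with those two filled-in lines marked as assumptions, reads:

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);		/* assumed from context */

	dccp_csum_outgoing(skb);			/* assumed from context */
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr,
						 &sk->sk_v6_daddr);
}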
76 struct sock *sk; in dccp_v6_err() local
88 sk = __inet6_lookup_established(net, &dccp_hashinfo, in dccp_v6_err()
93 if (!sk) { in dccp_v6_err()
99 if (sk->sk_state == DCCP_TIME_WAIT) { in dccp_v6_err()
100 inet_twsk_put(inet_twsk(sk)); in dccp_v6_err()
104 if (sk->sk_state == DCCP_NEW_SYN_RECV) in dccp_v6_err()
105 return dccp_req_err(sk, seq); in dccp_v6_err()
107 bh_lock_sock(sk); in dccp_v6_err()
108 if (sock_owned_by_user(sk)) in dccp_v6_err()
111 if (sk->sk_state == DCCP_CLOSED) in dccp_v6_err()
114 dp = dccp_sk(sk); in dccp_v6_err()
115 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && in dccp_v6_err()
121 np = inet6_sk(sk); in dccp_v6_err()
124 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); in dccp_v6_err()
127 dst->ops->redirect(dst, sk, skb); in dccp_v6_err()
134 if (!ip6_sk_accept_pmtu(sk)) in dccp_v6_err()
137 if (sock_owned_by_user(sk)) in dccp_v6_err()
139 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED)) in dccp_v6_err()
142 dst = inet6_csk_update_pmtu(sk, ntohl(info)); in dccp_v6_err()
146 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) in dccp_v6_err()
147 dccp_sync_mss(sk, dst_mtu(dst)); in dccp_v6_err()
154 switch (sk->sk_state) { in dccp_v6_err()
158 if (!sock_owned_by_user(sk)) { in dccp_v6_err()
160 sk->sk_err = err; in dccp_v6_err()
165 sk->sk_error_report(sk); in dccp_v6_err()
166 dccp_done(sk); in dccp_v6_err()
168 sk->sk_err_soft = err; in dccp_v6_err()
172 if (!sock_owned_by_user(sk) && np->recverr) { in dccp_v6_err()
173 sk->sk_err = err; in dccp_v6_err()
174 sk->sk_error_report(sk); in dccp_v6_err()
176 sk->sk_err_soft = err; in dccp_v6_err()
179 bh_unlock_sock(sk); in dccp_v6_err()
180 sock_put(sk); in dccp_v6_err()
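Read top to bottom, the dccp_v6_err() matches trace the ICMPv6 error handler: look the socket up, take early exits for TIME_WAIT and NEW_SYN_RECV, apply redirects and PMTU updates, then report a hard error on a half-open socket or record a soft error otherwise. The skeleton below condenses that flow; the signature, the net/seq/err locals, the marked statistics calls and every comment standing in for elided code are assumptions drawn from context, not matched output.

static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);	/* assumed */
	struct ipv6_pinfo *np;
	struct dccp_sock *dp;
	struct dst_entry *dst;
	struct sock *sk;
	__u64 seq;	/* sequence number from the embedded DCCP header, elided */
	int err;	/* mapped from the ICMPv6 type/code, elided */

	sk = __inet6_lookup_established(net, &dccp_hashinfo,
					/* addresses and ports from the embedded headers */);
	if (!sk) {
		/* count the stray ICMPv6 error and return (elided) */
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	if (sk->sk_state == DCCP_NEW_SYN_RECV)
		return dccp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);	/* assumed */

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh))	/* window check assumed */
		goto out;

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		dst = inet6_csk_update_pmtu(sk, ntohl(info));
		if (dst && inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		goto out;
	}

	switch (sk->sk_state) {
	case DCCP_REQUESTING:
	case DCCP_RESPOND:			/* states assumed */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {
		sk->sk_err_soft = err;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}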
184 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) in dccp_v6_send_response() argument
187 struct ipv6_pinfo *np = inet6_sk(sk); in dccp_v6_send_response()
209 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); in dccp_v6_send_response()
216 skb = dccp_make_response(sk, dst, req); in dccp_v6_send_response()
225 err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), in dccp_v6_send_response()
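The dccp_v6_send_response() matches sketch the reply to an incoming Request: route a flow built from the request, ask dccp_make_response() for the Response skb, then hand it to ip6_xmit(). In the condensed sketch below the flow setup, checksum finish and RCU locking are compressed into comments; the final_p/fl6 locals and the np->tclass argument are assumptions.

static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;
	struct sk_buff *skb;
	struct flowi6 fl6;
	int err = -1;

	/* Fill fl6 from the request: daddr/saddr, ports, oif (elided). */
	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);	/* under RCU, assumed */

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		/* Finish dccph_checksum over the request's v6 addresses (elided). */
		err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
			       np->tclass);		/* tclass argument assumed */
		err = net_xmit_eval(err);
	}

done:
	dst_release(dst);
	return err;
}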
242 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) in dccp_v6_ctl_send_reset() argument
298 static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) in dccp_v6_hnd_req() argument
305 req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr, in dccp_v6_hnd_req()
308 nsk = dccp_check_req(sk, skb, req); in dccp_v6_hnd_req()
313 nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, in dccp_v6_hnd_req()
326 return sk; in dccp_v6_hnd_req()
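dccp_v6_hnd_req() decides which socket should handle a packet arriving on a listener: a pending request, an already-established child, or the listener itself (the final return sk). A condensed sketch follows; the dh/iph locals, the reqsk_put()/TIME_WAIT handling and the lookup argument continuations are assumptions filled in from context.

static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	/* 1. A request already queued on this listener? */
	req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr,
				   &iph->daddr, inet6_iif(skb));
	if (req) {
		nsk = dccp_check_req(sk, skb, req);
		if (!nsk)
			reqsk_put(req);
		return nsk;
	}

	/* 2. An established (or time-wait) socket for this 4-tuple? */
	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	/* 3. Otherwise the listener handles it. */
	return sk;
}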
329 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) in dccp_v6_conn_request() argument
334 struct ipv6_pinfo *np = inet6_sk(sk); in dccp_v6_conn_request()
339 return dccp_v4_conn_request(sk, skb); in dccp_v6_conn_request()
344 if (dccp_bad_service_code(sk, service)) { in dccp_v6_conn_request()
352 if (inet_csk_reqsk_queue_is_full(sk)) in dccp_v6_conn_request()
355 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) in dccp_v6_conn_request()
358 req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk); in dccp_v6_conn_request()
362 if (dccp_reqsk_init(req, dccp_sk(sk), skb)) in dccp_v6_conn_request()
366 if (dccp_parse_options(sk, dreq, skb)) in dccp_v6_conn_request()
369 if (security_inet_conn_request(sk, skb, req)) in dccp_v6_conn_request()
377 if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) || in dccp_v6_conn_request()
383 ireq->ir_iif = sk->sk_bound_dev_if; in dccp_v6_conn_request()
386 if (!sk->sk_bound_dev_if && in dccp_v6_conn_request()
403 if (dccp_v6_send_response(sk, req)) in dccp_v6_conn_request()
406 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); in dccp_v6_conn_request()
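The dccp_v6_conn_request() matches outline Request handling on a listener: fall back to the IPv4 code for mapped addresses, validate the service code, apply the usual request/accept queue pressure checks, allocate and initialise a request sock, parse options, run the LSM hook, record routing hints, send the Response and hash the request with the initial timeout. The skeleton below condenses that; the service/dreq/ireq locals, the goto labels, the statistics call and the comments for elided lines are assumptions.

static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;	/* assumed */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (dccp_bad_service_code(sk, service)) {
		/* set the reset code to "bad service code" (elided) */
		goto drop;
	}

	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	/* ir_v6_rmt_addr / ir_v6_loc_addr copied from the IPv6 header (elided). */

	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) /* || rx option flags, elided */) {
		atomic_inc(&skb->users);	/* keep skb for pktoptions (assumed) */
		ireq->pktopts = skb;
	}
	ireq->ir_iif = sk->sk_bound_dev_if;

	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/* Choose the initial sequence numbers for the request (elided). */

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);	/* assumed */
	return -1;
}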
416 static struct sock *dccp_v6_request_recv_sock(struct sock *sk, in dccp_v6_request_recv_sock() argument
422 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); in dccp_v6_request_recv_sock()
432 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst); in dccp_v6_request_recv_sock()
468 if (sk_acceptq_is_full(sk)) in dccp_v6_request_recv_sock()
480 fl6.flowi6_oif = sk->sk_bound_dev_if; in dccp_v6_request_recv_sock()
483 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); in dccp_v6_request_recv_sock()
485 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); in dccp_v6_request_recv_sock()
490 newsk = dccp_create_openreq_child(sk, req, skb); in dccp_v6_request_recv_sock()
558 if (__inet_inherit_port(sk, newsk) < 0) { in dccp_v6_request_recv_sock()
568 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in dccp_v6_request_recv_sock()
572 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in dccp_v6_request_recv_sock()
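dccp_v6_request_recv_sock() creates the child socket for an accepted Request. The matches give its spine: hand IPv4-mapped traffic to dccp_v4_request_recv_sock(), refuse when the accept queue is full, route the child's flow, clone the listener with dccp_create_openreq_child(), and inherit the bound port, with the two MIB counters marking the overflow and drop exits. A heavily condensed skeleton follows; the newnp/final_p/fl6 locals, the error unwinding and all comments are assumptions.

static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/* IPv4-mapped: let the v4 code build the child, then patch
		 * its v6 address fields and af_ops (elided). */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		return newsk;
	}

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		struct in6_addr *final_p, final;	/* set via fl6_update_dst(), elided */
		struct flowi6 fl6;

		/* fl6 filled from the request: proto, daddr, saddr, ports (elided). */
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/* Copy route, addresses and IPv6 socket options into newsk/newnp,
	 * install dccp_v6_do_rcv as the backlog handler, sync the MSS (elided). */
	newnp = inet6_sk(newsk);

	if (__inet_inherit_port(sk, newsk) < 0) {
		/* undo and drop the half-built child (elided) */
		goto out;
	}
	/* Hash newsk into the established table (elided). */

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}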
584 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) in dccp_v6_do_rcv() argument
586 struct ipv6_pinfo *np = inet6_sk(sk); in dccp_v6_do_rcv()
598 return dccp_v4_do_rcv(sk, skb); in dccp_v6_do_rcv()
600 if (sk_filter(sk, skb)) in dccp_v6_do_rcv()
628 if (sk->sk_state == DCCP_OPEN) { /* Fast path */ in dccp_v6_do_rcv()
629 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) in dccp_v6_do_rcv()
662 if (sk->sk_state == DCCP_LISTEN) { in dccp_v6_do_rcv()
663 struct sock *nsk = dccp_v6_hnd_req(sk, skb); in dccp_v6_do_rcv()
672 if (nsk != sk) { in dccp_v6_do_rcv()
673 if (dccp_child_process(sk, nsk, skb)) in dccp_v6_do_rcv()
681 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len)) in dccp_v6_do_rcv()
690 dccp_v6_ctl_send_reset(sk, skb); in dccp_v6_do_rcv()
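dccp_v6_do_rcv() is the per-socket receive handler installed as sk_backlog_rcv. The matches give its branches: divert IPv4-mapped packets to dccp_v4_do_rcv(), apply the socket filter, take the fast path in DCCP_OPEN via dccp_rcv_established(), let a listener resolve the packet through dccp_v6_hnd_req()/dccp_child_process(), and push everything else through dccp_rcv_state_process(), sending a Reset when any of those fail. A condensed sketch, with the pktoptions handling and discard path compressed into comments (assumed):

static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/* Clone the skb if IPv6 control messages were requested via np (elided). */

	if (sk->sk_state == DCCP_OPEN) {	/* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		return 0;
	}

	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	kfree_skb(skb);		/* cloned option skb also freed in the original (assumed) */
	return 0;
}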
701 struct sock *sk; in dccp_v6_rcv() local
728 sk = __inet6_lookup_skb(&dccp_hashinfo, skb, in dccp_v6_rcv()
735 if (sk == NULL) { in dccp_v6_rcv()
747 if (sk->sk_state == DCCP_TIME_WAIT) { in dccp_v6_rcv()
749 inet_twsk_put(inet_twsk(sk)); in dccp_v6_rcv()
758 min_cov = dccp_sk(sk)->dccps_pcrlen; in dccp_v6_rcv()
766 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) in dccp_v6_rcv()
769 return sk_receive_skb(sk, skb, 1) ? -1 : 0; in dccp_v6_rcv()
783 dccp_v6_ctl_send_reset(sk, skb); in dccp_v6_rcv()
791 sock_put(sk); in dccp_v6_rcv()
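dccp_v6_rcv() is the protocol's entry point from the IPv6 stack. The matched lines mark its stations: look up the socket with __inet6_lookup_skb(), send a Reset if nothing matches, retire TIME_WAIT sockets, enforce the receiver's minimum checksum coverage (dccps_pcrlen), run the XFRM policy check and finally queue the packet with sk_receive_skb(). A condensed sketch, with header validation and the no-socket/discard paths folded into comments (assumed):

static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/* Validate and checksum the DCCP header, fill the skb control block,
	 * and set dh (elided). */

	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
				/* source and destination ports */);
	if (sk == NULL) {
		/* No socket for this packet. */
		goto no_dccp_socket;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/* Enforce the receiver's minimum checksum coverage. */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov))
		goto discard_and_relse;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	/* After an XFRM check, reply with a Reset unless the packet was
	 * itself a Reset (elided), then drop it. */
	dccp_v6_ctl_send_reset(sk, skb);
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	kfree_skb(skb);
	return 0;
}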
795 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, in dccp_v6_connect() argument
799 struct inet_connection_sock *icsk = inet_csk(sk); in dccp_v6_connect()
800 struct inet_sock *inet = inet_sk(sk); in dccp_v6_connect()
801 struct ipv6_pinfo *np = inet6_sk(sk); in dccp_v6_connect()
802 struct dccp_sock *dp = dccp_sk(sk); in dccp_v6_connect()
825 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); in dccp_v6_connect()
848 if (sk->sk_bound_dev_if && in dccp_v6_connect()
849 sk->sk_bound_dev_if != usin->sin6_scope_id) in dccp_v6_connect()
852 sk->sk_bound_dev_if = usin->sin6_scope_id; in dccp_v6_connect()
856 if (!sk->sk_bound_dev_if) in dccp_v6_connect()
860 sk->sk_v6_daddr = usin->sin6_addr; in dccp_v6_connect()
870 SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); in dccp_v6_connect()
872 if (__ipv6_only_sock(sk)) in dccp_v6_connect()
880 sk->sk_backlog_rcv = dccp_v4_do_rcv; in dccp_v6_connect()
882 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); in dccp_v6_connect()
886 sk->sk_backlog_rcv = dccp_v6_do_rcv; in dccp_v6_connect()
889 np->saddr = sk->sk_v6_rcv_saddr; in dccp_v6_connect()
893 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) in dccp_v6_connect()
894 saddr = &sk->sk_v6_rcv_saddr; in dccp_v6_connect()
897 fl6.daddr = sk->sk_v6_daddr; in dccp_v6_connect()
899 fl6.flowi6_oif = sk->sk_bound_dev_if; in dccp_v6_connect()
902 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); in dccp_v6_connect()
904 opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); in dccp_v6_connect()
907 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); in dccp_v6_connect()
915 sk->sk_v6_rcv_saddr = *saddr; in dccp_v6_connect()
922 __ip6_dst_store(sk, dst, NULL, NULL); in dccp_v6_connect()
930 dccp_set_state(sk, DCCP_REQUESTING); in dccp_v6_connect()
931 err = inet6_hash_connect(&dccp_death_row, sk); in dccp_v6_connect()
936 sk->sk_v6_daddr.s6_addr32, in dccp_v6_connect()
939 err = dccp_connect(sk); in dccp_v6_connect()
946 dccp_set_state(sk, DCCP_CLOSED); in dccp_v6_connect()
947 __sk_dst_reset(sk); in dccp_v6_connect()
950 sk->sk_route_caps = 0; in dccp_v6_connect()
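dccp_v6_connect() is the longest run of matches and follows the classic IPv6 connect pattern: resolve the flow label and scope id, record the destination, fall back to dccp_v4_connect() for IPv4-mapped peers (temporarily switching sk_backlog_rcv), route the flow, bind the source address, cache the route, move the socket to DCCP_REQUESTING, bind a local port with inet6_hash_connect(), pick the initial sequence number and finally call dccp_connect(); the failure path returns the socket to DCCP_CLOSED. A compressed skeleton follows; the usin/fl6/opt/saddr/final_p locals, the error labels, dccp_ipv6_mapped and every comment are assumptions.

static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int addr_type, err;

	/* addr_len/sin6_family validation elided; fl6 zeroed and its
	 * flowlabel taken from usin->sin6_flowinfo (assumed). */
	if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
		struct ip6_flowlabel *flowlabel;

		flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
		if (flowlabel == NULL)
			return -EINVAL;
		fl6_sock_release(flowlabel);
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);
	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		/* A link-local peer must agree with the bound device. */
		if (sk->sk_bound_dev_if &&
		    sk->sk_bound_dev_if != usin->sin6_scope_id)
			return -EINVAL;
		sk->sk_bound_dev_if = usin->sin6_scope_id;
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	sk->sk_v6_daddr = usin->sin6_addr;

	if (addr_type == IPV6_ADDR_MAPPED) {
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		/* Build sin from the mapped address and switch to the v4
		 * af_ops/backlog handler (elided). */
		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;
		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.daddr = sk->sk_v6_daddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	/* fl6.saddr, the ports and the protocol are filled in as well (elided). */
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);	/* assumed */

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}
	np->saddr = *saddr;
	__ip6_dst_store(sk, dst, NULL, NULL);

	/* Header-length and dport bookkeeping elided. */
	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      sk->sk_v6_daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);	/* assumed */
	err = dccp_connect(sk);
	if (err)
		goto late_failure;
	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}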
995 static int dccp_v6_init_sock(struct sock *sk) in dccp_v6_init_sock() argument
998 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized); in dccp_v6_init_sock()
1003 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; in dccp_v6_init_sock()
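dccp_v6_init_sock() mostly defers to the shared dccp_init_sock() and then installs the IPv6 connection-socket operations. A sketch of the full function, with the control-socket bookkeeping filled in as an assumption:

static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;	/* assumed */
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}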
1009 static void dccp_v6_destroy_sock(struct sock *sk) in dccp_v6_destroy_sock() argument
1011 dccp_destroy_sock(sk); in dccp_v6_destroy_sock()
1012 inet6_destroy_sock(sk); in dccp_v6_destroy_sock()
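The two dccp_v6_destroy_sock() matches are the entire body: tear down the DCCP state, then the IPv6 state. Assembled, with only the braces added:

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}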