Lines matching refs: sk (net/dccp/ipv4.c, the Linux DCCP-over-IPv4 implementation). The leading numbers are line positions inside ipv4.c; each hit is annotated with its enclosing function and with whether sk is an argument or a local there.

42 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)  in dccp_v4_connect()  argument
45 struct inet_sock *inet = inet_sk(sk); in dccp_v4_connect()
46 struct dccp_sock *dp = dccp_sk(sk); in dccp_v4_connect()
65 sock_owned_by_user(sk)); in dccp_v4_connect()
76 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, in dccp_v4_connect()
78 orig_sport, orig_dport, sk); in dccp_v4_connect()
92 sk_rcv_saddr_set(sk, inet->inet_saddr); in dccp_v4_connect()
94 sk_daddr_set(sk, daddr); in dccp_v4_connect()
96 inet_csk(sk)->icsk_ext_hdr_len = 0; in dccp_v4_connect()
98 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in dccp_v4_connect()
105 dccp_set_state(sk, DCCP_REQUESTING); in dccp_v4_connect()
106 err = inet_hash_connect(&dccp_death_row, sk); in dccp_v4_connect()
111 inet->inet_sport, inet->inet_dport, sk); in dccp_v4_connect()
118 sk_setup_caps(sk, &rt->dst); in dccp_v4_connect()
126 err = dccp_connect(sk); in dccp_v4_connect()
136 dccp_set_state(sk, DCCP_CLOSED); in dccp_v4_connect()
138 sk->sk_route_caps = 0; in dccp_v4_connect()
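
Taken together, the dccp_v4_connect() hits above cover the whole active-open path: derive the inet/dccp views of the socket, route the flow, record the addresses chosen by routing, enter REQUESTING, bind a local port, and send the DCCP-Request. A condensed sketch reconstructed from those matches (fl4, nexthop, daddr and orig_sport/orig_dport are set up in the elided earlier part of the real function):

/* Sketch only: condensed from the matches above, not the full function. */
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);    /* service code checks elided */
        struct rtable *rt;
        int err;

        /* lines 76-78: route the flow with socket-derived parameters */
        rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                              IPPROTO_DCCP, orig_sport, orig_dport, sk);

        /* lines 92-94: record the source/destination routing picked */
        sk_rcv_saddr_set(sk, inet->inet_saddr);
        sk_daddr_set(sk, daddr);

        /* lines 105-126: REQUESTING, port binding, DCCP-Request */
        dccp_set_state(sk, DCCP_REQUESTING);
        err = inet_hash_connect(&dccp_death_row, sk);
        if (err != 0)
                goto failure;

        sk_setup_caps(sk, &rt->dst);
        err = dccp_connect(sk);
        if (err != 0)
                goto failure;
        return 0;

failure:
        /* lines 136-138: fall back to CLOSED and clear route caps */
        dccp_set_state(sk, DCCP_CLOSED);
        sk->sk_route_caps = 0;
        return err;
}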
147 static inline void dccp_do_pmtu_discovery(struct sock *sk, in dccp_do_pmtu_discovery() argument
152 const struct inet_sock *inet = inet_sk(sk); in dccp_do_pmtu_discovery()
153 const struct dccp_sock *dp = dccp_sk(sk); in dccp_do_pmtu_discovery()
159 if (sk->sk_state == DCCP_LISTEN) in dccp_do_pmtu_discovery()
162 dst = inet_csk_update_pmtu(sk, mtu); in dccp_do_pmtu_discovery()
169 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) in dccp_do_pmtu_discovery()
170 sk->sk_err_soft = EMSGSIZE; in dccp_do_pmtu_discovery()
175 ip_sk_accept_pmtu(sk) && in dccp_do_pmtu_discovery()
176 inet_csk(sk)->icsk_pmtu_cookie > mtu) { in dccp_do_pmtu_discovery()
177 dccp_sync_mss(sk, mtu); in dccp_do_pmtu_discovery()
186 dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC); in dccp_do_pmtu_discovery()
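
The dccp_do_pmtu_discovery() hits follow the classic PMTU pattern: ignore listeners, let inet_csk_update_pmtu() refresh the cached route, latch a soft EMSGSIZE, and resynchronize the MPS. A sketch assembled from the matched lines (comments paraphrase the in-tree ones; details may differ):

static inline void dccp_do_pmtu_discovery(struct sock *sk,
                                          const struct iphdr *iph, u32 mtu)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst;

        /* line 159: listening sockets have no path to update */
        if (sk->sk_state == DCCP_LISTEN)
                return;

        dst = inet_csk_update_pmtu(sk, mtu);
        if (dst == NULL)
                return;

        /* lines 169-170: remember a soft error in case the connection
         * cannot recover from the shrunken path */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);
        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            ip_sk_accept_pmtu(sk) &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                dccp_sync_mss(sk, mtu);
                /* RFC 4340, 14.1: DCCP-Sync is the safest upward probe,
                 * since it risks no application data (line 186). */
                dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
        }
}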
190 static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk) in dccp_do_redirect() argument
192 struct dst_entry *dst = __sk_dst_check(sk, 0); in dccp_do_redirect()
195 dst->ops->redirect(dst, sk, skb); in dccp_do_redirect()
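
The redirect handler is small enough that its three matches essentially reconstruct it: validate the cached route for sk and, if one is present, hand the redirect to the dst's ops:

static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);

        if (dst)
                dst->ops->redirect(dst, sk, skb);
}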
198 void dccp_req_err(struct sock *sk, u64 seq) in dccp_req_err() argument
200 struct request_sock *req = inet_reqsk(sk); in dccp_req_err()
201 struct net *net = sock_net(sk); in dccp_req_err()
207 WARN_ON(req->sk); in dccp_req_err()
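
dccp_req_err() handles ICMP errors aimed at a request socket (DCCP_NEW_SYN_RECV): the sock argument is really a request_sock in disguise, hence the inet_reqsk() cast, and the WARN_ON holds because ICMPs are never backlogged, so req->sk cannot already point at a full socket. A sketch of the handler, reconstructed from memory around the matches (the between48() window check is approximate):

void dccp_req_err(struct sock *sk, u64 seq)
{
        struct request_sock *req = inet_reqsk(sk);
        struct net *net = sock_net(sk);

        /* line 207: ICMPs are not backlogged, so no established
         * socket can be hanging off this request yet */
        WARN_ON(req->sk);

        if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
        } else {
                /* Drop the request silently: POSIX gives no way to
                 * report a network error from accept(). */
                inet_csk_reqsk_queue_drop(req->rsk_listener, req);
        }
        reqsk_put(req);
}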
245 struct sock *sk; in dccp_v4_err() local
256 sk = __inet_lookup_established(net, &dccp_hashinfo, in dccp_v4_err()
260 if (!sk) { in dccp_v4_err()
265 if (sk->sk_state == DCCP_TIME_WAIT) { in dccp_v4_err()
266 inet_twsk_put(inet_twsk(sk)); in dccp_v4_err()
270 if (sk->sk_state == DCCP_NEW_SYN_RECV) in dccp_v4_err()
271 return dccp_req_err(sk, seq); in dccp_v4_err()
273 bh_lock_sock(sk); in dccp_v4_err()
277 if (sock_owned_by_user(sk)) in dccp_v4_err()
280 if (sk->sk_state == DCCP_CLOSED) in dccp_v4_err()
283 dp = dccp_sk(sk); in dccp_v4_err()
284 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && in dccp_v4_err()
292 dccp_do_redirect(skb, sk); in dccp_v4_err()
305 if (!sock_owned_by_user(sk)) in dccp_v4_err()
306 dccp_do_pmtu_discovery(sk, iph, info); in dccp_v4_err()
319 switch (sk->sk_state) { in dccp_v4_err()
322 if (!sock_owned_by_user(sk)) { in dccp_v4_err()
324 sk->sk_err = err; in dccp_v4_err()
326 sk->sk_error_report(sk); in dccp_v4_err()
328 dccp_done(sk); in dccp_v4_err()
330 sk->sk_err_soft = err; in dccp_v4_err()
350 inet = inet_sk(sk); in dccp_v4_err()
351 if (!sock_owned_by_user(sk) && inet->recverr) { in dccp_v4_err()
352 sk->sk_err = err; in dccp_v4_err()
353 sk->sk_error_report(sk); in dccp_v4_err()
355 sk->sk_err_soft = err; in dccp_v4_err()
357 bh_unlock_sock(sk); in dccp_v4_err()
358 sock_put(sk); in dccp_v4_err()
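
dccp_v4_err() is the ICMP entry point, and the hits show the full life cycle of sk inside it: established lookup, the TIME_WAIT and NEW_SYN_RECV special cases, lock, state-dependent error delivery, unlock, and the final sock_put(). Condensed to just that skeleton (ICMP type/code dispatch and statistics elided):

/* Sketch: sk handling in dccp_v4_err() only. */
        sk = __inet_lookup_established(net, &dccp_hashinfo,
                                       iph->daddr, dh->dccph_dport,
                                       iph->saddr, ntohs(dh->dccph_sport),
                                       inet_iif(skb));
        if (!sk)
                return;                         /* line 260: no connection */

        if (sk->sk_state == DCCP_TIME_WAIT) {   /* lines 265-266 */
                inet_twsk_put(inet_twsk(sk));
                return;
        }
        if (sk->sk_state == DCCP_NEW_SYN_RECV)  /* lines 270-271 */
                return dccp_req_err(sk, seq);   /* request sock, no lock */

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))             /* line 277 */
                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
        if (sk->sk_state == DCCP_CLOSED)        /* line 280 */
                goto out;
        /* elided: redirect -> dccp_do_redirect() (line 292),
         * FRAG_NEEDED -> dccp_do_pmtu_discovery() (line 306),
         * hard errors -> sk->sk_err / sk_error_report / dccp_done()
         * (lines 322-330), soft errors -> sk->sk_err_soft (lines 350-355) */
out:
        bh_unlock_sock(sk);                     /* lines 357-358 */
        sock_put(sk);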
367 void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb) in dccp_v4_send_check() argument
369 const struct inet_sock *inet = inet_sk(sk); in dccp_v4_send_check()
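
dccp_v4_send_check() only needs sk for the addresses that feed the checksum pseudo-header; in the in-tree function that amounts to roughly:

void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
        const struct inet_sock *inet = inet_sk(sk);
        struct dccp_hdr *dh = dccp_hdr(skb);

        dccp_csum_outgoing(skb);
        dh->dccph_checksum = dccp_v4_csum_finish(skb,
                                                 inet->inet_saddr,
                                                 inet->inet_daddr);
}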
393 struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, in dccp_v4_request_recv_sock() argument
401 if (sk_acceptq_is_full(sk)) in dccp_v4_request_recv_sock()
404 newsk = dccp_create_openreq_child(sk, req, skb); in dccp_v4_request_recv_sock()
419 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) in dccp_v4_request_recv_sock()
426 if (__inet_inherit_port(sk, newsk) < 0) in dccp_v4_request_recv_sock()
433 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in dccp_v4_request_recv_sock()
437 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in dccp_v4_request_recv_sock()
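
dccp_v4_request_recv_sock() turns a completed handshake into a child socket; the matches trace the accept-queue check, child creation, routing, and port inheritance. A sketch with the address/option copying elided (error-path ordering is from memory and approximate):

struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
                                       struct request_sock *req,
                                       struct dst_entry *dst)
{
        struct sock *newsk;

        if (sk_acceptq_is_full(sk))             /* line 401 */
                goto exit_overflow;

        newsk = dccp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto exit_nonewsk;

        /* elided: copy addresses and IP options from req into newsk */

        if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
                goto put_and_exit;              /* line 419 */

        sk_setup_caps(newsk, dst);
        dccp_sync_mss(newsk, dst_mtu(dst));

        if (__inet_inherit_port(sk, newsk) < 0) /* line 426 */
                goto put_and_exit;
        __inet_hash_nolisten(newsk, NULL);
        return newsk;

exit_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
        dst_release(dst);
exit:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
put_and_exit:
        inet_csk_prepare_forced_close(newsk);
        dccp_done(newsk);
        goto exit;
}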
446 static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) in dccp_v4_hnd_req() argument
452 struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport, in dccp_v4_hnd_req()
455 nsk = dccp_check_req(sk, skb, req); in dccp_v4_hnd_req()
460 nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo, in dccp_v4_hnd_req()
473 return sk; in dccp_v4_hnd_req()
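
dccp_v4_hnd_req() is the listener's demultiplexer: first look for a pending request, then for an established socket (possibly in TIME_WAIT), and only if both miss fall through to the listening sk itself (line 473). A sketch of that order, reconstructed from the matches (the inet_csk_search_req() signature is era-dependent):

static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;

        /* line 452: is there a pending connection request? */
        struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport,
                                                       iph->saddr, iph->daddr);
        if (req) {
                nsk = dccp_check_req(sk, skb, req);     /* line 455 */
                if (!nsk)
                        reqsk_put(req);
                return nsk;
        }
        nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
                                      iph->saddr, dh->dccph_sport,
                                      iph->daddr, dh->dccph_dport,
                                      inet_iif(skb));
        if (nsk != NULL) {
                if (nsk->sk_state != DCCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }
        return sk;
}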
476 static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, in dccp_v4_route_skb() argument
485 .flowi4_tos = RT_CONN_FLAGS(sk), in dccp_v4_route_skb()
486 .flowi4_proto = sk->sk_protocol, in dccp_v4_route_skb()
492 rt = ip_route_output_flow(net, &fl4, sk); in dccp_v4_route_skb()
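
For control packets there is no established route, so dccp_v4_route_skb() builds a reverse flow by swapping the addresses and ports of the received skb; sk contributes only the TOS and protocol fields (lines 485-486). Reconstructed around the matches:

static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
                                           struct sk_buff *skb)
{
        struct rtable *rt;
        const struct iphdr *iph = ip_hdr(skb);
        struct flowi4 fl4 = {
                .flowi4_oif   = inet_iif(skb),
                .daddr        = iph->saddr,     /* reply goes back */
                .saddr        = iph->daddr,
                .flowi4_tos   = RT_CONN_FLAGS(sk),
                .flowi4_proto = sk->sk_protocol,
                .fl4_sport    = dccp_hdr(skb)->dccph_dport,
                .fl4_dport    = dccp_hdr(skb)->dccph_sport,
        };

        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);       /* line 492 */
        if (IS_ERR(rt)) {
                IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }

        return &rt->dst;
}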
501 static int dccp_v4_send_response(struct sock *sk, struct request_sock *req) in dccp_v4_send_response() argument
508 dst = inet_csk_route_req(sk, &fl4, req); in dccp_v4_send_response()
512 skb = dccp_make_response(sk, dst, req); in dccp_v4_send_response()
519 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, in dccp_v4_send_response()
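
dccp_v4_send_response() answers a DCCP-Request on behalf of the listener: route via the request, build the Response, checksum it, and transmit with ip_build_and_send_pkt(). A sketch around the matches:

static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
{
        int err = -1;
        struct sk_buff *skb;
        struct dst_entry *dst;
        struct flowi4 fl4;

        dst = inet_csk_route_req(sk, &fl4, req);        /* line 508 */
        if (dst == NULL)
                goto out;

        skb = dccp_make_response(sk, dst, req);         /* line 512 */
        if (skb != NULL) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct dccp_hdr *dh = dccp_hdr(skb);

                dh->dccph_checksum = dccp_v4_csum_finish(skb,
                                                         ireq->ir_loc_addr,
                                                         ireq->ir_rmt_addr);
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            ireq->opt); /* line 519 */
                err = net_xmit_eval(err);
        }

out:
        dst_release(dst);
        return err;
}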
530 static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) in dccp_v4_ctl_send_reset() argument
593 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) in dccp_v4_conn_request() argument
605 if (dccp_bad_service_code(sk, service)) { in dccp_v4_conn_request()
615 if (inet_csk_reqsk_queue_is_full(sk)) in dccp_v4_conn_request()
624 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) in dccp_v4_conn_request()
627 req = inet_reqsk_alloc(&dccp_request_sock_ops, sk); in dccp_v4_conn_request()
631 if (dccp_reqsk_init(req, dccp_sk(sk), skb)) in dccp_v4_conn_request()
635 if (dccp_parse_options(sk, dreq, skb)) in dccp_v4_conn_request()
638 if (security_inet_conn_request(sk, skb, req)) in dccp_v4_conn_request()
645 ireq->ir_iif = sk->sk_bound_dev_if; in dccp_v4_conn_request()
660 if (dccp_v4_send_response(sk, req)) in dccp_v4_conn_request()
663 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); in dccp_v4_conn_request()
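
The dccp_v4_conn_request() hits enumerate the listener-side admission checks in order: service code, request queue, accept queue, then request allocation, option parsing, LSM hook, response, and finally hashing the request with its timeout. Condensed to those steps (sequence-number setup and statistics elided; the address assignments are from memory):

int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct inet_request_sock *ireq;
        struct request_sock *req;

        if (dccp_bad_service_code(sk, service))         /* line 605 */
                goto drop;
        if (inet_csk_reqsk_queue_is_full(sk))           /* line 615 */
                goto drop;
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;                              /* line 624 */

        req = inet_reqsk_alloc(&dccp_request_sock_ops, sk);
        if (req == NULL)
                goto drop;

        if (dccp_reqsk_init(req, dccp_sk(sk), skb))     /* line 631 */
                goto drop_and_free;
        if (dccp_parse_options(sk, dccp_rsk(req), skb)) /* line 635 */
                goto drop_and_free;
        if (security_inet_conn_request(sk, skb, req))   /* line 638 */
                goto drop_and_free;

        ireq = inet_rsk(req);
        ireq->ir_loc_addr = ip_hdr(skb)->daddr;
        ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
        ireq->ir_iif      = sk->sk_bound_dev_if;        /* line 645 */

        if (dccp_v4_send_response(sk, req))             /* line 660 */
                goto drop_and_free;

        inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
        return 0;

drop_and_free:
        reqsk_free(req);
drop:
        return -1;
}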
674 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) in dccp_v4_do_rcv() argument
678 if (sk->sk_state == DCCP_OPEN) { /* Fast path */ in dccp_v4_do_rcv()
679 if (dccp_rcv_established(sk, skb, dh, skb->len)) in dccp_v4_do_rcv()
707 if (sk->sk_state == DCCP_LISTEN) { in dccp_v4_do_rcv()
708 struct sock *nsk = dccp_v4_hnd_req(sk, skb); in dccp_v4_do_rcv()
713 if (nsk != sk) { in dccp_v4_do_rcv()
714 if (dccp_child_process(sk, nsk, skb)) in dccp_v4_do_rcv()
720 if (dccp_rcv_state_process(sk, skb, dh, skb->len)) in dccp_v4_do_rcv()
725 dccp_v4_ctl_send_reset(sk, skb); in dccp_v4_do_rcv()
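
dccp_v4_do_rcv() shows a three-way dispatch on sk->sk_state: the OPEN fast path, the LISTEN path through dccp_v4_hnd_req() and dccp_child_process(), and the generic state machine; any failure funnels into the reset at line 725. The matches give the structure almost completely:

int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct dccp_hdr *dh = dccp_hdr(skb);

        if (sk->sk_state == DCCP_OPEN) { /* Fast path (line 678) */
                if (dccp_rcv_established(sk, skb, dh, skb->len))
                        goto reset;
                return 0;
        }

        if (sk->sk_state == DCCP_LISTEN) {              /* line 707 */
                struct sock *nsk = dccp_v4_hnd_req(sk, skb);

                if (nsk == NULL)
                        goto discard;
                if (nsk != sk) {                        /* line 713 */
                        if (dccp_child_process(sk, nsk, skb))
                                goto reset;
                        return 0;
                }
        }

        if (dccp_rcv_state_process(sk, skb, dh, skb->len))
                goto reset;                             /* line 720 */
        return 0;

reset:
        dccp_v4_ctl_send_reset(sk, skb);                /* line 725 */
discard:
        kfree_skb(skb);
        return 0;
}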
809 struct sock *sk; in dccp_v4_rcv() local
846 sk = __inet_lookup_skb(&dccp_hashinfo, skb, in dccp_v4_rcv()
852 if (sk == NULL) { in dccp_v4_rcv()
864 if (sk->sk_state == DCCP_TIME_WAIT) { in dccp_v4_rcv()
866 inet_twsk_put(inet_twsk(sk)); in dccp_v4_rcv()
875 min_cov = dccp_sk(sk)->dccps_pcrlen; in dccp_v4_rcv()
885 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) in dccp_v4_rcv()
889 return sk_receive_skb(sk, skb, 1); in dccp_v4_rcv()
903 dccp_v4_ctl_send_reset(sk, skb); in dccp_v4_rcv()
911 sock_put(sk); in dccp_v4_rcv()
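
dccp_v4_rcv() is the softirq entry point; its hits cover socket lookup, the TIME_WAIT detour, the minimum checksum-coverage gate, the XFRM policy check, and delivery. The sk-related skeleton (header validation and the no-socket reset path elided):

/* Sketch: sk handling in dccp_v4_rcv() only. */
        sk = __inet_lookup_skb(&dccp_hashinfo, skb,
                               dh->dccph_sport, dh->dccph_dport);
        if (sk == NULL)
                goto no_dccp_socket;                    /* line 852 */

        if (sk->sk_state == DCCP_TIME_WAIT) {           /* lines 864-866 */
                inet_twsk_put(inet_twsk(sk));
                goto no_dccp_socket;
        }

        /* line 875: enforce the receiver's minimum checksum coverage */
        min_cov = dccp_sk(sk)->dccps_pcrlen;
        if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov))
                goto discard_and_relse;

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;                 /* line 885 */

        return sk_receive_skb(sk, skb, 1);      /* line 889: deliver or
                                                 * backlog under the lock */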
933 static int dccp_v4_init_sock(struct sock *sk) in dccp_v4_init_sock() argument
936 int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized); in dccp_v4_init_sock()
941 inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops; in dccp_v4_init_sock()
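
Finally, dccp_v4_init_sock() delegates to the shared dccp_init_sock() and then points the connection-socket ops at the IPv4 af_ops; the in-tree function is roughly:

static int dccp_v4_init_sock(struct sock *sk)
{
        static __u8 dccp_v4_ctl_sock_initialized;
        int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);

        if (err == 0) {
                if (unlikely(!dccp_v4_ctl_sock_initialized))
                        dccp_v4_ctl_sock_initialized = 1;
                inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
        }
        return err;
}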