Lines matching refs:sk (net/ipv4/tcp_minisocks.c)
269 void tcp_time_wait(struct sock *sk, int state, int timeo) in tcp_time_wait() argument
271 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_time_wait()
272 const struct tcp_sock *tp = tcp_sk(sk); in tcp_time_wait()
277 recycle_ok = tcp_remember_stamp(sk); in tcp_time_wait()
279 tw = inet_twsk_alloc(sk, &tcp_death_row, state); in tcp_time_wait()
284 struct inet_sock *inet = inet_sk(sk); in tcp_time_wait()
298 struct ipv6_pinfo *np = inet6_sk(sk); in tcp_time_wait()
300 tw->tw_v6_daddr = sk->sk_v6_daddr; in tcp_time_wait()
301 tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr; in tcp_time_wait()
304 tw->tw_ipv6only = sk->sk_ipv6only; in tcp_time_wait()
318 key = tp->af_specific->md5_lookup(sk, sk); in tcp_time_wait()
341 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); in tcp_time_wait()
348 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW); in tcp_time_wait()
351 tcp_update_metrics(sk); in tcp_time_wait()
352 tcp_done(sk); in tcp_time_wait()
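Taken together, the references above trace tcp_time_wait() handing a closing connection off to the lightweight TIME_WAIT representation: tcp_remember_stamp() records the peer's timestamp when tw_recycle is enabled, inet_twsk_alloc() builds the inet_timewait_sock, the IPv4/IPv6 addresses and (under CONFIG_TCP_MD5SIG) the MD5 key are copied across, __inet_twsk_hashdance() swaps the two sockets in the hash tables, and tcp_update_metrics() plus tcp_done() retire the full sock. If the allocation fails, LINUX_MIB_TCPTIMEWAITOVERFLOW is counted and the connection closes without a TIME_WAIT entry. A minimal call-site sketch (kernel context assumed; this mirrors how tcp_fin() enters TIME_WAIT and is an illustration, not standalone code):

    /* Our FIN was ACKed and the peer's FIN has arrived: move to
     * TIME_WAIT. A timeo of 0 is raised internally to the default
     * (TCP_TIMEWAIT_LEN, unless tw_recycle shortens it to the RTO). */
    tcp_time_wait(sk, TCP_TIME_WAIT, 0);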
355 void tcp_twsk_destructor(struct sock *sk) in tcp_twsk_destructor() argument
358 struct tcp_timewait_sock *twsk = tcp_twsk(sk); in tcp_twsk_destructor()
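tcp_twsk_destructor() has little to release: the only dynamically held state referenced here is the MD5 key copied over in tcp_time_wait(). A sketch of the body as it looks in kernels of this vintage (assumption: CONFIG_TCP_MD5SIG is set; field names come from the line above):

    void tcp_twsk_destructor(struct sock *sk)
    {
    #ifdef CONFIG_TCP_MD5SIG
            struct tcp_timewait_sock *twsk = tcp_twsk(sk);

            /* Free the key only after a grace period, since softirq
             * readers may still be looking at it. */
            if (twsk->tw_md5_key)
                    kfree_rcu(twsk->tw_md5_key, rcu);
    #endif
    }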
367 struct sock *sk, struct dst_entry *dst) in tcp_openreq_init_rwin() argument
370 struct tcp_sock *tp = tcp_sk(sk); in tcp_openreq_init_rwin()
381 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_openreq_init_rwin()
382 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) in tcp_openreq_init_rwin()
383 req->window_clamp = tcp_full_space(sk); in tcp_openreq_init_rwin()
386 tcp_select_initial_window(tcp_full_space(sk), in tcp_openreq_init_rwin()
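tcp_openreq_init_rwin() computes the initial receive window advertised in the SYN-ACK. The sk_userlocks test above is the notable step: when the application has pinned SO_RCVBUF (SOCK_RCVBUF_LOCK), a window clamp that is unset or larger than tcp_full_space(sk) is pulled down to the usable receive space before tcp_select_initial_window() runs. A self-contained userspace model of just that clamp (names are illustrative; full_space stands in for tcp_full_space(sk)):

    #include <stdio.h>

    /* Mirrors the SOCK_RCVBUF_LOCK clamp in tcp_openreq_init_rwin(). */
    static unsigned int clamp_req_window(unsigned int window_clamp,
                                         unsigned int full_space,
                                         int rcvbuf_locked)
    {
            if (rcvbuf_locked &&
                (window_clamp > full_space || window_clamp == 0))
                    window_clamp = full_space;
            return window_clamp;
    }

    int main(void)
    {
            printf("%u\n", clamp_req_window(0, 65536, 1));     /* 65536 */
            printf("%u\n", clamp_req_window(32768, 65536, 1)); /* 32768 */
            return 0;
    }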
403 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) in tcp_ca_openreq_child() argument
405 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_openreq_child()
426 tcp_assign_congestion_control(sk); in tcp_ca_openreq_child()
428 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_ca_openreq_child()
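tcp_ca_openreq_child() picks the congestion-control algorithm for the soon-to-be child socket. In kernels of this vintage the route can carry a preferred algorithm (RTAX_CC_ALGO); when no module reference can be taken for that choice or for the listener's algorithm, the two calls above form the fallback path (kernel-context sketch):

    /* No usable CA module from the route metric or the listener:
     * fall back to the system default. Every child then starts in
     * the quiescent Open state. */
    tcp_assign_congestion_control(sk);
    tcp_set_ca_state(sk, TCP_CA_Open);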
438 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb) in tcp_create_openreq_child() argument
440 struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); in tcp_create_openreq_child()
531 if (newtp->af_specific->md5_lookup(sk, newsk)) in tcp_create_openreq_child()
541 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS); in tcp_create_openreq_child()
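tcp_create_openreq_child() turns a validated request into a full socket by cloning the listener; note the GFP_ATOMIC, required because this runs from softirq context while the handshake's final ACK is being processed. The md5_lookup() against the listener decides whether the child must be ready to sign segments, and each successful clone is counted as a passive open. Call-shape sketch (kernel context assumed, error handling abbreviated):

    struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

    if (!newsk)
            return NULL;  /* out of memory: the final ACK is simply dropped */
    /* ... per-connection TCP state is then initialized on newsk ... */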
558 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, in tcp_check_req() argument
568 BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN)); in tcp_check_req()
612 if (!tcp_oow_rate_limited(sock_net(sk), skb, in tcp_check_req()
616 !inet_rtx_syn_ack(sk, req)) { in tcp_check_req()
689 return sk; in tcp_check_req()
702 req->rsk_ops->send_ack(sk, skb, req); in tcp_check_req()
704 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); in tcp_check_req()
723 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); in tcp_check_req()
740 return sk; in tcp_check_req()
743 if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && in tcp_check_req()
746 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); in tcp_check_req()
756 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); in tcp_check_req()
760 inet_csk_reqsk_queue_drop(sk, req); in tcp_check_req()
761 inet_csk_reqsk_queue_add(sk, req, child); in tcp_check_req()
780 req->rsk_ops->send_reset(sk, skb); in tcp_check_req()
782 reqsk_fastopen_remove(sk, req, true); in tcp_check_req()
783 tcp_reset(sk); in tcp_check_req()
786 inet_csk_reqsk_queue_drop(sk, req); in tcp_check_req()
787 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); in tcp_check_req()
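tcp_check_req() is the SYN_RECV state machine that the remaining references walk through. A condensed map of its outcomes (summary comments, not verbatim kernel code):

    /* Rough control flow of tcp_check_req():
     *
     *   retransmitted SYN     -> re-send the SYN-ACK via inet_rtx_syn_ack()
     *                            unless tcp_oow_rate_limited() suppresses it;
     *                            the request sock is kept
     *   PAWS reject / bad seq -> answer with rsk_ops->send_ack() and count
     *                            LINUX_MIB_PAWSESTABREJECTED
     *   bare ACK, defer_accept armed
     *                         -> drop it, count LINUX_MIB_TCPDEFERACCEPTDROP,
     *                            and keep waiting for data
     *   acceptable ACK        -> icsk_af_ops->syn_recv_sock() creates the
     *                            child and inet_csk_reqsk_queue_add() moves
     *                            the request to the accept queue
     *   RST or unexpected SYN -> reset the peer with rsk_ops->send_reset()
     *                            (unless the packet was itself an RST), drop
     *                            the request, and count
     *                            LINUX_MIB_EMBRYONICRSTS; a fastopen socket
     *                            is torn down via reqsk_fastopen_remove()
     *                            and tcp_reset()
     */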