Lines matching refs: sk — identifier cross-reference hits for sk in the TCP socket core (apparently net/ipv4/tcp.c). Each entry gives the kernel source line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks whether sk is a parameter or a local variable at that site.
329 void tcp_enter_memory_pressure(struct sock *sk) in tcp_enter_memory_pressure() argument
332 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); in tcp_enter_memory_pressure()
380 void tcp_init_sock(struct sock *sk) in tcp_init_sock() argument
382 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_sock()
383 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock()
386 tcp_init_xmit_timers(sk); in tcp_init_sock()
410 tcp_assign_congestion_control(sk); in tcp_init_sock()
414 sk->sk_state = TCP_CLOSE; in tcp_init_sock()
416 sk->sk_write_space = sk_stream_write_space; in tcp_init_sock()
417 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); in tcp_init_sock()
421 sk->sk_sndbuf = sysctl_tcp_wmem[1]; in tcp_init_sock()
422 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; in tcp_init_sock()
425 sock_update_memcg(sk); in tcp_init_sock()
426 sk_sockets_allocated_inc(sk); in tcp_init_sock()
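tcp_init_sock() above seeds a fresh socket: timers, congestion control, TCP_CLOSE state, and send/receive buffers taken from sysctl_tcp_wmem[1]/sysctl_tcp_rmem[1]. A minimal user-space sketch, assuming Linux defaults, that reads those initial buffer sizes back through SO_SNDBUF/SO_RCVBUF:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            int sndbuf = 0, rcvbuf = 0;
            socklen_t len = sizeof(sndbuf);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* A new TCP socket starts with sk_sndbuf/sk_rcvbuf set from
             * tcp_wmem[1]/tcp_rmem[1]; SO_SNDBUF/SO_RCVBUF read them back. */
            getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len);
            len = sizeof(rcvbuf);
            getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len);
            printf("default sndbuf=%d rcvbuf=%d\n", sndbuf, rcvbuf);
            close(fd);
            return 0;
    }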
431 static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb) in tcp_tx_timestamp() argument
433 if (sk->sk_tsflags) { in tcp_tx_timestamp()
436 sock_tx_timestamp(sk, &shinfo->tx_flags); in tcp_tx_timestamp()
452 struct sock *sk = sock->sk; in tcp_poll() local
453 const struct tcp_sock *tp = tcp_sk(sk); in tcp_poll()
455 sock_rps_record_flow(sk); in tcp_poll()
457 sock_poll_wait(file, sk_sleep(sk), wait); in tcp_poll()
458 if (sk->sk_state == TCP_LISTEN) in tcp_poll()
459 return inet_csk_listen_poll(sk); in tcp_poll()
495 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) in tcp_poll()
497 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_poll()
501 if (sk->sk_state != TCP_SYN_SENT && in tcp_poll()
502 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) { in tcp_poll()
503 int target = sock_rcvlowat(sk, 0, INT_MAX); in tcp_poll()
506 !sock_flag(sk, SOCK_URGINLINE) && in tcp_poll()
516 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in tcp_poll()
517 if (sk_stream_is_writeable(sk)) { in tcp_poll()
521 &sk->sk_socket->flags); in tcp_poll()
522 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_poll()
530 if (sk_stream_is_writeable(sk)) in tcp_poll()
541 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in tcp_poll()
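tcp_poll() maps socket state onto a poll mask: readability once sock_rcvlowat() bytes are queued, writability when sk_stream_is_writeable(), POLLRDHUP/POLLHUP on shutdown, and POLLERR when sk_err or the error queue is non-empty. A hedged sketch of driving this from user space with poll(2); the fd is assumed to be a connected TCP socket:

    #define _GNU_SOURCE          /* for POLLRDHUP */
    #include <poll.h>
    #include <stdio.h>

    /* Wait for readability, writability, or a peer half-close; the
     * revents bits mirror the mask computed by tcp_poll(). */
    int wait_on_tcp_fd(int fd, int timeout_ms)
    {
            struct pollfd pfd = {
                    .fd = fd,
                    .events = POLLIN | POLLOUT | POLLRDHUP,
            };
            int n = poll(&pfd, 1, timeout_ms);

            if (n > 0) {
                    if (pfd.revents & POLLERR)
                            printf("pending socket error (sk_err / error queue)\n");
                    if (pfd.revents & POLLRDHUP)
                            printf("peer sent FIN (RCV_SHUTDOWN)\n");
                    if (pfd.revents & POLLIN)
                            printf("at least SO_RCVLOWAT bytes readable\n");
                    if (pfd.revents & POLLOUT)
                            printf("send buffer has room\n");
            }
            return n;
    }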
548 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) in tcp_ioctl() argument
550 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl()
556 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
559 slow = lock_sock_fast(sk); in tcp_ioctl()
560 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
562 else if (sock_flag(sk, SOCK_URGINLINE) || in tcp_ioctl()
570 if (answ && sock_flag(sk, SOCK_DONE)) in tcp_ioctl()
574 unlock_sock_fast(sk, slow); in tcp_ioctl()
580 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
583 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
589 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
592 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
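tcp_ioctl() above serves SIOCINQ (unread bytes in the receive queue), SIOCATMARK, SIOCOUTQ and SIOCOUTQNSD. A small sketch querying the receive and write queues; it assumes the Linux-specific constants from <linux/sockios.h>:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>   /* SIOCINQ, SIOCOUTQ */

    /* Report how much data is waiting to be read and how much is still
     * queued for transmission (sent-but-unacked plus unsent). */
    void report_tcp_queues(int fd)
    {
            int inq = 0, outq = 0;

            if (ioctl(fd, SIOCINQ, &inq) == 0)
                    printf("unread bytes in receive queue: %d\n", inq);
            if (ioctl(fd, SIOCOUTQ, &outq) == 0)
                    printf("unacked/unsent bytes in write queue: %d\n", outq);
    }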
616 static void skb_entail(struct sock *sk, struct sk_buff *skb) in skb_entail() argument
618 struct tcp_sock *tp = tcp_sk(sk); in skb_entail()
626 tcp_add_write_queue_tail(sk, skb); in skb_entail()
627 sk->sk_wmem_queued += skb->truesize; in skb_entail()
628 sk_mem_charge(sk, skb->truesize); in skb_entail()
649 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, in tcp_should_autocork() argument
654 skb != tcp_write_queue_head(sk) && in tcp_should_autocork()
655 atomic_read(&sk->sk_wmem_alloc) > skb->truesize; in tcp_should_autocork()
658 static void tcp_push(struct sock *sk, int flags, int mss_now, in tcp_push() argument
661 struct tcp_sock *tp = tcp_sk(sk); in tcp_push()
664 if (!tcp_send_head(sk)) in tcp_push()
667 skb = tcp_write_queue_tail(sk); in tcp_push()
673 if (tcp_should_autocork(sk, skb, size_goal)) { in tcp_push()
677 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); in tcp_push()
683 if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize) in tcp_push()
690 __tcp_push_pending_frames(sk, mss_now, nonagle); in tcp_push()
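tcp_push() may hold a segment back when tcp_should_autocork() sees earlier data still queued (counted as LINUX_MIB_TCPAUTOCORKING). When the application wants that coalescing under its own control it can cork explicitly; a sketch using TCP_CORK (the header/body payloads are placeholders):

    #include <netinet/in.h>
    #include <netinet/tcp.h>     /* TCP_CORK */
    #include <string.h>
    #include <sys/socket.h>

    /* Hold small writes back with TCP_CORK, then uncork so the queued
     * data is pushed out as full-sized segments. */
    void send_corked(int fd, const char *hdr, const char *body)
    {
            int on = 1, off = 0;

            setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
            send(fd, hdr, strlen(hdr), 0);
            send(fd, body, strlen(body), 0);
            setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
    }

Autocorking itself is a global behaviour toggled by the net.ipv4.tcp_autocorking sysctl.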
706 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) in __tcp_splice_read() argument
714 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); in __tcp_splice_read()
733 struct sock *sk = sock->sk; in tcp_splice_read() local
743 sock_rps_record_flow(sk); in tcp_splice_read()
752 lock_sock(sk); in tcp_splice_read()
754 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); in tcp_splice_read()
756 ret = __tcp_splice_read(sk, &tss); in tcp_splice_read()
762 if (sock_flag(sk, SOCK_DONE)) in tcp_splice_read()
764 if (sk->sk_err) { in tcp_splice_read()
765 ret = sock_error(sk); in tcp_splice_read()
768 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_splice_read()
770 if (sk->sk_state == TCP_CLOSE) { in tcp_splice_read()
775 if (!sock_flag(sk, SOCK_DONE)) in tcp_splice_read()
783 sk_wait_data(sk, &timeo); in tcp_splice_read()
795 release_sock(sk); in tcp_splice_read()
796 lock_sock(sk); in tcp_splice_read()
798 if (sk->sk_err || sk->sk_state == TCP_CLOSE || in tcp_splice_read()
799 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_splice_read()
804 release_sock(sk); in tcp_splice_read()
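__tcp_splice_read()/tcp_splice_read() implement the receive side of splice(2): socket data is handed to a pipe without an intermediate user-space copy. A hedged sketch of the matching call sequence, assuming _GNU_SOURCE and an already-connected socket:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    /* Move up to 'len' bytes from a connected TCP socket into a file,
     * bouncing through a pipe so no user-space buffer is needed. */
    ssize_t splice_socket_to_file(int sockfd, int filefd, size_t len)
    {
            int p[2];
            ssize_t in, out;

            if (pipe(p) < 0)
                    return -1;
            /* Socket -> pipe: this is the path served by tcp_splice_read(). */
            in = splice(sockfd, NULL, p[1], NULL, len, SPLICE_F_MOVE);
            if (in > 0)
                    /* Pipe -> file. */
                    out = splice(p[0], NULL, filefd, NULL, in, SPLICE_F_MOVE);
            else
                    out = in;
            close(p[0]);
            close(p[1]);
            return out;
    }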
813 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) in sk_stream_alloc_skb() argument
820 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); in sk_stream_alloc_skb()
822 if (sk_wmem_schedule(sk, skb->truesize)) { in sk_stream_alloc_skb()
823 skb_reserve(skb, sk->sk_prot->max_header); in sk_stream_alloc_skb()
833 sk->sk_prot->enter_memory_pressure(sk); in sk_stream_alloc_skb()
834 sk_stream_moderate_sndbuf(sk); in sk_stream_alloc_skb()
839 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, in tcp_xmit_size_goal() argument
842 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal()
845 if (!large_allowed || !sk_can_gso(sk)) in tcp_xmit_size_goal()
849 new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; in tcp_xmit_size_goal()
857 sk->sk_gso_max_segs); in tcp_xmit_size_goal()
864 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) in tcp_send_mss() argument
868 mss_now = tcp_current_mss(sk); in tcp_send_mss()
869 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); in tcp_send_mss()
874 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, in do_tcp_sendpages() argument
877 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_sendpages()
881 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in do_tcp_sendpages()
887 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in do_tcp_sendpages()
888 !tcp_passive_fastopen(sk)) { in do_tcp_sendpages()
889 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) in do_tcp_sendpages()
893 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); in do_tcp_sendpages()
895 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
899 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in do_tcp_sendpages()
903 struct sk_buff *skb = tcp_write_queue_tail(sk); in do_tcp_sendpages()
907 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { in do_tcp_sendpages()
909 if (!sk_stream_memory_free(sk)) in do_tcp_sendpages()
912 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); in do_tcp_sendpages()
916 skb_entail(sk, skb); in do_tcp_sendpages()
929 if (!sk_wmem_schedule(sk, copy)) in do_tcp_sendpages()
943 sk->sk_wmem_queued += copy; in do_tcp_sendpages()
944 sk_mem_charge(sk, copy); in do_tcp_sendpages()
956 tcp_tx_timestamp(sk, skb); in do_tcp_sendpages()
965 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); in do_tcp_sendpages()
966 } else if (skb == tcp_send_head(sk)) in do_tcp_sendpages()
967 tcp_push_one(sk, mss_now); in do_tcp_sendpages()
971 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in do_tcp_sendpages()
973 tcp_push(sk, flags & ~MSG_MORE, mss_now, in do_tcp_sendpages()
976 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) in do_tcp_sendpages()
979 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
984 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in do_tcp_sendpages()
991 return sk_stream_error(sk, flags, err); in do_tcp_sendpages()
994 int tcp_sendpage(struct sock *sk, struct page *page, int offset, in tcp_sendpage() argument
999 if (!(sk->sk_route_caps & NETIF_F_SG) || in tcp_sendpage()
1000 !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) in tcp_sendpage()
1001 return sock_no_sendpage(sk->sk_socket, page, offset, size, in tcp_sendpage()
1004 lock_sock(sk); in tcp_sendpage()
1005 res = do_tcp_sendpages(sk, page, offset, size, flags); in tcp_sendpage()
1006 release_sock(sk); in tcp_sendpage()
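tcp_sendpage() is the transmit-side zero-copy entry point used by sendfile(2) and splice-to-socket; when the route lacks NETIF_F_SG or checksum offload it falls back to sock_no_sendpage(). A sketch of the user-facing call (the descriptor names are illustrative):

    #include <sys/sendfile.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    /* Transmit a whole regular file over a connected TCP socket; the
     * kernel services this through do_tcp_sendpages() when possible. */
    ssize_t send_file_over_tcp(int sockfd, int filefd)
    {
            struct stat st;
            off_t off = 0;

            if (fstat(filefd, &st) < 0)
                    return -1;
            return sendfile(sockfd, filefd, &off, st.st_size);
    }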
1011 static inline int select_size(const struct sock *sk, bool sg) in select_size() argument
1013 const struct tcp_sock *tp = tcp_sk(sk); in select_size()
1017 if (sk_can_gso(sk)) { in select_size()
1042 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, in tcp_sendmsg_fastopen() argument
1045 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_fastopen()
1054 sk->sk_allocation); in tcp_sendmsg_fastopen()
1061 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, in tcp_sendmsg_fastopen()
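tcp_sendmsg_fastopen() handles MSG_FASTOPEN: it stores the request and lets __inet_stream_connect() carry the payload in the SYN. From user space that is a sendto() on a not-yet-connected socket; the sketch below assumes MSG_FASTOPEN is available (Linux 3.7+ headers) and that the client side of net.ipv4.tcp_fastopen is enabled:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Try to send 'buf' in the SYN of a TCP Fast Open connection; the
     * kernel falls back to a normal handshake if no cookie is cached. */
    ssize_t tfo_connect_and_send(int fd, const char *ip, int port,
                                 const void *buf, size_t len)
    {
            struct sockaddr_in dst;

            memset(&dst, 0, sizeof(dst));
            dst.sin_family = AF_INET;
            dst.sin_port = htons(port);
            inet_pton(AF_INET, ip, &dst.sin_addr);

            /* MSG_FASTOPEN performs the connect() implicitly. */
            return sendto(fd, buf, len, MSG_FASTOPEN,
                          (struct sockaddr *)&dst, sizeof(dst));
    }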
1068 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) in tcp_sendmsg() argument
1070 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg()
1077 lock_sock(sk); in tcp_sendmsg()
1081 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); in tcp_sendmsg()
1088 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in tcp_sendmsg()
1094 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in tcp_sendmsg()
1095 !tcp_passive_fastopen(sk)) { in tcp_sendmsg()
1096 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) in tcp_sendmsg()
1102 copied = tcp_send_rcvq(sk, msg, size); in tcp_sendmsg()
1114 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); in tcp_sendmsg()
1116 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg()
1122 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in tcp_sendmsg()
1125 sg = !!(sk->sk_route_caps & NETIF_F_SG); in tcp_sendmsg()
1131 skb = tcp_write_queue_tail(sk); in tcp_sendmsg()
1132 if (tcp_send_head(sk)) { in tcp_sendmsg()
1143 if (!sk_stream_memory_free(sk)) in tcp_sendmsg()
1146 skb = sk_stream_alloc_skb(sk, in tcp_sendmsg()
1147 select_size(sk, sg), in tcp_sendmsg()
1148 sk->sk_allocation); in tcp_sendmsg()
1155 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) in tcp_sendmsg()
1158 skb_entail(sk, skb); in tcp_sendmsg()
1178 err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); in tcp_sendmsg()
1184 struct page_frag *pfrag = sk_page_frag(sk); in tcp_sendmsg()
1186 if (!sk_page_frag_refill(sk, pfrag)) in tcp_sendmsg()
1200 if (!sk_wmem_schedule(sk, copy)) in tcp_sendmsg()
1203 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, in tcp_sendmsg()
1230 tcp_tx_timestamp(sk, skb); in tcp_sendmsg()
1239 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); in tcp_sendmsg()
1240 } else if (skb == tcp_send_head(sk)) in tcp_sendmsg()
1241 tcp_push_one(sk, mss_now); in tcp_sendmsg()
1245 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_sendmsg()
1248 tcp_push(sk, flags & ~MSG_MORE, mss_now, in tcp_sendmsg()
1251 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) in tcp_sendmsg()
1254 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg()
1259 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in tcp_sendmsg()
1261 release_sock(sk); in tcp_sendmsg()
1266 tcp_unlink_write_queue(skb, sk); in tcp_sendmsg()
1270 tcp_check_send_head(sk, skb); in tcp_sendmsg()
1271 sk_wmem_free_skb(sk, skb); in tcp_sendmsg()
1278 err = sk_stream_error(sk, flags, err); in tcp_sendmsg()
1279 release_sock(sk); in tcp_sendmsg()
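The tcp_sendmsg() lines above show the back-pressure path: when sk_stream_memory_free() fails, a blocking sender sleeps in sk_stream_wait_memory() while a non-blocking one gets EAGAIN after SOCK_NOSPACE is set. A sketch of a non-blocking write loop that cooperates with that logic (the unbounded poll timeout is an arbitrary choice):

    #include <errno.h>
    #include <poll.h>
    #include <sys/socket.h>

    /* Write 'len' bytes to a non-blocking TCP socket, waiting for POLLOUT
     * whenever tcp_sendmsg() reports that the send buffer is full. */
    ssize_t send_all_nonblocking(int fd, const char *buf, size_t len)
    {
            size_t done = 0;

            while (done < len) {
                    ssize_t n = send(fd, buf + done, len - done, MSG_DONTWAIT);

                    if (n > 0) {
                            done += n;
                            continue;
                    }
                    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
                            /* sk_stream_memory_free() failed; wait until
                             * tcp_poll() reports writability again. */
                            struct pollfd pfd = { .fd = fd, .events = POLLOUT };

                            if (poll(&pfd, 1, -1) < 0)
                                    return -1;
                            continue;
                    }
                    return -1;   /* real error (ECONNRESET, EPIPE, ...) */
            }
            return done;
    }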
1289 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) in tcp_recv_urg() argument
1291 struct tcp_sock *tp = tcp_sk(sk); in tcp_recv_urg()
1294 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || in tcp_recv_urg()
1298 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) in tcp_recv_urg()
1321 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) in tcp_recv_urg()
1333 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) in tcp_peek_sndq() argument
1340 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_peek_sndq()
1357 static void tcp_cleanup_rbuf(struct sock *sk, int copied) in tcp_cleanup_rbuf() argument
1359 struct tcp_sock *tp = tcp_sk(sk); in tcp_cleanup_rbuf()
1362 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_cleanup_rbuf()
1368 if (inet_csk_ack_scheduled(sk)) { in tcp_cleanup_rbuf()
1369 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_rbuf()
1385 !atomic_read(&sk->sk_rmem_alloc))) in tcp_cleanup_rbuf()
1395 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { in tcp_cleanup_rbuf()
1400 __u32 new_window = __tcp_select_window(sk); in tcp_cleanup_rbuf()
1412 tcp_send_ack(sk); in tcp_cleanup_rbuf()
1415 static void tcp_prequeue_process(struct sock *sk) in tcp_prequeue_process() argument
1418 struct tcp_sock *tp = tcp_sk(sk); in tcp_prequeue_process()
1420 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED); in tcp_prequeue_process()
1426 sk_backlog_rcv(sk, skb); in tcp_prequeue_process()
1433 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) in tcp_recv_skb() argument
1438 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { in tcp_recv_skb()
1450 sk_eat_skb(sk, skb); in tcp_recv_skb()
1466 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, in tcp_read_sock() argument
1470 struct tcp_sock *tp = tcp_sk(sk); in tcp_read_sock()
1475 if (sk->sk_state == TCP_LISTEN) in tcp_read_sock()
1477 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { in tcp_read_sock()
1506 skb = tcp_recv_skb(sk, seq - 1, &offset); in tcp_read_sock()
1516 sk_eat_skb(sk, skb); in tcp_read_sock()
1520 sk_eat_skb(sk, skb); in tcp_read_sock()
1527 tcp_rcv_space_adjust(sk); in tcp_read_sock()
1531 tcp_recv_skb(sk, seq, &offset); in tcp_read_sock()
1532 tcp_cleanup_rbuf(sk, copied); in tcp_read_sock()
1546 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, in tcp_recvmsg() argument
1549 struct tcp_sock *tp = tcp_sk(sk); in tcp_recvmsg()
1562 return inet_recv_error(sk, msg, len, addr_len); in tcp_recvmsg()
1564 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && in tcp_recvmsg()
1565 (sk->sk_state == TCP_ESTABLISHED)) in tcp_recvmsg()
1566 sk_busy_loop(sk, nonblock); in tcp_recvmsg()
1568 lock_sock(sk); in tcp_recvmsg()
1571 if (sk->sk_state == TCP_LISTEN) in tcp_recvmsg()
1574 timeo = sock_rcvtimeo(sk, nonblock); in tcp_recvmsg()
1601 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); in tcp_recvmsg()
1618 skb_queue_walk(&sk->sk_receive_queue, skb) { in tcp_recvmsg()
1642 if (copied >= target && !sk->sk_backlog.tail) in tcp_recvmsg()
1646 if (sk->sk_err || in tcp_recvmsg()
1647 sk->sk_state == TCP_CLOSE || in tcp_recvmsg()
1648 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_recvmsg()
1653 if (sock_flag(sk, SOCK_DONE)) in tcp_recvmsg()
1656 if (sk->sk_err) { in tcp_recvmsg()
1657 copied = sock_error(sk); in tcp_recvmsg()
1661 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_recvmsg()
1664 if (sk->sk_state == TCP_CLOSE) { in tcp_recvmsg()
1665 if (!sock_flag(sk, SOCK_DONE)) { in tcp_recvmsg()
1686 tcp_cleanup_rbuf(sk, copied); in tcp_recvmsg()
1735 release_sock(sk); in tcp_recvmsg()
1736 lock_sock(sk); in tcp_recvmsg()
1738 sk_wait_data(sk, &timeo); in tcp_recvmsg()
1746 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); in tcp_recvmsg()
1754 tcp_prequeue_process(sk); in tcp_recvmsg()
1757 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); in tcp_recvmsg()
1783 if (!sock_flag(sk, SOCK_URGINLINE)) { in tcp_recvmsg()
1810 tcp_rcv_space_adjust(sk); in tcp_recvmsg()
1815 tcp_fast_path_check(sk); in tcp_recvmsg()
1823 sk_eat_skb(sk, skb); in tcp_recvmsg()
1830 sk_eat_skb(sk, skb); in tcp_recvmsg()
1840 tcp_prequeue_process(sk); in tcp_recvmsg()
1843 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); in tcp_recvmsg()
1858 tcp_cleanup_rbuf(sk, copied); in tcp_recvmsg()
1860 release_sock(sk); in tcp_recvmsg()
1864 release_sock(sk); in tcp_recvmsg()
1868 err = tcp_recv_urg(sk, msg, len, flags); in tcp_recvmsg()
1872 err = tcp_peek_sndq(sk, msg, len); in tcp_recvmsg()
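In tcp_recvmsg(), target = sock_rcvlowat(sk, flags & MSG_WAITALL, len) decides how much data must be queued before the call returns, so both MSG_WAITALL and SO_RCVLOWAT raise that threshold. Two small sketches:

    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Read exactly 'len' bytes: MSG_WAITALL raises tcp_recvmsg()'s target
     * to the full request, so a short return means EOF or an error. */
    ssize_t recv_exact(int fd, void *buf, size_t len)
    {
            return recv(fd, buf, len, MSG_WAITALL);
    }

    /* Alternatively, make poll()/recv() wake up only once at least
     * 'lowat' bytes are queued (sock_rcvlowat() in tcp_poll/tcp_recvmsg). */
    int set_rcvlowat(int fd, int lowat)
    {
            return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
                              &lowat, sizeof(lowat));
    }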
1877 void tcp_set_state(struct sock *sk, int state) in tcp_set_state() argument
1879 int oldstate = sk->sk_state; in tcp_set_state()
1884 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); in tcp_set_state()
1889 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); in tcp_set_state()
1891 sk->sk_prot->unhash(sk); in tcp_set_state()
1892 if (inet_csk(sk)->icsk_bind_hash && in tcp_set_state()
1893 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) in tcp_set_state()
1894 inet_put_port(sk); in tcp_set_state()
1898 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); in tcp_set_state()
1904 sk->sk_state = state; in tcp_set_state()
1907 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); in tcp_set_state()
1936 static int tcp_close_state(struct sock *sk) in tcp_close_state() argument
1938 int next = (int)new_state[sk->sk_state]; in tcp_close_state()
1941 tcp_set_state(sk, ns); in tcp_close_state()
1951 void tcp_shutdown(struct sock *sk, int how) in tcp_shutdown() argument
1961 if ((1 << sk->sk_state) & in tcp_shutdown()
1965 if (tcp_close_state(sk)) in tcp_shutdown()
1966 tcp_send_fin(sk); in tcp_shutdown()
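tcp_shutdown() only emits a FIN for SHUT_WR/SHUT_RDWR and only from states where tcp_close_state() allows it. The classic half-close pattern from user space, assuming the peer eventually closes its side:

    #include <sys/socket.h>
    #include <unistd.h>

    /* Tell the peer we are done sending (the FIN goes out via
     * tcp_send_fin()) but keep reading until the peer closes too. */
    ssize_t half_close_and_drain(int fd, char *buf, size_t buflen)
    {
            ssize_t n, total = 0;

            if (shutdown(fd, SHUT_WR) < 0)
                    return -1;
            while ((n = read(fd, buf, buflen)) > 0)
                    total += n;     /* consume whatever the peer still sends */
            return n < 0 ? -1 : total;
    }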
1971 bool tcp_check_oom(struct sock *sk, int shift) in tcp_check_oom() argument
1975 too_many_orphans = tcp_too_many_orphans(sk, shift); in tcp_check_oom()
1976 out_of_socket_memory = tcp_out_of_memory(sk); in tcp_check_oom()
1985 void tcp_close(struct sock *sk, long timeout) in tcp_close() argument
1991 lock_sock(sk); in tcp_close()
1992 sk->sk_shutdown = SHUTDOWN_MASK; in tcp_close()
1994 if (sk->sk_state == TCP_LISTEN) { in tcp_close()
1995 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
1998 inet_csk_listen_stop(sk); in tcp_close()
2007 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { in tcp_close()
2016 sk_mem_reclaim(sk); in tcp_close()
2019 if (sk->sk_state == TCP_CLOSE) in tcp_close()
2029 if (unlikely(tcp_sk(sk)->repair)) { in tcp_close()
2030 sk->sk_prot->disconnect(sk, 0); in tcp_close()
2033 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); in tcp_close()
2034 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2035 tcp_send_active_reset(sk, sk->sk_allocation); in tcp_close()
2036 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { in tcp_close()
2038 sk->sk_prot->disconnect(sk, 0); in tcp_close()
2039 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_close()
2040 } else if (tcp_close_state(sk)) { in tcp_close()
2070 tcp_send_fin(sk); in tcp_close()
2073 sk_stream_wait_close(sk, timeout); in tcp_close()
2076 state = sk->sk_state; in tcp_close()
2077 sock_hold(sk); in tcp_close()
2078 sock_orphan(sk); in tcp_close()
2081 release_sock(sk); in tcp_close()
2088 bh_lock_sock(sk); in tcp_close()
2089 WARN_ON(sock_owned_by_user(sk)); in tcp_close()
2091 percpu_counter_inc(sk->sk_prot->orphan_count); in tcp_close()
2094 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) in tcp_close()
2111 if (sk->sk_state == TCP_FIN_WAIT2) { in tcp_close()
2112 struct tcp_sock *tp = tcp_sk(sk); in tcp_close()
2114 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2115 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_close()
2116 NET_INC_STATS_BH(sock_net(sk), in tcp_close()
2119 const int tmo = tcp_fin_time(sk); in tcp_close()
2122 inet_csk_reset_keepalive_timer(sk, in tcp_close()
2125 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_close()
2130 if (sk->sk_state != TCP_CLOSE) { in tcp_close()
2131 sk_mem_reclaim(sk); in tcp_close()
2132 if (tcp_check_oom(sk, 0)) { in tcp_close()
2133 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2134 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_close()
2135 NET_INC_STATS_BH(sock_net(sk), in tcp_close()
2140 if (sk->sk_state == TCP_CLOSE) { in tcp_close()
2141 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; in tcp_close()
2147 reqsk_fastopen_remove(sk, req, false); in tcp_close()
2148 inet_csk_destroy_sock(sk); in tcp_close()
2153 bh_unlock_sock(sk); in tcp_close()
2155 sock_put(sk); in tcp_close()
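One branch worth noting in tcp_close(): when SOCK_LINGER is set with a zero sk_lingertime, the kernel takes the disconnect path and aborts the connection instead of performing a graceful FIN exchange. That corresponds to SO_LINGER with l_linger == 0; a sketch (this discards queued data, so use deliberately):

    #include <sys/socket.h>
    #include <unistd.h>

    /* Abortive close: SO_LINGER with l_linger == 0 makes tcp_close()
     * reset the connection rather than finishing a FIN handshake. */
    int abort_tcp_connection(int fd)
    {
            struct linger lg = { .l_onoff = 1, .l_linger = 0 };

            setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
            return close(fd);
    }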
2168 int tcp_disconnect(struct sock *sk, int flags) in tcp_disconnect() argument
2170 struct inet_sock *inet = inet_sk(sk); in tcp_disconnect()
2171 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_disconnect()
2172 struct tcp_sock *tp = tcp_sk(sk); in tcp_disconnect()
2174 int old_state = sk->sk_state; in tcp_disconnect()
2177 tcp_set_state(sk, TCP_CLOSE); in tcp_disconnect()
2181 inet_csk_listen_stop(sk); in tcp_disconnect()
2183 sk->sk_err = ECONNABORTED; in tcp_disconnect()
2190 tcp_send_active_reset(sk, gfp_any()); in tcp_disconnect()
2191 sk->sk_err = ECONNRESET; in tcp_disconnect()
2193 sk->sk_err = ECONNRESET; in tcp_disconnect()
2195 tcp_clear_xmit_timers(sk); in tcp_disconnect()
2196 __skb_queue_purge(&sk->sk_receive_queue); in tcp_disconnect()
2197 tcp_write_queue_purge(sk); in tcp_disconnect()
2202 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) in tcp_disconnect()
2203 inet_reset_saddr(sk); in tcp_disconnect()
2205 sk->sk_shutdown = 0; in tcp_disconnect()
2206 sock_reset_flag(sk, SOCK_DONE); in tcp_disconnect()
2217 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_disconnect()
2219 inet_csk_delack_init(sk); in tcp_disconnect()
2220 tcp_init_send_head(sk); in tcp_disconnect()
2222 __sk_dst_reset(sk); in tcp_disconnect()
2226 sk->sk_error_report(sk); in tcp_disconnect()
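tcp_disconnect() is also reachable from user space: connect()ing an established TCP socket to an address whose family is AF_UNSPEC dissolves the association and returns the socket to TCP_CLOSE. A sketch, with the caveat that closing and recreating the socket is usually the simpler option:

    #include <string.h>
    #include <sys/socket.h>

    /* Reset a TCP socket back to TCP_CLOSE via tcp_disconnect(); the
     * kernel purges the queues and, if needed, sends an active reset. */
    int disconnect_tcp(int fd)
    {
            struct sockaddr sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_family = AF_UNSPEC;
            return connect(fd, &sa, sizeof(sa));
    }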
2231 void tcp_sock_destruct(struct sock *sk) in tcp_sock_destruct() argument
2233 inet_sock_destruct(sk); in tcp_sock_destruct()
2235 kfree(inet_csk(sk)->icsk_accept_queue.fastopenq); in tcp_sock_destruct()
2238 static inline bool tcp_can_repair_sock(const struct sock *sk) in tcp_can_repair_sock() argument
2240 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && in tcp_can_repair_sock()
2241 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); in tcp_can_repair_sock()
2296 static int do_tcp_setsockopt(struct sock *sk, int level, in do_tcp_setsockopt() argument
2299 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_setsockopt()
2300 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_setsockopt()
2318 lock_sock(sk); in do_tcp_setsockopt()
2319 err = tcp_set_congestion_control(sk, name); in do_tcp_setsockopt()
2320 release_sock(sk); in do_tcp_setsockopt()
2334 lock_sock(sk); in do_tcp_setsockopt()
2359 tcp_push_pending_frames(sk); in do_tcp_setsockopt()
2383 if (!tcp_can_repair_sock(sk)) in do_tcp_setsockopt()
2387 sk->sk_reuse = SK_FORCE_REUSE; in do_tcp_setsockopt()
2391 sk->sk_reuse = SK_NO_REUSE; in do_tcp_setsockopt()
2392 tcp_send_window_probe(sk); in do_tcp_setsockopt()
2408 if (sk->sk_state != TCP_CLOSE) in do_tcp_setsockopt()
2421 else if (sk->sk_state == TCP_ESTABLISHED) in do_tcp_setsockopt()
2447 tcp_push_pending_frames(sk); in do_tcp_setsockopt()
2456 if (sock_flag(sk, SOCK_KEEPOPEN) && in do_tcp_setsockopt()
2457 !((1 << sk->sk_state) & in do_tcp_setsockopt()
2464 inet_csk_reset_keepalive_timer(sk, elapsed); in do_tcp_setsockopt()
2505 if (sk->sk_state != TCP_CLOSE) { in do_tcp_setsockopt()
2520 if ((1 << sk->sk_state) & in do_tcp_setsockopt()
2522 inet_csk_ack_scheduled(sk)) { in do_tcp_setsockopt()
2524 tcp_cleanup_rbuf(sk, 1); in do_tcp_setsockopt()
2534 err = tp->af_specific->md5_parse(sk, optval, optlen); in do_tcp_setsockopt()
2548 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | in do_tcp_setsockopt()
2552 err = fastopen_init_queue(sk, val); in do_tcp_setsockopt()
2565 sk->sk_write_space(sk); in do_tcp_setsockopt()
2572 release_sock(sk); in do_tcp_setsockopt()
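do_tcp_setsockopt() covers, among others, TCP_CONGESTION (via tcp_set_congestion_control()), TCP_NODELAY (which ends by pushing pending frames), keepalive tuning and TCP_FASTOPEN. A sketch of two common knobs; "cubic" is assumed to be built into the running kernel, and non-default modules may require CAP_NET_ADMIN or the tcp_allowed_congestion_control sysctl:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Pick a congestion control module and disable Nagle on one socket. */
    int tune_tcp_socket(int fd)
    {
            const char ca[] = "cubic";
            int one = 1;

            if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                           ca, strlen(ca)) < 0)
                    return -1;
            return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
                              &one, sizeof(one));
    }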
2576 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, in tcp_setsockopt() argument
2579 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_setsockopt()
2582 return icsk->icsk_af_ops->setsockopt(sk, level, optname, in tcp_setsockopt()
2584 return do_tcp_setsockopt(sk, level, optname, optval, optlen); in tcp_setsockopt()
2589 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, in compat_tcp_setsockopt() argument
2593 return inet_csk_compat_setsockopt(sk, level, optname, in compat_tcp_setsockopt()
2595 return do_tcp_setsockopt(sk, level, optname, optval, optlen); in compat_tcp_setsockopt()
2601 void tcp_get_info(struct sock *sk, struct tcp_info *info) in tcp_get_info() argument
2603 const struct tcp_sock *tp = tcp_sk(sk); in tcp_get_info()
2604 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_get_info()
2612 info->tcpi_state = sk->sk_state; in tcp_get_info()
2640 if (sk->sk_state == TCP_LISTEN) { in tcp_get_info()
2641 info->tcpi_unacked = sk->sk_ack_backlog; in tcp_get_info()
2642 info->tcpi_sacked = sk->sk_max_ack_backlog; in tcp_get_info()
2669 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_info()
2673 rate = READ_ONCE(sk->sk_max_pacing_rate); in tcp_get_info()
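tcp_get_info() fills struct tcp_info for the TCP_INFO getsockopt (and for inet_diag). A sketch that reads a few of the exported counters; the exact field set grows across kernel versions, so the returned length should not be assumed:

    #include <netinet/in.h>
    #include <netinet/tcp.h>     /* TCP_INFO, struct tcp_info */
    #include <stdio.h>
    #include <sys/socket.h>

    /* Dump a handful of the counters tcp_get_info() exposes. */
    void print_tcp_info(int fd)
    {
            struct tcp_info ti;
            socklen_t len = sizeof(ti);

            if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
                    return;
            printf("state=%u rtt=%uus rttvar=%uus snd_cwnd=%u retrans=%u\n",
                   (unsigned)ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_rttvar,
                   ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
    }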
2685 static int do_tcp_getsockopt(struct sock *sk, int level, in do_tcp_getsockopt() argument
2688 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_getsockopt()
2689 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_getsockopt()
2703 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) in do_tcp_getsockopt()
2744 tcp_get_info(sk, &info); in do_tcp_getsockopt()
2764 sz = ca_ops->get_info(sk, ~0U, &attr, &info); in do_tcp_getsockopt()
2842 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, in tcp_getsockopt() argument
2845 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_getsockopt()
2848 return icsk->icsk_af_ops->getsockopt(sk, level, optname, in tcp_getsockopt()
2850 return do_tcp_getsockopt(sk, level, optname, optval, optlen); in tcp_getsockopt()
2855 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, in compat_tcp_getsockopt() argument
2859 return inet_csk_compat_getsockopt(sk, level, optname, in compat_tcp_getsockopt()
2861 return do_tcp_getsockopt(sk, level, optname, optval, optlen); in compat_tcp_getsockopt()
2994 void tcp_done(struct sock *sk) in tcp_done() argument
2996 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; in tcp_done()
2998 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) in tcp_done()
2999 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); in tcp_done()
3001 tcp_set_state(sk, TCP_CLOSE); in tcp_done()
3002 tcp_clear_xmit_timers(sk); in tcp_done()
3004 reqsk_fastopen_remove(sk, req, false); in tcp_done()
3006 sk->sk_shutdown = SHUTDOWN_MASK; in tcp_done()
3008 if (!sock_flag(sk, SOCK_DEAD)) in tcp_done()
3009 sk->sk_state_change(sk); in tcp_done()
3011 inet_csk_destroy_sock(sk); in tcp_done()