Lines matching refs:sk (each entry gives the source line number, the matching line, and the enclosing function; declaration sites are tagged "argument" or "local")
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) in tcp_twsk_unique() argument
111 struct tcp_sock *tp = tcp_sk(sk); in tcp_twsk_unique()
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) in tcp_v4_connect() argument
144 struct inet_sock *inet = inet_sk(sk); in tcp_v4_connect()
145 struct tcp_sock *tp = tcp_sk(sk); in tcp_v4_connect()
161 sock_owned_by_user(sk)); in tcp_v4_connect()
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, in tcp_v4_connect()
174 orig_sport, orig_dport, sk); in tcp_v4_connect()
178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); in tcp_v4_connect()
192 sk_rcv_saddr_set(sk, inet->inet_saddr); in tcp_v4_connect()
204 tcp_fetch_timewait_stamp(sk, &rt->dst); in tcp_v4_connect()
207 sk_daddr_set(sk, daddr); in tcp_v4_connect()
209 inet_csk(sk)->icsk_ext_hdr_len = 0; in tcp_v4_connect()
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in tcp_v4_connect()
220 tcp_set_state(sk, TCP_SYN_SENT); in tcp_v4_connect()
221 err = inet_hash_connect(&tcp_death_row, sk); in tcp_v4_connect()
225 inet_set_txhash(sk); in tcp_v4_connect()
228 inet->inet_sport, inet->inet_dport, sk); in tcp_v4_connect()
235 sk->sk_gso_type = SKB_GSO_TCPV4; in tcp_v4_connect()
236 sk_setup_caps(sk, &rt->dst); in tcp_v4_connect()
246 err = tcp_connect(sk); in tcp_v4_connect()
259 tcp_set_state(sk, TCP_CLOSE); in tcp_v4_connect()
261 sk->sk_route_caps = 0; in tcp_v4_connect()
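The tcp_v4_connect() entries above are the kernel side of an IPv4 active open: route lookup, source address selection, the move to TCP_SYN_SENT, ephemeral-port binding via inet_hash_connect(), and finally tcp_connect() to emit the SYN. From userspace the whole path is driven by an ordinary connect(2) on a SOCK_STREAM socket; a minimal sketch (destination 192.0.2.1:80 is a placeholder documentation address, not something from the listing):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);	/* TCP socket */
	struct sockaddr_in dst;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);			/* placeholder port */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* placeholder address */

	/* Active open: this connect(2) ends up in tcp_v4_connect(). */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}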
272 void tcp_v4_mtu_reduced(struct sock *sk) in tcp_v4_mtu_reduced() argument
275 struct inet_sock *inet = inet_sk(sk); in tcp_v4_mtu_reduced()
276 u32 mtu = tcp_sk(sk)->mtu_info; in tcp_v4_mtu_reduced()
278 dst = inet_csk_update_pmtu(sk, mtu); in tcp_v4_mtu_reduced()
285 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) in tcp_v4_mtu_reduced()
286 sk->sk_err_soft = EMSGSIZE; in tcp_v4_mtu_reduced()
291 ip_sk_accept_pmtu(sk) && in tcp_v4_mtu_reduced()
292 inet_csk(sk)->icsk_pmtu_cookie > mtu) { in tcp_v4_mtu_reduced()
293 tcp_sync_mss(sk, mtu); in tcp_v4_mtu_reduced()
300 tcp_simple_retransmit(sk); in tcp_v4_mtu_reduced()
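tcp_v4_mtu_reduced() handles an ICMP fragmentation-needed notification: it refreshes the route via inet_csk_update_pmtu(), and if the socket accepts path-MTU updates (ip_sk_accept_pmtu()) and its cached MSS cookie is now too large, it shrinks the MSS with tcp_sync_mss() and retransmits. Whether a socket accepts those updates is an IP-level per-socket setting; a hedged sketch using the standard IP_MTU_DISCOVER and IP_MTU socket options (the helper name is made up):

#include <netinet/in.h>		/* IP_MTU_DISCOVER, IP_PMTUDISC_DO, IP_MTU */
#include <sys/socket.h>

/* Force path-MTU discovery (DF set) on a connected TCP socket and read
 * back the path MTU the kernel currently has cached for it. */
static int enable_pmtu_discovery(int fd, int *path_mtu)
{
	int val = IP_PMTUDISC_DO;
	socklen_t len = sizeof(*path_mtu);

	if (setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val)) < 0)
		return -1;

	/* IP_MTU is only meaningful once the socket is connected. */
	return getsockopt(fd, IPPROTO_IP, IP_MTU, path_mtu, &len);
}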
305 static void do_redirect(struct sk_buff *skb, struct sock *sk) in do_redirect() argument
307 struct dst_entry *dst = __sk_dst_check(sk, 0); in do_redirect()
310 dst->ops->redirect(dst, sk, skb); in do_redirect()
315 void tcp_req_err(struct sock *sk, u32 seq) in tcp_req_err() argument
317 struct request_sock *req = inet_reqsk(sk); in tcp_req_err()
318 struct net *net = sock_net(sk); in tcp_req_err()
323 WARN_ON(req->sk); in tcp_req_err()
366 struct sock *sk; in tcp_v4_err() local
374 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr, in tcp_v4_err()
377 if (!sk) { in tcp_v4_err()
381 if (sk->sk_state == TCP_TIME_WAIT) { in tcp_v4_err()
382 inet_twsk_put(inet_twsk(sk)); in tcp_v4_err()
386 if (sk->sk_state == TCP_NEW_SYN_RECV) in tcp_v4_err()
387 return tcp_req_err(sk, seq); in tcp_v4_err()
389 bh_lock_sock(sk); in tcp_v4_err()
395 if (sock_owned_by_user(sk)) { in tcp_v4_err()
399 if (sk->sk_state == TCP_CLOSE) in tcp_v4_err()
402 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { in tcp_v4_err()
407 icsk = inet_csk(sk); in tcp_v4_err()
408 tp = tcp_sk(sk); in tcp_v4_err()
412 if (sk->sk_state != TCP_LISTEN && in tcp_v4_err()
420 do_redirect(icmp_skb, sk); in tcp_v4_err()
437 if (sk->sk_state == TCP_LISTEN) in tcp_v4_err()
441 if (!sock_owned_by_user(sk)) { in tcp_v4_err()
442 tcp_v4_mtu_reduced(sk); in tcp_v4_err()
445 sock_hold(sk); in tcp_v4_err()
459 if (sock_owned_by_user(sk)) in tcp_v4_err()
467 skb = tcp_write_queue_head(sk); in tcp_v4_err()
475 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_v4_err()
480 tcp_retransmit_timer(sk); in tcp_v4_err()
491 switch (sk->sk_state) { in tcp_v4_err()
497 if (fastopen && !fastopen->sk) in tcp_v4_err()
500 if (!sock_owned_by_user(sk)) { in tcp_v4_err()
501 sk->sk_err = err; in tcp_v4_err()
503 sk->sk_error_report(sk); in tcp_v4_err()
505 tcp_done(sk); in tcp_v4_err()
507 sk->sk_err_soft = err; in tcp_v4_err()
528 inet = inet_sk(sk); in tcp_v4_err()
529 if (!sock_owned_by_user(sk) && inet->recverr) { in tcp_v4_err()
530 sk->sk_err = err; in tcp_v4_err()
531 sk->sk_error_report(sk); in tcp_v4_err()
533 sk->sk_err_soft = err; in tcp_v4_err()
537 bh_unlock_sock(sk); in tcp_v4_err()
538 sock_put(sk); in tcp_v4_err()
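tcp_v4_err() is the ICMP error handler for TCP sockets: it looks up the socket for the offending segment, dispatches redirects (do_redirect()) and PMTU messages (tcp_v4_mtu_reduced()), and for other errors either raises sk->sk_err immediately or only records sk->sk_err_soft, depending on whether the socket is user-locked and on inet->recverr. That recverr flag corresponds to the IP_RECVERR socket option; a small sketch of enabling it (the helper name is illustrative):

#include <netinet/in.h>		/* IP_RECVERR */
#include <sys/socket.h>

/* Ask for eager reporting of ICMP-derived errors on a TCP socket: with
 * inet->recverr set, tcp_v4_err() raises sk->sk_err right away instead of
 * only noting a soft error, so the next read/write fails with that errno. */
static int enable_recverr(int fd)
{
	int on = 1;

	return setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
}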
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) in tcp_v4_send_check() argument
560 const struct inet_sock *inet = inet_sk(sk); in tcp_v4_send_check()
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) in tcp_v4_send_reset() argument
605 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) in tcp_v4_send_reset()
627 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); in tcp_v4_send_reset()
630 if (!sk && hash_location) { in tcp_v4_send_reset()
655 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *) in tcp_v4_send_reset()
678 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; in tcp_v4_send_reset()
683 if (sk) in tcp_v4_send_reset()
684 arg.bound_dev_if = sk->sk_bound_dev_if; in tcp_v4_send_reset()
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) in tcp_v4_timewait_ack() argument
782 struct inet_timewait_sock *tw = inet_twsk(sk); in tcp_v4_timewait_ack()
783 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); in tcp_v4_timewait_ack()
785 tcp_v4_send_ack(sock_net(sk), skb, in tcp_v4_timewait_ack()
799 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, in tcp_v4_reqsk_send_ack() argument
805 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : in tcp_v4_reqsk_send_ack()
806 tcp_sk(sk)->snd_nxt; in tcp_v4_reqsk_send_ack()
808 tcp_v4_send_ack(sock_net(sk), skb, seq, in tcp_v4_reqsk_send_ack()
813 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, in tcp_v4_reqsk_send_ack()
824 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, in tcp_v4_send_synack() argument
836 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) in tcp_v4_send_synack()
839 skb = tcp_make_synack(sk, dst, req, foc); in tcp_v4_send_synack()
845 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, in tcp_v4_send_synack()
871 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, in tcp_md5_do_lookup() argument
875 const struct tcp_sock *tp = tcp_sk(sk); in tcp_md5_do_lookup()
882 sock_owned_by_user(sk) || in tcp_md5_do_lookup()
883 lockdep_is_held(&sk->sk_lock.slock)); in tcp_md5_do_lookup()
900 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, in tcp_v4_md5_lookup() argument
906 return tcp_md5_do_lookup(sk, addr, AF_INET); in tcp_v4_md5_lookup()
911 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, in tcp_md5_do_add() argument
916 struct tcp_sock *tp = tcp_sk(sk); in tcp_md5_do_add()
919 key = tcp_md5_do_lookup(sk, addr, family); in tcp_md5_do_add()
928 sock_owned_by_user(sk) || in tcp_md5_do_add()
929 lockdep_is_held(&sk->sk_lock.slock)); in tcp_md5_do_add()
935 sk_nocaps_add(sk, NETIF_F_GSO_MASK); in tcp_md5_do_add()
940 key = sock_kmalloc(sk, sizeof(*key), gfp); in tcp_md5_do_add()
944 sock_kfree_s(sk, key, sizeof(*key)); in tcp_md5_do_add()
959 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) in tcp_md5_do_del() argument
963 key = tcp_md5_do_lookup(sk, addr, family); in tcp_md5_do_del()
967 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); in tcp_md5_do_del()
973 static void tcp_clear_md5_list(struct sock *sk) in tcp_clear_md5_list() argument
975 struct tcp_sock *tp = tcp_sk(sk); in tcp_clear_md5_list()
984 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); in tcp_clear_md5_list()
989 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, in tcp_v4_parse_md5_keys() argument
1005 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, in tcp_v4_parse_md5_keys()
1011 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, in tcp_v4_parse_md5_keys()
1072 const struct sock *sk, in tcp_v4_md5_hash_skb() argument
1080 if (sk) { /* valid for establish/request sockets */ in tcp_v4_md5_hash_skb()
1081 saddr = sk->sk_rcv_saddr; in tcp_v4_md5_hash_skb()
1082 daddr = sk->sk_daddr; in tcp_v4_md5_hash_skb()
1120 static bool tcp_v4_inbound_md5_hash(struct sock *sk, in tcp_v4_inbound_md5_hash() argument
1138 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr, in tcp_v4_inbound_md5_hash()
1147 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); in tcp_v4_inbound_md5_hash()
1152 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); in tcp_v4_inbound_md5_hash()
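tcp_md5_do_add(), tcp_md5_do_del() and tcp_v4_parse_md5_keys() maintain the per-socket list of RFC 2385 TCP-MD5 keys that tcp_v4_inbound_md5_hash() later checks incoming segments against. The keys are configured from userspace with the TCP_MD5SIG socket option; a sketch using the usual Linux uapi definitions (peer address and key are caller-supplied, the helper name is made up):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG */

/* Attach an RFC 2385 TCP-MD5 key for one peer; the kernel side of this
 * setsockopt is tcp_v4_parse_md5_keys() -> tcp_md5_do_add(). */
static int set_tcp_md5_key(int fd, const char *peer_ip, const char *key)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	if (inet_pton(AF_INET, peer_ip, &sin->sin_addr) != 1)
		return -1;

	md5.tcpm_keylen = strlen(key);	/* must not exceed TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}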
1186 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl, in tcp_v4_route_req() argument
1190 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req); in tcp_v4_route_req()
1228 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) in tcp_v4_conn_request() argument
1235 &tcp_request_sock_ipv4_ops, sk, skb); in tcp_v4_conn_request()
1238 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_v4_conn_request()
1248 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, in tcp_v4_syn_recv_sock() argument
1261 if (sk_acceptq_is_full(sk)) in tcp_v4_syn_recv_sock()
1264 newsk = tcp_create_openreq_child(sk, req, skb); in tcp_v4_syn_recv_sock()
1290 dst = inet_csk_route_child_sock(sk, newsk, req); in tcp_v4_syn_recv_sock()
1302 if (tcp_sk(sk)->rx_opt.user_mss && in tcp_v4_syn_recv_sock()
1303 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) in tcp_v4_syn_recv_sock()
1304 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; in tcp_v4_syn_recv_sock()
1310 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, in tcp_v4_syn_recv_sock()
1325 if (__inet_inherit_port(sk, newsk) < 0) in tcp_v4_syn_recv_sock()
1332 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_v4_syn_recv_sock()
1336 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_v4_syn_recv_sock()
1345 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) in tcp_v4_hnd_req() argument
1352 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); in tcp_v4_hnd_req()
1354 nsk = tcp_check_req(sk, skb, req, false); in tcp_v4_hnd_req()
1355 if (!nsk || nsk == sk) in tcp_v4_hnd_req()
1360 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr, in tcp_v4_hnd_req()
1374 sk = cookie_v4_check(sk, skb); in tcp_v4_hnd_req()
1376 return sk; in tcp_v4_hnd_req()
1387 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) in tcp_v4_do_rcv() argument
1391 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ in tcp_v4_do_rcv()
1392 struct dst_entry *dst = sk->sk_rx_dst; in tcp_v4_do_rcv()
1394 sock_rps_save_rxhash(sk, skb); in tcp_v4_do_rcv()
1395 sk_mark_napi_id(sk, skb); in tcp_v4_do_rcv()
1397 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || in tcp_v4_do_rcv()
1400 sk->sk_rx_dst = NULL; in tcp_v4_do_rcv()
1403 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); in tcp_v4_do_rcv()
1410 if (sk->sk_state == TCP_LISTEN) { in tcp_v4_do_rcv()
1411 struct sock *nsk = tcp_v4_hnd_req(sk, skb); in tcp_v4_do_rcv()
1415 if (nsk != sk) { in tcp_v4_do_rcv()
1417 sk_mark_napi_id(sk, skb); in tcp_v4_do_rcv()
1418 if (tcp_child_process(sk, nsk, skb)) { in tcp_v4_do_rcv()
1425 sock_rps_save_rxhash(sk, skb); in tcp_v4_do_rcv()
1427 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { in tcp_v4_do_rcv()
1428 rsk = sk; in tcp_v4_do_rcv()
1445 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_v4_do_rcv()
1446 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_v4_do_rcv()
1455 struct sock *sk; in tcp_v4_early_demux() local
1469 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, in tcp_v4_early_demux()
1473 if (sk) { in tcp_v4_early_demux()
1474 skb->sk = sk; in tcp_v4_early_demux()
1476 if (sk_fullsock(sk)) { in tcp_v4_early_demux()
1477 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); in tcp_v4_early_demux()
1482 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) in tcp_v4_early_demux()
1495 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) in tcp_prequeue() argument
1497 struct tcp_sock *tp = tcp_sk(sk); in tcp_prequeue()
1512 if (likely(sk->sk_rx_dst)) in tcp_prequeue()
1519 if (tp->ucopy.memory > sk->sk_rcvbuf) { in tcp_prequeue()
1522 BUG_ON(sock_owned_by_user(sk)); in tcp_prequeue()
1525 sk_backlog_rcv(sk, skb1); in tcp_prequeue()
1526 NET_INC_STATS_BH(sock_net(sk), in tcp_prequeue()
1532 wake_up_interruptible_sync_poll(sk_sleep(sk), in tcp_prequeue()
1534 if (!inet_csk_ack_scheduled(sk)) in tcp_prequeue()
1535 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in tcp_prequeue()
1536 (3 * tcp_rto_min(sk)) / 4, in tcp_prequeue()
1551 struct sock *sk; in tcp_v4_rcv() local
1597 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); in tcp_v4_rcv()
1598 if (!sk) in tcp_v4_rcv()
1602 if (sk->sk_state == TCP_TIME_WAIT) in tcp_v4_rcv()
1605 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { in tcp_v4_rcv()
1610 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) in tcp_v4_rcv()
1620 if (tcp_v4_inbound_md5_hash(sk, skb)) in tcp_v4_rcv()
1626 if (sk_filter(sk, skb)) in tcp_v4_rcv()
1629 sk_incoming_cpu_update(sk); in tcp_v4_rcv()
1632 bh_lock_sock_nested(sk); in tcp_v4_rcv()
1634 if (!sock_owned_by_user(sk)) { in tcp_v4_rcv()
1635 if (!tcp_prequeue(sk, skb)) in tcp_v4_rcv()
1636 ret = tcp_v4_do_rcv(sk, skb); in tcp_v4_rcv()
1637 } else if (unlikely(sk_add_backlog(sk, skb, in tcp_v4_rcv()
1638 sk->sk_rcvbuf + sk->sk_sndbuf))) { in tcp_v4_rcv()
1639 bh_unlock_sock(sk); in tcp_v4_rcv()
1643 bh_unlock_sock(sk); in tcp_v4_rcv()
1645 sock_put(sk); in tcp_v4_rcv()
1668 sock_put(sk); in tcp_v4_rcv()
1673 inet_twsk_put(inet_twsk(sk)); in tcp_v4_rcv()
1678 inet_twsk_put(inet_twsk(sk)); in tcp_v4_rcv()
1682 inet_twsk_put(inet_twsk(sk)); in tcp_v4_rcv()
1685 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { in tcp_v4_rcv()
1693 inet_twsk_deschedule(inet_twsk(sk)); in tcp_v4_rcv()
1694 inet_twsk_put(inet_twsk(sk)); in tcp_v4_rcv()
1695 sk = sk2; in tcp_v4_rcv()
1701 tcp_v4_timewait_ack(sk, skb); in tcp_v4_rcv()
1716 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) in inet_sk_rx_dst_set() argument
1721 sk->sk_rx_dst = dst; in inet_sk_rx_dst_set()
1722 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; in inet_sk_rx_dst_set()
1759 static int tcp_v4_init_sock(struct sock *sk) in tcp_v4_init_sock() argument
1761 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v4_init_sock()
1763 tcp_init_sock(sk); in tcp_v4_init_sock()
1768 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific; in tcp_v4_init_sock()
1774 void tcp_v4_destroy_sock(struct sock *sk) in tcp_v4_destroy_sock() argument
1776 struct tcp_sock *tp = tcp_sk(sk); in tcp_v4_destroy_sock()
1778 tcp_clear_xmit_timers(sk); in tcp_v4_destroy_sock()
1780 tcp_cleanup_congestion_control(sk); in tcp_v4_destroy_sock()
1783 tcp_write_queue_purge(sk); in tcp_v4_destroy_sock()
1791 tcp_clear_md5_list(sk); in tcp_v4_destroy_sock()
1801 if (inet_csk(sk)->icsk_bind_hash) in tcp_v4_destroy_sock()
1802 inet_put_port(sk); in tcp_v4_destroy_sock()
1809 sk_sockets_allocated_dec(sk); in tcp_v4_destroy_sock()
1810 sock_release_memcg(sk); in tcp_v4_destroy_sock()
1826 struct sock *sk = cur; in listening_get_next() local
1831 if (!sk) { in listening_get_next()
1834 sk = sk_nulls_head(&ilb->head); in listening_get_next()
1860 sk = sk_nulls_next(st->syn_wait_sk); in listening_get_next()
1864 icsk = inet_csk(sk); in listening_get_next()
1869 sk = sk_nulls_next(sk); in listening_get_next()
1872 sk_nulls_for_each_from(sk, node) { in listening_get_next()
1873 if (!net_eq(sock_net(sk), net)) in listening_get_next()
1875 if (sk->sk_family == st->family) { in listening_get_next()
1876 cur = sk; in listening_get_next()
1879 icsk = inet_csk(sk); in listening_get_next()
1883 st->uid = sock_i_uid(sk); in listening_get_next()
1884 st->syn_wait_sk = sk; in listening_get_next()
1896 sk = sk_nulls_head(&ilb->head); in listening_get_next()
1937 struct sock *sk; in established_get_first() local
1946 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { in established_get_first()
1947 if (sk->sk_family != st->family || in established_get_first()
1948 !net_eq(sock_net(sk), net)) { in established_get_first()
1951 rc = sk; in established_get_first()
1962 struct sock *sk = cur; in established_get_next() local
1970 sk = sk_nulls_next(sk); in established_get_next()
1972 sk_nulls_for_each_from(sk, node) { in established_get_next()
1973 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) in established_get_next()
1974 return sk; in established_get_next()
2187 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) in get_tcp4_sock() argument
2191 const struct tcp_sock *tp = tcp_sk(sk); in get_tcp4_sock()
2192 const struct inet_connection_sock *icsk = inet_csk(sk); in get_tcp4_sock()
2193 const struct inet_sock *inet = inet_sk(sk); in get_tcp4_sock()
2209 } else if (timer_pending(&sk->sk_timer)) { in get_tcp4_sock()
2211 timer_expires = sk->sk_timer.expires; in get_tcp4_sock()
2217 if (sk->sk_state == TCP_LISTEN) in get_tcp4_sock()
2218 rx_queue = sk->sk_ack_backlog; in get_tcp4_sock()
2227 i, src, srcp, dest, destp, sk->sk_state, in get_tcp4_sock()
2233 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), in get_tcp4_sock()
2235 sock_i_ino(sk), in get_tcp4_sock()
2236 atomic_read(&sk->sk_refcnt), sk, in get_tcp4_sock()
2241 sk->sk_state == TCP_LISTEN ? in get_tcp4_sock()
2270 struct sock *sk = v; in tcp4_seq_show() local
2284 if (sk->sk_state == TCP_TIME_WAIT) in tcp4_seq_show()
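get_tcp4_sock() and tcp4_seq_show() emit the per-socket lines of /proc/net/tcp: hex local and remote address:port pairs, the TCP state, queue sizes, timer columns, uid and inode. A small reader for that output, decoding just the local address, port and state columns (a sketch; the field layout follows what the code above prints):

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/tcp", "r");
	char line[512];
	unsigned int laddr, lport, raddr, rport, state;

	if (!fp)
		return 1;

	fgets(line, sizeof(line), fp);	/* skip the header line */
	while (fgets(line, sizeof(line), fp)) {
		if (sscanf(line, " %*d: %8X:%X %8X:%X %X",
			   &laddr, &lport, &raddr, &rport, &state) != 5)
			continue;
		/* The address hex is the raw __be32 value, so it can be
		 * handed back to struct in_addr unchanged; the port is
		 * already in host order (ntohs() was done in the kernel). */
		struct in_addr a = { .s_addr = laddr };
		printf("%s:%u state %02X\n", inet_ntoa(a), lport, state);
	}
	fclose(fp);
	return 0;
}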
2408 struct sock *sk; in tcp_sk_init() local
2410 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, in tcp_sk_init()
2414 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; in tcp_sk_init()