Lines matching references to sk in net/tipc/socket.c (Linux kernel): file line number, matching source line, and enclosing function or declaration context.

81 	struct sock sk;  member
104 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
105 static void tipc_data_ready(struct sock *sk);
106 static void tipc_write_space(struct sock *sk);
230 static struct tipc_sock *tipc_sk(const struct sock *sk) in tipc_sk() argument
232 return container_of(sk, struct tipc_sock, sk); in tipc_sk()
245 static void tsk_advance_rx_queue(struct sock *sk) in tsk_advance_rx_queue() argument
247 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); in tsk_advance_rx_queue()
255 static void tsk_rej_rx_queue(struct sock *sk) in tsk_rej_rx_queue() argument
259 u32 own_node = tsk_own_node(tipc_sk(sk)); in tsk_rej_rx_queue()
261 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { in tsk_rej_rx_queue()
263 tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0); in tsk_rej_rx_queue()
274 struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id); in tsk_peer_msg()
318 struct sock *sk; in tipc_sk_create() local
345 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); in tipc_sk_create()
346 if (sk == NULL) in tipc_sk_create()
349 tsk = tipc_sk(sk); in tipc_sk_create()
353 tn = net_generic(sock_net(sk), tipc_net_id); in tipc_sk_create()
360 sock_init_data(sock, sk); in tipc_sk_create()
366 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); in tipc_sk_create()
367 sk->sk_backlog_rcv = tipc_backlog_rcv; in tipc_sk_create()
368 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; in tipc_sk_create()
369 sk->sk_data_ready = tipc_data_ready; in tipc_sk_create()
370 sk->sk_write_space = tipc_write_space; in tipc_sk_create()
387 sock_put(&tsk->sk); in tipc_sk_callback()
408 struct sock *sk = sock->sk; in tipc_release() local
418 if (sk == NULL) in tipc_release()
421 net = sock_net(sk); in tipc_release()
422 tsk = tipc_sk(sk); in tipc_release()
423 lock_sock(sk); in tipc_release()
431 skb = __skb_dequeue(&sk->sk_receive_queue); in tipc_release()
451 if (del_timer_sync(&sk->sk_timer) && in tipc_release()
453 sock_put(sk); in tipc_release()
466 __skb_queue_purge(&sk->sk_receive_queue); in tipc_release()
470 release_sock(sk); in tipc_release()
473 sock->sk = NULL; in tipc_release()
496 struct sock *sk = sock->sk; in tipc_bind() local
498 struct tipc_sock *tsk = tipc_sk(sk); in tipc_bind()
501 lock_sock(sk); in tipc_bind()
534 release_sock(sk); in tipc_bind()
555 struct tipc_sock *tsk = tipc_sk(sock->sk); in tipc_getname()
556 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id); in tipc_getname()
622 struct sock *sk = sock->sk; in tipc_poll() local
623 struct tipc_sock *tsk = tipc_sk(sk); in tipc_poll()
626 sock_poll_wait(file, sk_sleep(sk), wait); in tipc_poll()
640 if (!skb_queue_empty(&sk->sk_receive_queue)) in tipc_poll()
665 struct sock *sk = sock->sk; in tipc_sendmcast() local
666 struct tipc_sock *tsk = tipc_sk(sk); in tipc_sendmcast()
667 struct net *net = sock_net(sk); in tipc_sendmcast()
669 struct sk_buff_head *pktchain = &sk->sk_write_queue; in tipc_sendmcast()
701 tipc_sk(sk)->link_cong = 1; in tipc_sendmcast()
786 tsk->sk.sk_write_space(&tsk->sk); in tipc_sk_proto_rcv()
801 struct sock *sk = sock->sk; in tipc_wait_for_sndmsg() local
802 struct tipc_sock *tsk = tipc_sk(sk); in tipc_wait_for_sndmsg()
807 int err = sock_error(sk); in tipc_wait_for_sndmsg()
817 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in tipc_wait_for_sndmsg()
818 done = sk_wait_event(sk, timeo_p, !tsk->link_cong); in tipc_wait_for_sndmsg()
819 finish_wait(sk_sleep(sk), &wait); in tipc_wait_for_sndmsg()
840 struct sock *sk = sock->sk; in tipc_sendmsg() local
843 lock_sock(sk); in tipc_sendmsg()
845 release_sock(sk); in tipc_sendmsg()
853 struct sock *sk = sock->sk; in __tipc_sendmsg() local
854 struct tipc_sock *tsk = tipc_sk(sk); in __tipc_sendmsg()
855 struct net *net = sock_net(sk); in __tipc_sendmsg()
858 struct sk_buff_head *pktchain = &sk->sk_write_queue; in __tipc_sendmsg()
890 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); in __tipc_sendmsg()
953 struct sock *sk = sock->sk; in tipc_wait_for_sndpkt() local
954 struct tipc_sock *tsk = tipc_sk(sk); in tipc_wait_for_sndpkt()
959 int err = sock_error(sk); in tipc_wait_for_sndpkt()
971 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in tipc_wait_for_sndpkt()
972 done = sk_wait_event(sk, timeo_p, in tipc_wait_for_sndpkt()
976 finish_wait(sk_sleep(sk), &wait); in tipc_wait_for_sndpkt()
994 struct sock *sk = sock->sk; in tipc_send_stream() local
997 lock_sock(sk); in tipc_send_stream()
999 release_sock(sk); in tipc_send_stream()
1006 struct sock *sk = sock->sk; in __tipc_send_stream() local
1007 struct net *net = sock_net(sk); in __tipc_send_stream()
1008 struct tipc_sock *tsk = tipc_sk(sk); in __tipc_send_stream()
1010 struct sk_buff_head *pktchain = &sk->sk_write_queue; in __tipc_send_stream()
1036 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); in __tipc_send_stream()
1097 struct sock *sk = &tsk->sk; in tipc_sk_finish_conn() local
1098 struct net *net = sock_net(sk); in tipc_sk_finish_conn()
1110 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); in tipc_sk_finish_conn()
1211 struct net *net = sock_net(&tsk->sk); in tipc_sk_send_ack()
1231 struct sock *sk = sock->sk; in tipc_wait_for_rcvmsg() local
1237 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in tipc_wait_for_rcvmsg()
1238 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { in tipc_wait_for_rcvmsg()
1243 release_sock(sk); in tipc_wait_for_rcvmsg()
1245 lock_sock(sk); in tipc_wait_for_rcvmsg()
1248 if (!skb_queue_empty(&sk->sk_receive_queue)) in tipc_wait_for_rcvmsg()
1257 finish_wait(sk_sleep(sk), &wait); in tipc_wait_for_rcvmsg()
1276 struct sock *sk = sock->sk; in tipc_recvmsg() local
1277 struct tipc_sock *tsk = tipc_sk(sk); in tipc_recvmsg()
1289 lock_sock(sk); in tipc_recvmsg()
1296 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); in tipc_recvmsg()
1305 buf = skb_peek(&sk->sk_receive_queue); in tipc_recvmsg()
1312 tsk_advance_rx_queue(sk); in tipc_recvmsg()
1349 tsk_advance_rx_queue(sk); in tipc_recvmsg()
1352 release_sock(sk); in tipc_recvmsg()
1370 struct sock *sk = sock->sk; in tipc_recv_stream() local
1371 struct tipc_sock *tsk = tipc_sk(sk); in tipc_recv_stream()
1385 lock_sock(sk); in tipc_recv_stream()
1392 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); in tipc_recv_stream()
1393 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); in tipc_recv_stream()
1402 buf = skb_peek(&sk->sk_receive_queue); in tipc_recv_stream()
1409 tsk_advance_rx_queue(sk); in tipc_recv_stream()
1458 tsk_advance_rx_queue(sk); in tipc_recv_stream()
1463 (!skb_queue_empty(&sk->sk_receive_queue) || in tipc_recv_stream()
1470 release_sock(sk); in tipc_recv_stream()
1478 static void tipc_write_space(struct sock *sk) in tipc_write_space() argument
1483 wq = rcu_dereference(sk->sk_wq); in tipc_write_space()
1495 static void tipc_data_ready(struct sock *sk) in tipc_data_ready() argument
1500 wq = rcu_dereference(sk->sk_wq); in tipc_data_ready()
1516 struct sock *sk = &tsk->sk; in filter_connect() local
1517 struct net *net = sock_net(sk); in filter_connect()
1518 struct socket *sock = sk->sk_socket; in filter_connect()
1547 sk->sk_err = ECONNREFUSED; in filter_connect()
1554 sk->sk_err = EINVAL; in filter_connect()
1571 if (waitqueue_active(sk_sleep(sk))) in filter_connect()
1572 wake_up_interruptible(sk_sleep(sk)); in filter_connect()
1608 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf) in rcvbuf_limit() argument
1615 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE << in rcvbuf_limit()
1631 static int filter_rcv(struct sock *sk, struct sk_buff **skb) in filter_rcv() argument
1633 struct socket *sock = sk->sk_socket; in filter_rcv()
1634 struct tipc_sock *tsk = tipc_sk(sk); in filter_rcv()
1636 unsigned int limit = rcvbuf_limit(sk, *skb); in filter_rcv()
1647 sk->sk_write_space(sk); in filter_rcv()
1666 if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit) in filter_rcv()
1671 __skb_queue_tail(&sk->sk_receive_queue, *skb); in filter_rcv()
1672 skb_set_owner_r(*skb, sk); in filter_rcv()
1674 sk->sk_data_ready(sk); in filter_rcv()
1688 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) in tipc_backlog_rcv() argument
1693 struct tipc_sock *tsk = tipc_sk(sk); in tipc_backlog_rcv()
1694 struct net *net = sock_net(sk); in tipc_backlog_rcv()
1697 err = filter_rcv(sk, &skb); in tipc_backlog_rcv()
1722 static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, in tipc_sk_enqueue() argument
1737 if (!sock_owned_by_user(sk)) { in tipc_sk_enqueue()
1738 err = filter_rcv(sk, &skb); in tipc_sk_enqueue()
1744 dcnt = &tipc_sk(sk)->dupl_rcvcnt; in tipc_sk_enqueue()
1745 if (sk->sk_backlog.len) in tipc_sk_enqueue()
1747 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); in tipc_sk_enqueue()
1748 if (likely(!sk_add_backlog(sk, skb, lim))) in tipc_sk_enqueue()
1771 struct sock *sk; in tipc_sk_rcv() local
1779 sk = &tsk->sk; in tipc_sk_rcv()
1780 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { in tipc_sk_rcv()
1781 err = tipc_sk_enqueue(inputq, sk, dport, &skb); in tipc_sk_rcv()
1782 spin_unlock_bh(&sk->sk_lock.slock); in tipc_sk_rcv()
1785 sock_put(sk); in tipc_sk_rcv()
1808 struct sock *sk = sock->sk; in tipc_wait_for_connect() local
1813 int err = sock_error(sk); in tipc_wait_for_connect()
1821 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in tipc_wait_for_connect()
1822 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING); in tipc_wait_for_connect()
1823 finish_wait(sk_sleep(sk), &wait); in tipc_wait_for_connect()
1840 struct sock *sk = sock->sk; in tipc_connect() local
1841 struct tipc_sock *tsk = tipc_sk(sk); in tipc_connect()
1848 lock_sock(sk); in tipc_connect()
1914 release_sock(sk); in tipc_connect()
1927 struct sock *sk = sock->sk; in tipc_listen() local
1930 lock_sock(sk); in tipc_listen()
1939 release_sock(sk); in tipc_listen()
1945 struct sock *sk = sock->sk; in tipc_wait_for_accept() local
1955 prepare_to_wait_exclusive(sk_sleep(sk), &wait, in tipc_wait_for_accept()
1957 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { in tipc_wait_for_accept()
1958 release_sock(sk); in tipc_wait_for_accept()
1960 lock_sock(sk); in tipc_wait_for_accept()
1963 if (!skb_queue_empty(&sk->sk_receive_queue)) in tipc_wait_for_accept()
1975 finish_wait(sk_sleep(sk), &wait); in tipc_wait_for_accept()
1989 struct sock *new_sk, *sk = sock->sk; in tipc_accept() local
1996 lock_sock(sk); in tipc_accept()
2002 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); in tipc_accept()
2007 buf = skb_peek(&sk->sk_receive_queue); in tipc_accept()
2009 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); in tipc_accept()
2012 security_sk_clone(sock->sk, new_sock->sk); in tipc_accept()
2014 new_sk = new_sock->sk; in tipc_accept()
2044 tsk_advance_rx_queue(sk); in tipc_accept()
2047 __skb_dequeue(&sk->sk_receive_queue); in tipc_accept()
2053 release_sock(sk); in tipc_accept()
2068 struct sock *sk = sock->sk; in tipc_shutdown() local
2069 struct net *net = sock_net(sk); in tipc_shutdown()
2070 struct tipc_sock *tsk = tipc_sk(sk); in tipc_shutdown()
2078 lock_sock(sk); in tipc_shutdown()
2086 skb = __skb_dequeue(&sk->sk_receive_queue); in tipc_shutdown()
2114 __skb_queue_purge(&sk->sk_receive_queue); in tipc_shutdown()
2117 sk->sk_state_change(sk); in tipc_shutdown()
2125 release_sock(sk); in tipc_shutdown()
2132 struct sock *sk = &tsk->sk; in tipc_sk_timeout() local
2137 bh_lock_sock(sk); in tipc_sk_timeout()
2139 bh_unlock_sock(sk); in tipc_sk_timeout()
2146 if (!sock_owned_by_user(sk)) { in tipc_sk_timeout()
2147 sk->sk_socket->state = SS_DISCONNECTING; in tipc_sk_timeout()
2149 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), in tipc_sk_timeout()
2151 sk->sk_state_change(sk); in tipc_sk_timeout()
2154 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20)); in tipc_sk_timeout()
2162 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv); in tipc_sk_timeout()
2164 bh_unlock_sock(sk); in tipc_sk_timeout()
2166 tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid); in tipc_sk_timeout()
2168 sock_put(sk); in tipc_sk_timeout()
2174 struct net *net = sock_net(&tsk->sk); in tipc_sk_publish()
2198 struct net *net = sock_net(&tsk->sk); in tipc_sk_withdraw()
2243 spin_lock_bh(&tsk->sk.sk_lock.slock); in tipc_sk_reinit()
2247 spin_unlock_bh(&tsk->sk.sk_lock.slock); in tipc_sk_reinit()
2261 sock_hold(&tsk->sk); in tipc_sk_lookup()
2269 struct sock *sk = &tsk->sk; in tipc_sk_insert() local
2270 struct net *net = sock_net(sk); in tipc_sk_insert()
2280 sock_hold(&tsk->sk); in tipc_sk_insert()
2284 sock_put(&tsk->sk); in tipc_sk_insert()
2292 struct sock *sk = &tsk->sk; in tipc_sk_remove() local
2293 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); in tipc_sk_remove()
2296 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); in tipc_sk_remove()
2297 __sock_put(sk); in tipc_sk_remove()
2344 struct sock *sk = sock->sk; in tipc_setsockopt() local
2345 struct tipc_sock *tsk = tipc_sk(sk); in tipc_setsockopt()
2359 lock_sock(sk); in tipc_setsockopt()
2375 tipc_sk(sk)->conn_timeout = value; in tipc_setsockopt()
2382 release_sock(sk); in tipc_setsockopt()
2403 struct sock *sk = sock->sk; in tipc_getsockopt() local
2404 struct tipc_sock *tsk = tipc_sk(sk); in tipc_getsockopt()
2417 lock_sock(sk); in tipc_getsockopt()
2437 value = skb_queue_len(&sk->sk_receive_queue); in tipc_getsockopt()
2443 release_sock(sk); in tipc_getsockopt()
2459 struct sock *sk = sock->sk; in tipc_ioctl() local
2467 if (!tipc_node_get_linkname(sock_net(sk), in tipc_ioctl()
2634 struct net *net = sock_net(skb->sk); in __tipc_nl_add_sk()
2677 struct net *net = sock_net(skb->sk); in tipc_nl_sk_dump()
2686 spin_lock_bh(&tsk->sk.sk_lock.slock); in tipc_nl_sk_dump()
2688 spin_unlock_bh(&tsk->sk.sk_lock.slock); in tipc_nl_sk_dump()
2695 spin_unlock_bh(&tsk->sk.sk_lock.slock); in tipc_nl_sk_dump()
2699 spin_unlock_bh(&tsk->sk.sk_lock.slock); in tipc_nl_sk_dump()
2796 struct net *net = sock_net(skb->sk); in tipc_nl_publ_dump()
2826 lock_sock(&tsk->sk); in tipc_nl_publ_dump()
2830 release_sock(&tsk->sk); in tipc_nl_publ_dump()
2831 sock_put(&tsk->sk); in tipc_nl_publ_dump()
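
Nearly every entry above goes through the same accessor: tipc_sk() recovers the enclosing struct tipc_sock from the struct sock embedded in it (the member at line 81, refs 230-232), and the result is then used under the socket lock (lock_sock()/release_sock(), e.g. refs 501/534 in tipc_bind(), or bh_lock_sock()/bh_unlock_sock() in tipc_sk_timeout()). Below is a minimal, self-contained userspace sketch of that container_of pattern; the struct layouts and field names here are simplified stand-ins for illustration, not the real kernel definitions.

#include <stddef.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the kernel structures; the field layout is
 * illustrative only.
 */
struct sock {
	int sk_rcvbuf;			/* placeholder generic-socket field */
};

struct tipc_sock {
	struct sock sk;			/* embedded socket, as the member at line 81 */
	unsigned int portid;		/* placeholder TIPC-specific state */
};

/* Simplified container_of(): step back from a member pointer to its container. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors the accessor at refs 230-232. */
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

int main(void)
{
	struct tipc_sock tsk = { .sk = { .sk_rcvbuf = 4096 }, .portid = 42 };
	struct sock *sk = &tsk.sk;

	/* Round-trip: the generic struct sock pointer recovers the TIPC socket. */
	printf("portid=%u rcvbuf=%d\n", tipc_sk(sk)->portid, sk->sk_rcvbuf);
	return 0;
}

In the kernel proper, container_of() additionally type-checks the member pointer; the recovered tipc_sock is how handlers such as filter_rcv(), tipc_backlog_rcv() and tipc_sk_timeout() in the listing reach TIPC-specific state from the generic struct sock they are handed.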