Lines matching refs:sk (net/ipv4/udp.c)

139 struct sock *sk, in udp_lib_lport_inuse() argument
146 kuid_t uid = sock_i_uid(sk); in udp_lib_lport_inuse()
150 sk2 != sk && in udp_lib_lport_inuse()
152 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse()
153 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse()
154 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse()
155 (!sk2->sk_reuseport || !sk->sk_reuseport || in udp_lib_lport_inuse()
157 saddr_comp(sk, sk2)) { in udp_lib_lport_inuse()
172 struct sock *sk, in udp_lib_lport_inuse2() argument
178 kuid_t uid = sock_i_uid(sk); in udp_lib_lport_inuse2()
184 sk2 != sk && in udp_lib_lport_inuse2()
186 (!sk2->sk_reuse || !sk->sk_reuse) && in udp_lib_lport_inuse2()
187 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || in udp_lib_lport_inuse2()
188 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && in udp_lib_lport_inuse2()
189 (!sk2->sk_reuseport || !sk->sk_reuseport || in udp_lib_lport_inuse2()
191 saddr_comp(sk, sk2)) { in udp_lib_lport_inuse2()
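
udp_lib_lport_inuse() (primary, port-only hash) and udp_lib_lport_inuse2() (secondary, port+address hash) apply the same conflict predicate while scanning a slot: another socket blocks the bind only if it holds the same port in the same namespace and none of the escape hatches apply, i.e. both sockets set SO_REUSEADDR, or they are bound to different devices, or both set SO_REUSEPORT under the same UID, or their source addresses do not collide (saddr_comp()). A minimal userspace model of that predicate; the field names mirror struct sock but this is a sketch, not the kernel structure:

    #include <stdbool.h>

    /* Hypothetical stand-in for the handful of struct sock fields the
     * predicate reads. */
    struct bind_entry {
        int            net_id;         /* network namespace */
        unsigned short port;
        bool           reuse;          /* SO_REUSEADDR */
        bool           reuseport;      /* SO_REUSEPORT */
        unsigned       uid;
        int            bound_dev_if;   /* 0 = not device-bound */
        unsigned       saddr;          /* 0 = INADDR_ANY */
    };

    /* True when b blocks a from binding the same port, following the
     * chain of && conditions in udp_lib_lport_inuse(). */
    static bool conflicts(const struct bind_entry *a, const struct bind_entry *b)
    {
        if (a == b || a->net_id != b->net_id || a->port != b->port)
            return false;
        if (b->reuse && a->reuse)                  /* both SO_REUSEADDR */
            return false;
        if (b->bound_dev_if && a->bound_dev_if &&  /* different devices */
            b->bound_dev_if != a->bound_dev_if)
            return false;
        if (b->reuseport && a->reuseport && b->uid == a->uid)
            return false;                          /* same-owner reuseport */
        /* saddr_comp(): a wildcard address collides with everything */
        return !a->saddr || !b->saddr || a->saddr == b->saddr;
    }
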
209 int udp_lib_get_port(struct sock *sk, unsigned short snum, in udp_lib_get_port() argument
215 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_get_port()
217 struct net *net = sock_net(sk); in udp_lib_get_port()
239 udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, in udp_lib_get_port()
263 unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; in udp_lib_get_port()
273 sk, saddr_comp); in udp_lib_get_port()
277 sk, saddr_comp); in udp_lib_get_port()
285 if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, in udp_lib_get_port()
290 inet_sk(sk)->inet_num = snum; in udp_lib_get_port()
291 udp_sk(sk)->udp_port_hash = snum; in udp_lib_get_port()
292 udp_sk(sk)->udp_portaddr_hash ^= snum; in udp_lib_get_port()
293 if (sk_unhashed(sk)) { in udp_lib_get_port()
294 sk_nulls_add_node_rcu(sk, &hslot->head); in udp_lib_get_port()
296 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in udp_lib_get_port()
298 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_get_port()
300 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_get_port()
328 int udp_v4_get_port(struct sock *sk, unsigned short snum) in udp_v4_get_port() argument
331 udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); in udp_v4_get_port()
333 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); in udp_v4_get_port()
336 udp_sk(sk)->udp_portaddr_hash = hash2_partial; in udp_v4_get_port()
337 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); in udp_v4_get_port()
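
udp_v4_get_port() leans on the shape of udp4_portaddr_hash(), which in this kernel is a per-namespace jhash of the address XORed with the port (visible in the XOR arithmetic above): storing hash2_partial = hash(saddr, 0) in udp_portaddr_hash means the final secondary slot for whatever port gets chosen is just partial ^ port (the "udp_portaddr_hash ^= snum" and "slot2 = ... ^ snum" lines), and hash2_nulladdr gives the INADDR_ANY slot for the same port, so udp_lib_get_port() can check both secondary chains for conflicts. A standalone sketch of the identity, with mix() standing in for the jhash step:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for jhash_1word(saddr, net_hash_mix(net)); any fixed
     * mixing function preserves the identity below. */
    static uint32_t mix(uint32_t saddr) { return saddr * 2654435761u; }

    /* Modeled on udp4_portaddr_hash(): hash(addr, port) = mix(addr) ^ port */
    static uint32_t portaddr_hash(uint32_t saddr, uint16_t port)
    {
        return mix(saddr) ^ port;
    }

    int main(void)
    {
        uint32_t saddr = 0x0a000001;                 /* 10.0.0.1 */
        uint32_t partial = portaddr_hash(saddr, 0);  /* hash2_partial */

        for (uint16_t port = 1024; port < 1034; port++)
            /* the slot for (saddr, port) is recoverable by XOR alone */
            assert((partial ^ port) == portaddr_hash(saddr, port));
        return 0;
    }
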
340 static inline int compute_score(struct sock *sk, struct net *net, in compute_score() argument
347 if (!net_eq(sock_net(sk), net) || in compute_score()
348 udp_sk(sk)->udp_port_hash != hnum || in compute_score()
349 ipv6_only_sock(sk)) in compute_score()
352 score = (sk->sk_family == PF_INET) ? 2 : 1; in compute_score()
353 inet = inet_sk(sk); in compute_score()
373 if (sk->sk_bound_dev_if) { in compute_score()
374 if (sk->sk_bound_dev_if != dif) in compute_score()
385 static inline int compute_score2(struct sock *sk, struct net *net, in compute_score2() argument
392 if (!net_eq(sock_net(sk), net) || in compute_score2()
393 ipv6_only_sock(sk)) in compute_score2()
396 inet = inet_sk(sk); in compute_score2()
402 score = (sk->sk_family == PF_INET) ? 2 : 1; in compute_score2()
416 if (sk->sk_bound_dev_if) { in compute_score2()
417 if (sk->sk_bound_dev_if != dif) in compute_score2()
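
compute_score() (primary hash, wildcard-capable) and compute_score2() (secondary hash, exact local address required) rank candidate sockets for an incoming datagram: any attribute the socket has bound or connected must match the packet exactly or the socket is disqualified, and each bound-and-matching attribute raises the score so the most specific socket wins. A userspace model; the +4 increments match this era of the code, but the disqualify-or-reward structure is the point:

    /* Model of compute_score(): -1 disqualifies, higher = more specific. */
    struct lookup_key {
        unsigned daddr, saddr;
        unsigned short dport, sport;
        int dif;
    };
    struct udp_sock_m {
        int family_inet;             /* 1 for PF_INET, 0 for mapped v6 */
        unsigned rcv_saddr, daddr;   /* 0 = unbound / not connected */
        unsigned short num, dport;   /* local port, connected remote port */
        int bound_dev_if;
    };

    static int compute_score_model(const struct udp_sock_m *s,
                                   const struct lookup_key *k)
    {
        int score;

        if (s->num != k->dport)      /* local port must always match */
            return -1;
        score = s->family_inet ? 2 : 1;
        if (s->rcv_saddr) {          /* bound address must match exactly */
            if (s->rcv_saddr != k->daddr) return -1;
            score += 4;
        }
        if (s->daddr) {              /* connected: remote address must match */
            if (s->daddr != k->saddr) return -1;
            score += 4;
        }
        if (s->dport) {              /* connected: remote port must match */
            if (s->dport != k->sport) return -1;
            score += 4;
        }
        if (s->bound_dev_if) {       /* device-bound: incoming dev must match */
            if (s->bound_dev_if != k->dif) return -1;
            score += 4;
        }
        return score;
    }
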
443 struct sock *sk, *result; in udp4_lib_lookup2() local
451 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { in udp4_lib_lookup2()
452 score = compute_score2(sk, net, saddr, sport, in udp4_lib_lookup2()
455 result = sk; in udp4_lib_lookup2()
457 reuseport = sk->sk_reuseport; in udp4_lib_lookup2()
466 result = sk; in udp4_lib_lookup2()
496 struct sock *sk, *result; in __udp4_lib_lookup() local
532 sk_nulls_for_each_rcu(sk, node, &hslot->head) { in __udp4_lib_lookup()
533 score = compute_score(sk, net, saddr, hnum, sport, in __udp4_lib_lookup()
536 result = sk; in __udp4_lib_lookup()
538 reuseport = sk->sk_reuseport; in __udp4_lib_lookup()
547 result = sk; in __udp4_lib_lookup()
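
udp4_lib_lookup2() and __udp4_lib_lookup() keep the best-scoring socket as they walk a chain; when several SO_REUSEPORT sockets tie for the top score, the i-th tying socket replaces the current pick with probability 1/i (reciprocal_scale() over a rolling hash seeded from the flow), which selects uniformly among n candidates while keeping a given 4-tuple glued to one socket. That is reservoir sampling with k = 1; a userspace model using rand() in place of the flow hash:

    #include <stdio.h>
    #include <stdlib.h>

    /* Reservoir-sampling selection among equal-score candidates: the i-th
     * tying socket replaces the pick with probability 1/i, leaving each of
     * n candidates chosen with probability 1/n. The kernel derives its
     * randomness from a flow hash, so a given 4-tuple always lands on the
     * same socket; rand() here is only for demonstration. */
    static int pick(const int *candidates, int n)
    {
        int chosen = -1, matches = 0;

        for (int i = 0; i < n; i++) {
            matches++;
            if (rand() % matches == 0)   /* probability 1/matches */
                chosen = candidates[i];
        }
        return chosen;
    }

    int main(void)
    {
        int socks[] = { 3, 4, 7, 9 };
        int hits[10] = { 0 };

        for (int i = 0; i < 100000; i++)
            hits[pick(socks, 4)]++;
        for (int i = 0; i < 10; i++)
            if (hits[i])
                printf("sock %d: %d\n", i, hits[i]);  /* ~25000 each */
        return 0;
    }
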
591 static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, in __udp_is_mcast_sock() argument
596 struct inet_sock *inet = inet_sk(sk); in __udp_is_mcast_sock()
598 if (!net_eq(sock_net(sk), net) || in __udp_is_mcast_sock()
599 udp_sk(sk)->udp_port_hash != hnum || in __udp_is_mcast_sock()
603 ipv6_only_sock(sk) || in __udp_is_mcast_sock()
604 (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) in __udp_is_mcast_sock()
606 if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) in __udp_is_mcast_sock()
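
__udp_is_mcast_sock() is the per-socket filter for datagrams arriving on a multicast address: namespace, hashed port, bound address/port/device must all be compatible, and ip_mc_sf_allow() then consults the group memberships and source filters the socket configured. For reference, the usual way such a socket comes to match; group 239.0.0.1 and port 5000 are arbitrary, and error checks are omitted for brevity:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int one = 1;
        struct sockaddr_in addr = { .sin_family = AF_INET,
                                    .sin_port = htons(5000),
                                    .sin_addr.s_addr = htonl(INADDR_ANY) };
        struct ip_mreq mreq;

        /* several receivers may share a multicast port */
        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));

        /* join the group; this is what ip_mc_sf_allow() later checks */
        mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
        mreq.imr_interface.s_addr = htonl(INADDR_ANY);
        setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));

        char buf[1500];
        recv(fd, buf, sizeof(buf), 0);   /* delivered via the mcast path */
        close(fd);
        return 0;
    }
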
629 struct sock *sk; in __udp4_lib_err() local
634 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, in __udp4_lib_err()
636 if (!sk) { in __udp4_lib_err()
643 inet = inet_sk(sk); in __udp4_lib_err()
658 ipv4_sk_update_pmtu(skb, sk, info); in __udp4_lib_err()
673 ipv4_sk_redirect(skb, sk); in __udp4_lib_err()
682 if (!harderr || sk->sk_state != TCP_ESTABLISHED) in __udp4_lib_err()
685 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); in __udp4_lib_err()
687 sk->sk_err = err; in __udp4_lib_err()
688 sk->sk_error_report(sk); in __udp4_lib_err()
690 sock_put(sk); in __udp4_lib_err()
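
__udp4_lib_err() maps an incoming ICMP error back to its socket via __udp4_lib_lookup(); unless the socket is connected (sk_state == TCP_ESTABLISHED means "connected" for UDP too) the error is not reported at all, and it only becomes readable data if IP_RECVERR caused ip_icmp_error() to queue it. Draining that queue from userspace, assuming the socket already has IP_RECVERR enabled:

    #include <linux/errqueue.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Drain one queued ICMP error (e.g. port unreachable) from a UDP
     * socket set up with:
     *     int on = 1;
     *     setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
     */
    static void read_udp_error(int fd)
    {
        char cbuf[512], dbuf[1500];
        struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = cbuf,
                              .msg_controllen = sizeof(cbuf) };
        struct cmsghdr *cm;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
            return;                      /* nothing queued */
        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
            if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
                struct sock_extended_err *ee =
                        (struct sock_extended_err *)CMSG_DATA(cm);

                printf("icmp type %u code %u -> errno %d\n",
                       ee->ee_type, ee->ee_code, ee->ee_errno);
            }
        }
    }
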
701 void udp_flush_pending_frames(struct sock *sk) in udp_flush_pending_frames() argument
703 struct udp_sock *up = udp_sk(sk); in udp_flush_pending_frames()
708 ip_flush_pending_frames(sk); in udp_flush_pending_frames()
798 struct sock *sk = skb->sk; in udp_send_skb() local
799 struct inet_sock *inet = inet_sk(sk); in udp_send_skb()
802 int is_udplite = IS_UDPLITE(sk); in udp_send_skb()
819 else if (sk->sk_no_check_tx) { /* UDP csum disabled */ in udp_send_skb()
834 sk->sk_protocol, csum); in udp_send_skb()
839 err = ip_send_skb(sock_net(sk), skb); in udp_send_skb()
842 UDP_INC_STATS_USER(sock_net(sk), in udp_send_skb()
847 UDP_INC_STATS_USER(sock_net(sk), in udp_send_skb()
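
udp_send_skb() picks a checksum strategy per datagram: UDP-Lite gets partial coverage, sk_no_check_tx sends checksum 0 (legal for UDP over IPv4), offload-capable paths leave a partial sum for the NIC to finish, and everything else is summed in software before ip_send_skb(). The sk_no_check_tx branch is reachable from userspace through the Linux-specific SO_NO_CHECK option:

    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef SO_NO_CHECK
    #define SO_NO_CHECK 11           /* from asm-generic/socket.h */
    #endif

    /* Ask the kernel to transmit this socket's UDP datagrams with
     * checksum 0 ("disabled", allowed for UDP over IPv4 only); receivers
     * must be prepared to accept zero checksums. */
    static int disable_tx_checksum(int fd)
    {
        int one = 1;

        return setsockopt(fd, SOL_SOCKET, SO_NO_CHECK, &one, sizeof(one));
    }
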
855 int udp_push_pending_frames(struct sock *sk) in udp_push_pending_frames() argument
857 struct udp_sock *up = udp_sk(sk); in udp_push_pending_frames()
858 struct inet_sock *inet = inet_sk(sk); in udp_push_pending_frames()
863 skb = ip_finish_skb(sk, fl4); in udp_push_pending_frames()
876 int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) in udp_sendmsg() argument
878 struct inet_sock *inet = inet_sk(sk); in udp_sendmsg()
879 struct udp_sock *up = udp_sk(sk); in udp_sendmsg()
890 int err, is_udplite = IS_UDPLITE(sk); in udp_sendmsg()
919 lock_sock(sk); in udp_sendmsg()
922 release_sock(sk); in udp_sendmsg()
927 release_sock(sk); in udp_sendmsg()
948 if (sk->sk_state != TCP_ESTABLISHED) in udp_sendmsg()
959 ipc.oif = sk->sk_bound_dev_if; in udp_sendmsg()
961 sock_tx_timestamp(sk, &ipc.tx_flags); in udp_sendmsg()
964 err = ip_cmsg_send(sock_net(sk), msg, &ipc, in udp_sendmsg()
965 sk->sk_family == AF_INET6); in udp_sendmsg()
997 if (sock_flag(sk, SOCK_LOCALROUTE) || in udp_sendmsg()
1014 rt = (struct rtable *)sk_dst_check(sk, 0); in udp_sendmsg()
1017 struct net *net = sock_net(sk); in udp_sendmsg()
1020 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, in udp_sendmsg()
1021 RT_SCOPE_UNIVERSE, sk->sk_protocol, in udp_sendmsg()
1022 inet_sk_flowi_flags(sk), in udp_sendmsg()
1025 security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); in udp_sendmsg()
1026 rt = ip_route_output_flow(net, fl4, sk); in udp_sendmsg()
1037 !sock_flag(sk, SOCK_BROADCAST)) in udp_sendmsg()
1040 sk_dst_set(sk, dst_clone(&rt->dst)); in udp_sendmsg()
1053 skb = ip_make_skb(sk, fl4, getfrag, msg, ulen, in udp_sendmsg()
1062 lock_sock(sk); in udp_sendmsg()
1066 release_sock(sk); in udp_sendmsg()
1084 err = ip_append_data(sk, fl4, getfrag, msg, ulen, in udp_sendmsg()
1088 udp_flush_pending_frames(sk); in udp_sendmsg()
1090 err = udp_push_pending_frames(sk); in udp_sendmsg()
1091 else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) in udp_sendmsg()
1093 release_sock(sk); in udp_sendmsg()
1108 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in udp_sendmsg()
1109 UDP_INC_STATS_USER(sock_net(sk), in udp_sendmsg()
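
udp_sendmsg() splits into two paths: the common uncorked case builds and ships the whole datagram at once via ip_make_skb() plus udp_send_skb() without ever taking the socket lock, while corked sends (UDP_CORK or MSG_MORE, tracked in up->pending) take lock_sock() and accumulate data with ip_append_data() until udp_push_pending_frames() flushes them. Corking from userspace, assuming a connected socket:

    #include <netinet/in.h>
    #include <netinet/udp.h>
    #include <sys/socket.h>

    #ifndef UDP_CORK
    #define UDP_CORK 1               /* from linux/udp.h */
    #endif

    /* Build one datagram from several send() calls: everything written
     * while the cork is set is accumulated by ip_append_data(), and
     * clearing the cork flushes it as a single UDP packet via
     * udp_push_pending_frames(). */
    static void send_corked(int fd)
    {
        int on = 1, off = 0;

        setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
        send(fd, "header,", 7, 0);
        send(fd, "payload", 7, 0);
        setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
    }

Passing MSG_MORE to each send() has the same effect without the extra setsockopt() calls.
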
1123 int udp_sendpage(struct sock *sk, struct page *page, int offset, in udp_sendpage() argument
1126 struct inet_sock *inet = inet_sk(sk); in udp_sendpage()
1127 struct udp_sock *up = udp_sk(sk); in udp_sendpage()
1140 ret = udp_sendmsg(sk, &msg, 0); in udp_sendpage()
1145 lock_sock(sk); in udp_sendpage()
1148 release_sock(sk); in udp_sendpage()
1154 ret = ip_append_page(sk, &inet->cork.fl.u.ip4, in udp_sendpage()
1157 release_sock(sk); in udp_sendpage()
1158 return sock_no_sendpage(sk->sk_socket, page, offset, in udp_sendpage()
1162 udp_flush_pending_frames(sk); in udp_sendpage()
1168 ret = udp_push_pending_frames(sk); in udp_sendpage()
1172 release_sock(sk); in udp_sendpage()
1183 static unsigned int first_packet_length(struct sock *sk) in first_packet_length() argument
1185 struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; in first_packet_length()
1194 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, in first_packet_length()
1195 IS_UDPLITE(sk)); in first_packet_length()
1196 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, in first_packet_length()
1197 IS_UDPLITE(sk)); in first_packet_length()
1198 atomic_inc(&sk->sk_drops); in first_packet_length()
1206 bool slow = lock_sock_fast(sk); in first_packet_length()
1209 sk_mem_reclaim_partial(sk); in first_packet_length()
1210 unlock_sock_fast(sk, slow); in first_packet_length()
1219 int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) in udp_ioctl() argument
1224 int amount = sk_wmem_alloc_get(sk); in udp_ioctl()
1231 unsigned int amount = first_packet_length(sk); in udp_ioctl()
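
udp_ioctl() answers two queries: SIOCOUTQ reports not-yet-sent bytes from sk_wmem_alloc, and SIOCINQ (a.k.a. FIONREAD) reports the payload length of the first queued datagram via first_packet_length(), which as a side effect frees any leading packets whose deferred checksum verification fails. From userspace:

    #include <sys/ioctl.h>

    /* Size the next datagram before reading it. FIONREAD on a UDP socket
     * returns the length of the first queued datagram, not the total
     * number of bytes queued; 0 means either an empty queue or a
     * zero-length datagram at the head. */
    static int next_datagram_len(int fd)
    {
        int n = 0;

        if (ioctl(fd, FIONREAD, &n) < 0)
            return -1;
        return n;
    }
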
1257 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, in udp_recvmsg() argument
1260 struct inet_sock *inet = inet_sk(sk); in udp_recvmsg()
1266 int is_udplite = IS_UDPLITE(sk); in udp_recvmsg()
1270 return ip_recv_error(sk, msg, len, addr_len); in udp_recvmsg()
1273 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), in udp_recvmsg()
1310 atomic_inc(&sk->sk_drops); in udp_recvmsg()
1311 UDP_INC_STATS_USER(sock_net(sk), in udp_recvmsg()
1318 UDP_INC_STATS_USER(sock_net(sk), in udp_recvmsg()
1321 sock_recv_ts_and_drops(msg, sk, skb); in udp_recvmsg()
1339 skb_free_datagram_locked(sk, skb); in udp_recvmsg()
1344 slow = lock_sock_fast(sk); in udp_recvmsg()
1345 if (!skb_kill_datagram(sk, skb, flags)) { in udp_recvmsg()
1346 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); in udp_recvmsg()
1347 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in udp_recvmsg()
1349 unlock_sock_fast(sk, slow); in udp_recvmsg()
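
udp_recvmsg() copies at most the requested length and flags MSG_TRUNC when the datagram was larger; a deferred checksum is verified before copying, and a failure bumps UDP_MIB_CSUMERRORS/UDP_MIB_INERRORS, kills the datagram, and (for blocking reads) retries on the next one. One practical consequence is the sizing idiom below: recv() with MSG_PEEK | MSG_TRUNC returns the true length without consuming the datagram, so an exact buffer can be allocated for the real read:

    #include <sys/socket.h>
    #include <sys/types.h>

    /* Report the full length of the next queued datagram without
     * consuming it, regardless of how small the peek buffer is. */
    static ssize_t true_datagram_len(int fd)
    {
        char dummy;

        return recv(fd, &dummy, 1, MSG_PEEK | MSG_TRUNC);
    }
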
1357 int udp_disconnect(struct sock *sk, int flags) in udp_disconnect() argument
1359 struct inet_sock *inet = inet_sk(sk); in udp_disconnect()
1364 sk->sk_state = TCP_CLOSE; in udp_disconnect()
1367 sock_rps_reset_rxhash(sk); in udp_disconnect()
1368 sk->sk_bound_dev_if = 0; in udp_disconnect()
1369 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) in udp_disconnect()
1370 inet_reset_saddr(sk); in udp_disconnect()
1372 if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { in udp_disconnect()
1373 sk->sk_prot->unhash(sk); in udp_disconnect()
1376 sk_dst_reset(sk); in udp_disconnect()
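
udp_disconnect() is what connect() with AF_UNSPEC runs on a UDP socket: state falls back from TCP_ESTABLISHED to TCP_CLOSE, the learned source address is cleared (and the port unhashed unless it was explicitly bound), and the cached route is dropped via sk_dst_reset(). From userspace:

    #include <string.h>
    #include <sys/socket.h>

    /* Dissolve a UDP socket's association: afterwards send() needs an
     * explicit destination again and recv() accepts datagrams from any
     * peer, exactly the state udp_disconnect() restores. */
    static int udp_disconnect_fd(int fd)
    {
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        return connect(fd, &sa, sizeof(sa));
    }
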
1381 void udp_lib_unhash(struct sock *sk) in udp_lib_unhash() argument
1383 if (sk_hashed(sk)) { in udp_lib_unhash()
1384 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_unhash()
1387 hslot = udp_hashslot(udptable, sock_net(sk), in udp_lib_unhash()
1388 udp_sk(sk)->udp_port_hash); in udp_lib_unhash()
1389 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_unhash()
1392 if (sk_nulls_del_node_init_rcu(sk)) { in udp_lib_unhash()
1394 inet_sk(sk)->inet_num = 0; in udp_lib_unhash()
1395 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); in udp_lib_unhash()
1398 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); in udp_lib_unhash()
1410 void udp_lib_rehash(struct sock *sk, u16 newhash) in udp_lib_rehash() argument
1412 if (sk_hashed(sk)) { in udp_lib_rehash()
1413 struct udp_table *udptable = sk->sk_prot->h.udp_table; in udp_lib_rehash()
1416 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); in udp_lib_rehash()
1418 udp_sk(sk)->udp_portaddr_hash = newhash; in udp_lib_rehash()
1420 hslot = udp_hashslot(udptable, sock_net(sk), in udp_lib_rehash()
1421 udp_sk(sk)->udp_port_hash); in udp_lib_rehash()
1426 hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); in udp_lib_rehash()
1431 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, in udp_lib_rehash()
1442 static void udp_v4_rehash(struct sock *sk) in udp_v4_rehash() argument
1444 u16 new_hash = udp4_portaddr_hash(sock_net(sk), in udp_v4_rehash()
1445 inet_sk(sk)->inet_rcv_saddr, in udp_v4_rehash()
1446 inet_sk(sk)->inet_num); in udp_v4_rehash()
1447 udp_lib_rehash(sk, new_hash); in udp_v4_rehash()
1450 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in __udp_queue_rcv_skb() argument
1454 if (inet_sk(sk)->inet_daddr) { in __udp_queue_rcv_skb()
1455 sock_rps_save_rxhash(sk, skb); in __udp_queue_rcv_skb()
1456 sk_mark_napi_id(sk, skb); in __udp_queue_rcv_skb()
1457 sk_incoming_cpu_update(sk); in __udp_queue_rcv_skb()
1460 rc = sock_queue_rcv_skb(sk, skb); in __udp_queue_rcv_skb()
1462 int is_udplite = IS_UDPLITE(sk); in __udp_queue_rcv_skb()
1466 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, in __udp_queue_rcv_skb()
1468 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in __udp_queue_rcv_skb()
1470 trace_udp_fail_queue_rcv_skb(rc, sk); in __udp_queue_rcv_skb()
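
__udp_queue_rcv_skb() does the final enqueue and, for connected sockets, records flow-steering hints: sock_rps_save_rxhash() for RPS, sk_mark_napi_id() for busy polling, and sk_incoming_cpu_update(), which remembers the softirq CPU so userspace can colocate its reader thread. The last hint is visible through SO_INCOMING_CPU:

    #include <sys/socket.h>

    #ifndef SO_INCOMING_CPU
    #define SO_INCOMING_CPU 49       /* from asm-generic/socket.h */
    #endif

    /* Report which CPU the kernel last processed this socket's packets on
     * (what sk_incoming_cpu_update() recorded); pinning the consuming
     * thread there avoids cross-CPU receive-queue bouncing. */
    static int incoming_cpu(int fd)
    {
        int cpu = -1;
        socklen_t len = sizeof(cpu);

        if (getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) < 0)
            return -1;
        return cpu;
    }
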
1494 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in udp_queue_rcv_skb() argument
1496 struct udp_sock *up = udp_sk(sk); in udp_queue_rcv_skb()
1498 int is_udplite = IS_UDPLITE(sk); in udp_queue_rcv_skb()
1503 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) in udp_queue_rcv_skb()
1508 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); in udp_queue_rcv_skb()
1530 ret = encap_rcv(sk, skb); in udp_queue_rcv_skb()
1532 UDP_INC_STATS_BH(sock_net(sk), in udp_queue_rcv_skb()
1576 if (rcu_access_pointer(sk->sk_filter) && in udp_queue_rcv_skb()
1580 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { in udp_queue_rcv_skb()
1581 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, in udp_queue_rcv_skb()
1588 ipv4_pktinfo_prepare(sk, skb); in udp_queue_rcv_skb()
1589 bh_lock_sock(sk); in udp_queue_rcv_skb()
1590 if (!sock_owned_by_user(sk)) in udp_queue_rcv_skb()
1591 rc = __udp_queue_rcv_skb(sk, skb); in udp_queue_rcv_skb()
1592 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { in udp_queue_rcv_skb()
1593 bh_unlock_sock(sk); in udp_queue_rcv_skb()
1596 bh_unlock_sock(sk); in udp_queue_rcv_skb()
1601 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); in udp_queue_rcv_skb()
1603 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); in udp_queue_rcv_skb()
1604 atomic_inc(&sk->sk_drops); in udp_queue_rcv_skb()
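
udp_queue_rcv_skb() first offers the packet to an encapsulation hook when up->encap_type is set: a return <= 0 from encap_rcv() means the packet was consumed, while a positive return means "not encapsulated after all, deliver as plain UDP". This is how ESP-in-UDP (IPsec NAT traversal) and L2TP ride on ordinary UDP sockets; the hook is armed from userspace, e.g.:

    #include <netinet/in.h>
    #include <netinet/udp.h>
    #include <sys/socket.h>

    #ifndef UDP_ENCAP
    #define UDP_ENCAP 100            /* from linux/udp.h */
    #endif
    #ifndef UDP_ENCAP_ESPINUDP
    #define UDP_ENCAP_ESPINUDP 2
    #endif

    /* Mark a UDP socket (typically bound to port 4500 by an IKE daemon)
     * as carrying ESP-in-UDP; matching inbound packets are then diverted
     * to the xfrm input path by encap_rcv() instead of being queued to
     * userspace. */
    static int enable_espinudp(int fd)
    {
        int type = UDP_ENCAP_ESPINUDP;

        return setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
    }
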
1614 struct sock *sk; in flush_stack() local
1617 sk = stack[i]; in flush_stack()
1622 atomic_inc(&sk->sk_drops); in flush_stack()
1623 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, in flush_stack()
1624 IS_UDPLITE(sk)); in flush_stack()
1625 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, in flush_stack()
1626 IS_UDPLITE(sk)); in flush_stack()
1629 if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) in flush_stack()
1632 sock_put(sk); in flush_stack()
1641 static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) in udp_sk_rx_dst_set() argument
1646 old = xchg(&sk->sk_rx_dst, dst); in udp_sk_rx_dst_set()
1661 struct sock *sk, *stack[256 / sizeof(struct sock *)]; in __udp4_lib_mcast_deliver() local
1666 unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); in __udp4_lib_mcast_deliver()
1676 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); in __udp4_lib_mcast_deliver()
1680 sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) { in __udp4_lib_mcast_deliver()
1681 if (__udp_is_mcast_sock(net, sk, in __udp4_lib_mcast_deliver()
1690 stack[count++] = sk; in __udp4_lib_mcast_deliver()
1691 sock_hold(sk); in __udp4_lib_mcast_deliver()
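
__udp4_lib_mcast_deliver() gathers every socket passing __udp_is_mcast_sock() into a small on-stack array (256 bytes of pointers, so 32 sockets on 64-bit) while holding the slot lock, taking a reference on each; flush_stack() then delivers outside the lock, cloning the skb for every receiver except the last, which gets the original. A model of that clone-per-receiver pattern, with malloc()/memcpy() standing in for skb_clone():

    #include <stdlib.h>
    #include <string.h>

    struct pkt { char data[1500]; };

    /* Deliver one packet to count receivers, modeled on flush_stack():
     * clone for all but the last receiver, which takes the original, so
     * n receivers cost only n - 1 copies. */
    static void deliver_all(struct pkt *orig,
                            void (*enqueue)(int rcvr, struct pkt *p),
                            const int *receivers, int count)
    {
        for (int i = 0; i < count; i++) {
            struct pkt *p = orig;

            if (i != count - 1) {        /* clone for all but the last */
                p = malloc(sizeof(*p));
                if (!p)
                    continue;            /* drop for this receiver only */
                memcpy(p, orig, sizeof(*p));
            }
            enqueue(receivers[i], p);
        }
    }
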
1747 struct sock *sk; in __udp4_lib_rcv() local
1778 sk = skb_steal_sock(skb); in __udp4_lib_rcv()
1779 if (sk) { in __udp4_lib_rcv()
1783 if (unlikely(sk->sk_rx_dst != dst)) in __udp4_lib_rcv()
1784 udp_sk_rx_dst_set(sk, dst); in __udp4_lib_rcv()
1786 ret = udp_queue_rcv_skb(sk, skb); in __udp4_lib_rcv()
1787 sock_put(sk); in __udp4_lib_rcv()
1800 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); in __udp4_lib_rcv()
1801 if (sk) { in __udp4_lib_rcv()
1804 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) in __udp4_lib_rcv()
1808 ret = udp_queue_rcv_skb(sk, skb); in __udp4_lib_rcv()
1809 sock_put(sk); in __udp4_lib_rcv()
1869 struct sock *sk, *result; in __udp4_lib_mcast_demux_lookup() local
1883 sk_nulls_for_each_rcu(sk, node, &hslot->head) { in __udp4_lib_mcast_demux_lookup()
1884 if (__udp_is_mcast_sock(net, sk, in __udp4_lib_mcast_demux_lookup()
1888 result = sk; in __udp4_lib_mcast_demux_lookup()
1925 struct sock *sk, *result; in __udp4_lib_demux_lookup() local
1936 udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { in __udp4_lib_demux_lookup()
1937 if (INET_MATCH(sk, net, acookie, in __udp4_lib_demux_lookup()
1939 result = sk; in __udp4_lib_demux_lookup()
1947 else if (unlikely(!INET_MATCH(sk, net, acookie, in __udp4_lib_demux_lookup()
1963 struct sock *sk; in udp_v4_early_demux() local
1986 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, in udp_v4_early_demux()
1989 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, in udp_v4_early_demux()
1995 if (!sk) in udp_v4_early_demux()
1998 skb->sk = sk; in udp_v4_early_demux()
2000 dst = READ_ONCE(sk->sk_rx_dst); in udp_v4_early_demux()
2020 void udp_destroy_sock(struct sock *sk) in udp_destroy_sock() argument
2022 struct udp_sock *up = udp_sk(sk); in udp_destroy_sock()
2023 bool slow = lock_sock_fast(sk); in udp_destroy_sock()
2024 udp_flush_pending_frames(sk); in udp_destroy_sock()
2025 unlock_sock_fast(sk, slow); in udp_destroy_sock()
2027 void (*encap_destroy)(struct sock *sk); in udp_destroy_sock()
2030 encap_destroy(sk); in udp_destroy_sock()
2037 int udp_lib_setsockopt(struct sock *sk, int level, int optname, in udp_lib_setsockopt() argument
2041 struct udp_sock *up = udp_sk(sk); in udp_lib_setsockopt()
2044 int is_udplite = IS_UDPLITE(sk); in udp_lib_setsockopt()
2060 lock_sock(sk); in udp_lib_setsockopt()
2061 push_pending_frames(sk); in udp_lib_setsockopt()
2062 release_sock(sk); in udp_lib_setsockopt()
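
udp_lib_setsockopt() is shared by UDP and UDP-Lite. It implements UDP_CORK (releasing the cork triggers the lock_sock()/push_pending_frames()/release_sock() sequence visible above), UDP_ENCAP, and the UDP-Lite checksum-coverage knobs. Setting partial send coverage on a UDP-Lite socket, with fallback defines in case the libc headers lack the constants:

    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef IPPROTO_UDPLITE
    #define IPPROTO_UDPLITE 136
    #endif
    #ifndef UDPLITE_SEND_CSCOV
    #define UDPLITE_SEND_CSCOV 10    /* from linux/udp.h */
    #endif

    /* Checksum only the first 20 bytes of each outgoing UDP-Lite
     * datagram; the remainder may be corrupted in transit without the
     * packet being dropped, which is the point of UDP-Lite for
     * loss-tolerant payloads. */
    static int udplite_partial_csum(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
        int cov = 20;

        setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
        return fd;
    }
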
2130 int udp_setsockopt(struct sock *sk, int level, int optname, in udp_setsockopt() argument
2134 return udp_lib_setsockopt(sk, level, optname, optval, optlen, in udp_setsockopt()
2136 return ip_setsockopt(sk, level, optname, optval, optlen); in udp_setsockopt()
2140 int compat_udp_setsockopt(struct sock *sk, int level, int optname, in compat_udp_setsockopt() argument
2144 return udp_lib_setsockopt(sk, level, optname, optval, optlen, in compat_udp_setsockopt()
2146 return compat_ip_setsockopt(sk, level, optname, optval, optlen); in compat_udp_setsockopt()
2150 int udp_lib_getsockopt(struct sock *sk, int level, int optname, in udp_lib_getsockopt() argument
2153 struct udp_sock *up = udp_sk(sk); in udp_lib_getsockopt()
2203 int udp_getsockopt(struct sock *sk, int level, int optname, in udp_getsockopt() argument
2207 return udp_lib_getsockopt(sk, level, optname, optval, optlen); in udp_getsockopt()
2208 return ip_getsockopt(sk, level, optname, optval, optlen); in udp_getsockopt()
2212 int compat_udp_getsockopt(struct sock *sk, int level, int optname, in compat_udp_getsockopt() argument
2216 return udp_lib_getsockopt(sk, level, optname, optval, optlen); in compat_udp_getsockopt()
2217 return compat_ip_getsockopt(sk, level, optname, optval, optlen); in compat_udp_getsockopt()
2236 struct sock *sk = sock->sk; in udp_poll() local
2238 sock_rps_record_flow(sk); in udp_poll()
2242 !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) in udp_poll()
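
udp_poll() adds one UDP-specific twist on top of the generic datagram poll, visible in the line above: before reporting readability it calls first_packet_length(), discarding leading datagrams whose deferred checksum fails, so userspace is not woken for data a read would never return. With multiple readers a wakeup can still lose the race to another consumer, so a robust loop treats EAGAIN as "poll again":

    #include <errno.h>
    #include <poll.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Wait for a datagram, tolerating the case where another thread or
     * process consumed it between the poll() wakeup and the read. */
    static ssize_t poll_and_recv(int fd, char *buf, size_t len)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        for (;;) {
            if (poll(&pfd, 1, -1) < 0)
                return -1;
            ssize_t n = recv(fd, buf, len, MSG_DONTWAIT);
            if (n >= 0 || errno != EAGAIN)
                return n;                /* data, or a real error */
            /* EAGAIN: lost the race; poll again */
        }
    }
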
2289 struct sock *sk; in udp_get_first() local
2302 sk_nulls_for_each(sk, node, &hslot->head) { in udp_get_first()
2303 if (!net_eq(sock_net(sk), net)) in udp_get_first()
2305 if (sk->sk_family == state->family) in udp_get_first()
2310 sk = NULL; in udp_get_first()
2312 return sk; in udp_get_first()
2315 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) in udp_get_next() argument
2321 sk = sk_nulls_next(sk); in udp_get_next()
2322 } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); in udp_get_next()
2324 if (!sk) { in udp_get_next()
2329 return sk; in udp_get_next()
2334 struct sock *sk = udp_get_first(seq, 0); in udp_get_idx() local
2336 if (sk) in udp_get_idx()
2337 while (pos && (sk = udp_get_next(seq, sk)) != NULL) in udp_get_idx()
2339 return pos ? NULL : sk; in udp_get_idx()
2352 struct sock *sk; in udp_seq_next() local
2355 sk = udp_get_idx(seq, 0); in udp_seq_next()
2357 sk = udp_get_next(seq, v); in udp_seq_next()
2360 return sk; in udp_seq_next()
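
udp_get_first()/udp_get_next()/udp_get_idx()/udp_seq_next() are the seq_file iterator behind /proc/net/udp: they walk every hash slot and skip sockets belonging to another network namespace or address family. The resulting table is plain text with hex-encoded addresses; a minimal reader:

    #include <stdio.h>

    /* Walk /proc/net/udp (the output of the seq_file iterator above) and
     * print each socket's local address and pending receive-queue bytes.
     * Addresses appear as native-endian hex, so on x86
     * 0100007F:0035 means 127.0.0.1:53. */
    int main(void)
    {
        FILE *f = fopen("/proc/net/udp", "r");
        char line[512];
        unsigned ip, port, tx, rx;

        if (!f)
            return 1;
        fgets(line, sizeof(line), f);    /* skip the header row */
        while (fgets(line, sizeof(line), f)) {
            if (sscanf(line, "%*d: %x:%x %*x:%*x %*x %x:%x",
                       &ip, &port, &tx, &rx) == 4)
                printf("%u.%u.%u.%u:%u rx_queue=%u\n",
                       ip & 0xff, (ip >> 8) & 0xff,
                       (ip >> 16) & 0xff, ip >> 24,
                       port, rx);
        }
        fclose(f);
        return 0;
    }
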