Lines matching refs:sk (identifier cross-reference; all hits are in net/iucv/af_iucv.c, the AF_IUCV socket family)

52 #define __iucv_sock_wait(sk, condition, timeo, ret)			\  argument
57 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
67 release_sock(sk); \
69 lock_sock(sk); \
70 ret = sock_error(sk); \
74 finish_wait(sk_sleep(sk), &__wait); \
77 #define iucv_sock_wait(sk, condition, timeo) \ argument
81 __iucv_sock_wait(sk, condition, timeo, __ret); \
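The two macros above are the standard kernel sleep-until-condition idiom: queue the task on the socket's wait queue, drop the socket lock while sleeping so the wakeup path can make progress, then re-take the lock and re-test the condition, collecting sock_error() into ret. A minimal userspace model of the same shape, assuming pthreads in place of sk_sleep()/prepare_to_wait(); toy_sock, toy_sock_wait and every other name here are illustrative, not from af_iucv.c:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Toy "socket": the mutex plays lock_sock()/release_sock(), the condvar
 * plays the sk_sleep(sk) wait queue. */
struct toy_sock {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int state;
};

/* Model of __iucv_sock_wait(): sleep until `cond` holds or `ms` elapse.
 * The lock is held on entry and exit but not while sleeping --
 * pthread_cond_timedwait() drops and re-takes it atomically, which the
 * kernel macro does by hand with release_sock()/lock_sock() around the
 * schedule_timeout() call. */
#define toy_sock_wait(s, cond, ms, ret)					\
	do {								\
		struct timespec __ts;					\
		clock_gettime(CLOCK_REALTIME, &__ts);			\
		__ts.tv_sec += (ms) / 1000;				\
		(ret) = 0;						\
		while (!(cond)) {					\
			if (pthread_cond_timedwait(&(s)->wait,		\
						   &(s)->lock,		\
						   &__ts)) {		\
				(ret) = -EAGAIN;  /* timed out */	\
				break;					\
			}						\
		}							\
	} while (0)

static void *make_connected(void *arg)
{
	struct toy_sock *s = arg;

	pthread_mutex_lock(&s->lock);
	s->state = 1;				/* "IUCV_CONNECTED" */
	pthread_cond_broadcast(&s->wait);	/* like sk_state_change() */
	pthread_mutex_unlock(&s->lock);
	return NULL;
}

int main(void)
{
	struct toy_sock s = { PTHREAD_MUTEX_INITIALIZER,
			      PTHREAD_COND_INITIALIZER, 0 };
	pthread_t t;
	int ret;

	pthread_create(&t, NULL, make_connected, &s);
	pthread_mutex_lock(&s.lock);
	toy_sock_wait(&s, s.state == 1, 1000, ret);
	pthread_mutex_unlock(&s.lock);
	pthread_join(t, NULL);
	printf("wait returned %d, state %d\n", ret, s.state);
	return 0;
}

Built with cc -pthread, this prints "wait returned 0, state 1"; if the signalling thread never runs, the wait expires after the timeout and ret comes back as -EAGAIN, mirroring the timeo/ret handling in the kernel macro.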
85 static void iucv_sock_kill(struct sock *sk);
86 static void iucv_sock_close(struct sock *sk);
152 struct sock *sk; in afiucv_pm_freeze() local
159 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_pm_freeze()
160 iucv = iucv_sk(sk); in afiucv_pm_freeze()
161 switch (sk->sk_state) { in afiucv_pm_freeze()
165 iucv_sever_path(sk, 0); in afiucv_pm_freeze()
189 struct sock *sk; in afiucv_pm_restore_thaw() local
195 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_pm_restore_thaw()
196 switch (sk->sk_state) { in afiucv_pm_restore_thaw()
198 sk->sk_err = EPIPE; in afiucv_pm_restore_thaw()
199 sk->sk_state = IUCV_DISCONN; in afiucv_pm_restore_thaw()
200 sk->sk_state_change(sk); in afiucv_pm_restore_thaw()
272 static int iucv_sock_in_state(struct sock *sk, int state, int state2) in iucv_sock_in_state() argument
274 return (sk->sk_state == state || sk->sk_state == state2); in iucv_sock_in_state()
285 static inline int iucv_below_msglim(struct sock *sk) in iucv_below_msglim() argument
287 struct iucv_sock *iucv = iucv_sk(sk); in iucv_below_msglim()
289 if (sk->sk_state != IUCV_CONNECTED) in iucv_below_msglim()
301 static void iucv_sock_wake_msglim(struct sock *sk) in iucv_sock_wake_msglim() argument
306 wq = rcu_dereference(sk->sk_wq); in iucv_sock_wake_msglim()
309 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in iucv_sock_wake_msglim()
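iucv_below_msglim() and iucv_sock_wake_msglim() are the two halves of send-side flow control: the send path blocks in iucv_sock_wait() until fewer than the negotiated message limit are in flight, and the completion paths (iucv_callback_txdone(), afiucv_hs_callback_win()) wake the sleepers. Functionally that is a counting semaphore; a hypothetical userspace sketch, with MSGLIMIT and all other names invented for illustration:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

#define MSGLIMIT 8	/* stand-in for the negotiated IUCV message limit */

static sem_t tx_slots;	/* how far we currently are below the limit */

/* sendmsg() side: claim a slot, blocking once MSGLIMIT messages are in
 * flight -- the role iucv_sock_wait(sk, iucv_below_msglim(sk), timeo)
 * plays in iucv_sock_sendmsg(). */
static void send_one(int i)
{
	sem_wait(&tx_slots);
	printf("msg %d handed to the transport\n", i);
}

/* Completion side: each acknowledged message frees a slot and wakes a
 * sleeper, the role iucv_sock_wake_msglim() plays in the txdone path. */
static void *txdone_thread(void *arg)
{
	(void)arg;
	for (int i = 0; i < 16; i++) {
		usleep(1000);		/* pretend the wire took a while */
		sem_post(&tx_slots);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&tx_slots, 0, MSGLIMIT);
	pthread_create(&t, NULL, txdone_thread, NULL);
	for (int i = 0; i < 16; i++)
		send_one(i);
	pthread_join(t, NULL);
	return 0;
}

The kernel cannot use a plain semaphore here because, as the macro above shows, the wait must also observe sock_error() and socket state changes; hence the iucv_sock_wait()/iucv_below_msglim() pairing.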
384 struct sock *sk; in __iucv_get_sock_by_name() local
386 sk_for_each(sk, &iucv_sk_list.head) in __iucv_get_sock_by_name()
387 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) in __iucv_get_sock_by_name()
388 return sk; in __iucv_get_sock_by_name()
393 static void iucv_sock_destruct(struct sock *sk) in iucv_sock_destruct() argument
395 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_destruct()
396 skb_queue_purge(&sk->sk_error_queue); in iucv_sock_destruct()
398 sk_mem_reclaim(sk); in iucv_sock_destruct()
400 if (!sock_flag(sk, SOCK_DEAD)) { in iucv_sock_destruct()
401 pr_err("Attempt to release alive iucv socket %p\n", sk); in iucv_sock_destruct()
405 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in iucv_sock_destruct()
406 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); in iucv_sock_destruct()
407 WARN_ON(sk->sk_wmem_queued); in iucv_sock_destruct()
408 WARN_ON(sk->sk_forward_alloc); in iucv_sock_destruct()
414 struct sock *sk; in iucv_sock_cleanup_listen() local
417 while ((sk = iucv_accept_dequeue(parent, NULL))) { in iucv_sock_cleanup_listen()
418 iucv_sock_close(sk); in iucv_sock_cleanup_listen()
419 iucv_sock_kill(sk); in iucv_sock_cleanup_listen()
426 static void iucv_sock_kill(struct sock *sk) in iucv_sock_kill() argument
428 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) in iucv_sock_kill()
431 iucv_sock_unlink(&iucv_sk_list, sk); in iucv_sock_kill()
432 sock_set_flag(sk, SOCK_DEAD); in iucv_sock_kill()
433 sock_put(sk); in iucv_sock_kill()
437 static void iucv_sever_path(struct sock *sk, int with_user_data) in iucv_sever_path() argument
440 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sever_path()
457 static int iucv_send_ctrl(struct sock *sk, u8 flags) in iucv_send_ctrl() argument
464 skb = sock_alloc_send_skb(sk, blen, 1, &err); in iucv_send_ctrl()
467 err = afiucv_hs_send(NULL, sk, skb, flags); in iucv_send_ctrl()
473 static void iucv_sock_close(struct sock *sk) in iucv_sock_close() argument
475 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_close()
479 lock_sock(sk); in iucv_sock_close()
481 switch (sk->sk_state) { in iucv_sock_close()
483 iucv_sock_cleanup_listen(sk); in iucv_sock_close()
488 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in iucv_sock_close()
489 sk->sk_state = IUCV_DISCONN; in iucv_sock_close()
490 sk->sk_state_change(sk); in iucv_sock_close()
493 sk->sk_state = IUCV_CLOSING; in iucv_sock_close()
494 sk->sk_state_change(sk); in iucv_sock_close()
497 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) in iucv_sock_close()
498 timeo = sk->sk_lingertime; in iucv_sock_close()
501 iucv_sock_wait(sk, in iucv_sock_close()
502 iucv_sock_in_state(sk, IUCV_CLOSED, 0), in iucv_sock_close()
507 sk->sk_state = IUCV_CLOSED; in iucv_sock_close()
508 sk->sk_state_change(sk); in iucv_sock_close()
510 sk->sk_err = ECONNRESET; in iucv_sock_close()
511 sk->sk_state_change(sk); in iucv_sock_close()
517 iucv_sever_path(sk, 1); in iucv_sock_close()
523 sk->sk_bound_dev_if = 0; in iucv_sock_close()
527 sock_set_flag(sk, SOCK_ZAPPED); in iucv_sock_close()
529 release_sock(sk); in iucv_sock_close()
532 static void iucv_sock_init(struct sock *sk, struct sock *parent) in iucv_sock_init() argument
535 sk->sk_type = parent->sk_type; in iucv_sock_init()
540 struct sock *sk; in iucv_sock_alloc() local
543 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); in iucv_sock_alloc()
544 if (!sk) in iucv_sock_alloc()
546 iucv = iucv_sk(sk); in iucv_sock_alloc()
548 sock_init_data(sock, sk); in iucv_sock_alloc()
569 sk->sk_destruct = iucv_sock_destruct; in iucv_sock_alloc()
570 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; in iucv_sock_alloc()
571 sk->sk_allocation = GFP_DMA; in iucv_sock_alloc()
573 sock_reset_flag(sk, SOCK_ZAPPED); in iucv_sock_alloc()
575 sk->sk_protocol = proto; in iucv_sock_alloc()
576 sk->sk_state = IUCV_OPEN; in iucv_sock_alloc()
578 iucv_sock_link(&iucv_sk_list, sk); in iucv_sock_alloc()
579 return sk; in iucv_sock_alloc()
586 struct sock *sk; in iucv_sock_create() local
605 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); in iucv_sock_create()
606 if (!sk) in iucv_sock_create()
609 iucv_sock_init(sk, NULL); in iucv_sock_create()
614 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_link() argument
617 sk_add_node(sk, &l->head); in iucv_sock_link()
621 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_unlink() argument
624 sk_del_node_init(sk); in iucv_sock_unlink()
628 void iucv_accept_enqueue(struct sock *parent, struct sock *sk) in iucv_accept_enqueue() argument
633 sock_hold(sk); in iucv_accept_enqueue()
635 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); in iucv_accept_enqueue()
637 iucv_sk(sk)->parent = parent; in iucv_accept_enqueue()
641 void iucv_accept_unlink(struct sock *sk) in iucv_accept_unlink() argument
644 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); in iucv_accept_unlink()
647 list_del_init(&iucv_sk(sk)->accept_q); in iucv_accept_unlink()
649 sk_acceptq_removed(iucv_sk(sk)->parent); in iucv_accept_unlink()
650 iucv_sk(sk)->parent = NULL; in iucv_accept_unlink()
651 sock_put(sk); in iucv_accept_unlink()
657 struct sock *sk; in iucv_accept_dequeue() local
660 sk = (struct sock *) isk; in iucv_accept_dequeue()
661 lock_sock(sk); in iucv_accept_dequeue()
663 if (sk->sk_state == IUCV_CLOSED) { in iucv_accept_dequeue()
664 iucv_accept_unlink(sk); in iucv_accept_dequeue()
665 release_sock(sk); in iucv_accept_dequeue()
669 if (sk->sk_state == IUCV_CONNECTED || in iucv_accept_dequeue()
670 sk->sk_state == IUCV_DISCONN || in iucv_accept_dequeue()
672 iucv_accept_unlink(sk); in iucv_accept_dequeue()
674 sock_graft(sk, newsock); in iucv_accept_dequeue()
676 release_sock(sk); in iucv_accept_dequeue()
677 return sk; in iucv_accept_dequeue()
680 release_sock(sk); in iucv_accept_dequeue()
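iucv_accept_enqueue(), iucv_accept_unlink() and iucv_accept_dequeue() manage the listener's accept queue: enqueue takes a reference and links the child socket to its parent, unlink drops it, and dequeue walks the queue, discarding children that went IUCV_CLOSED and grafting the first CONNECTED or DISCONN child onto the caller's new socket. A minimal userspace model of the dequeue scan, assuming a singly linked list and omitting the refcounting and per-socket locking (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Toy states mirroring the ones the dequeue loop checks. */
enum { ST_CLOSED, ST_CONNECTED, ST_DISCONN };

struct child {
	int state;
	struct child *next;
};

struct listener {
	struct child *accept_q;
};

/* iucv_accept_enqueue(): link the child at the tail of the queue. */
static void accept_enqueue(struct listener *l, struct child *c)
{
	struct child **pp = &l->accept_q;

	while (*pp)
		pp = &(*pp)->next;
	c->next = NULL;
	*pp = c;
}

/* iucv_accept_dequeue(): drop CLOSED children as we walk, return the
 * first CONNECTED/DISCONN one (af_iucv additionally grafts it onto the
 * supplied newsock before handing it out). */
static struct child *accept_dequeue(struct listener *l)
{
	struct child **pp = &l->accept_q;

	while (*pp) {
		struct child *c = *pp;

		if (c->state == ST_CLOSED) {
			*pp = c->next;		/* unlink and discard */
			free(c);
			continue;
		}
		if (c->state == ST_CONNECTED || c->state == ST_DISCONN) {
			*pp = c->next;		/* unlink and hand out */
			return c;
		}
		pp = &c->next;			/* not ready yet, skip */
	}
	return NULL;
}

int main(void)
{
	struct listener l = { NULL };
	int states[] = { ST_CLOSED, ST_DISCONN, ST_CONNECTED };

	for (int i = 0; i < 3; i++) {
		struct child *c = calloc(1, sizeof(*c));

		c->state = states[i];
		accept_enqueue(&l, c);
	}
	for (struct child *c; (c = accept_dequeue(&l)); free(c))
		printf("dequeued child in state %d\n", c->state);
	return 0;
}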
702 struct sock *sk = sock->sk; in iucv_sock_bind() local
715 lock_sock(sk); in iucv_sock_bind()
716 if (sk->sk_state != IUCV_OPEN) { in iucv_sock_bind()
723 iucv = iucv_sk(sk); in iucv_sock_bind()
748 sk->sk_bound_dev_if = dev->ifindex; in iucv_sock_bind()
751 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
765 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
777 release_sock(sk); in iucv_sock_bind()
782 static int iucv_sock_autobind(struct sock *sk) in iucv_sock_autobind() argument
784 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_autobind()
805 struct sock *sk = sock->sk; in afiucv_path_connect() local
806 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_path_connect()
823 sk); in afiucv_path_connect()
852 struct sock *sk = sock->sk; in iucv_sock_connect() local
853 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_connect()
859 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) in iucv_sock_connect()
862 if (sk->sk_state == IUCV_OPEN && in iucv_sock_connect()
866 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) in iucv_sock_connect()
869 if (sk->sk_state == IUCV_OPEN) { in iucv_sock_connect()
870 err = iucv_sock_autobind(sk); in iucv_sock_connect()
875 lock_sock(sk); in iucv_sock_connect()
882 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); in iucv_sock_connect()
888 if (sk->sk_state != IUCV_CONNECTED) in iucv_sock_connect()
889 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, in iucv_sock_connect()
891 sock_sndtimeo(sk, flags & O_NONBLOCK)); in iucv_sock_connect()
893 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) in iucv_sock_connect()
897 iucv_sever_path(sk, 0); in iucv_sock_connect()
900 release_sock(sk); in iucv_sock_connect()
907 struct sock *sk = sock->sk; in iucv_sock_listen() local
910 lock_sock(sk); in iucv_sock_listen()
913 if (sk->sk_state != IUCV_BOUND) in iucv_sock_listen()
919 sk->sk_max_ack_backlog = backlog; in iucv_sock_listen()
920 sk->sk_ack_backlog = 0; in iucv_sock_listen()
921 sk->sk_state = IUCV_LISTEN; in iucv_sock_listen()
925 release_sock(sk); in iucv_sock_listen()
934 struct sock *sk = sock->sk, *nsk; in iucv_sock_accept() local
938 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
940 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
945 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); in iucv_sock_accept()
948 add_wait_queue_exclusive(sk_sleep(sk), &wait); in iucv_sock_accept()
949 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { in iucv_sock_accept()
956 release_sock(sk); in iucv_sock_accept()
958 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
960 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
972 remove_wait_queue(sk_sleep(sk), &wait); in iucv_sock_accept()
980 release_sock(sk); in iucv_sock_accept()
988 struct sock *sk = sock->sk; in iucv_sock_getname() local
989 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getname()
1035 struct sock *sk = sock->sk; in iucv_sock_sendmsg() local
1036 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_sendmsg()
1047 err = sock_error(sk); in iucv_sock_sendmsg()
1055 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) in iucv_sock_sendmsg()
1058 lock_sock(sk); in iucv_sock_sendmsg()
1060 if (sk->sk_shutdown & SEND_SHUTDOWN) { in iucv_sock_sendmsg()
1066 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1115 skb = sock_alloc_send_skb(sk, in iucv_sock_sendmsg()
1119 skb = sock_alloc_send_skb(sk, len, noblock, &err); in iucv_sock_sendmsg()
1130 timeo = sock_sndtimeo(sk, noblock); in iucv_sock_sendmsg()
1131 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); in iucv_sock_sendmsg()
1136 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1147 err = afiucv_hs_send(&txmsg, sk, skb, 0); in iucv_sock_sendmsg()
1195 release_sock(sk); in iucv_sock_sendmsg()
1201 release_sock(sk); in iucv_sock_sendmsg()
1209 static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) in iucv_fragment_skb() argument
1216 if (dataleft >= sk->sk_rcvbuf / 4) in iucv_fragment_skb()
1217 size = sk->sk_rcvbuf / 4; in iucv_fragment_skb()
1237 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); in iucv_fragment_skb()
1247 static void iucv_process_message(struct sock *sk, struct sk_buff *skb, in iucv_process_message() argument
1277 if (sk->sk_type == SOCK_STREAM && in iucv_process_message()
1278 skb->truesize >= sk->sk_rcvbuf / 4) { in iucv_process_message()
1279 rc = iucv_fragment_skb(sk, skb, len); in iucv_process_message()
1286 skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); in iucv_process_message()
1295 if (sock_queue_rcv_skb(sk, skb)) in iucv_process_message()
1296 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); in iucv_process_message()
1303 static void iucv_process_message_q(struct sock *sk) in iucv_process_message_q() argument
1305 struct iucv_sock *iucv = iucv_sk(sk); in iucv_process_message_q()
1313 iucv_process_message(sk, skb, p->path, &p->msg); in iucv_process_message_q()
1325 struct sock *sk = sock->sk; in iucv_sock_recvmsg() local
1326 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_recvmsg()
1332 if ((sk->sk_state == IUCV_DISCONN) && in iucv_sock_recvmsg()
1334 skb_queue_empty(&sk->sk_receive_queue) && in iucv_sock_recvmsg()
1343 skb = skb_recv_datagram(sk, flags, noblock, &err); in iucv_sock_recvmsg()
1345 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_recvmsg()
1354 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; in iucv_sock_recvmsg()
1359 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1364 if (sk->sk_type == SOCK_SEQPACKET) { in iucv_sock_recvmsg()
1379 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1387 if (sk->sk_type == SOCK_STREAM) { in iucv_sock_recvmsg()
1390 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1400 iucv_sock_close(sk); in iucv_sock_recvmsg()
1410 if (sock_queue_rcv_skb(sk, rskb)) { in iucv_sock_recvmsg()
1420 iucv_process_message_q(sk); in iucv_sock_recvmsg()
1423 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); in iucv_sock_recvmsg()
1425 sk->sk_state = IUCV_DISCONN; in iucv_sock_recvmsg()
1426 sk->sk_state_change(sk); in iucv_sock_recvmsg()
1435 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) in iucv_sock_recvmsg()
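The receive path above is two-staged: when sock_queue_rcv_skb() refuses an skb because sk_rcvbuf is exhausted, the message is parked on backlog_skb_q, and iucv_sock_recvmsg() later moves backlogged skbs back onto sk_receive_queue as the reader drains it (sending AF_IUCV_FLAG_WIN once everything is flushed). A hypothetical model of that spill-and-refill logic, with fixed arrays standing in for the two queues:

#include <stdbool.h>
#include <stdio.h>

#define RCVBUF_SLOTS 2			/* stand-in for sk->sk_rcvbuf */

static int rcvq[16], rcvq_len;		/* sk_receive_queue */
static int backlog[16], backlog_len;	/* iucv->backlog_skb_q */

/* sock_queue_rcv_skb() model: fails when the receive buffer is full. */
static bool queue_rcv(int msg)
{
	if (rcvq_len >= RCVBUF_SLOTS)
		return false;
	rcvq[rcvq_len++] = msg;
	return true;
}

/* iucv_process_message() tail: queue, or park on the backlog on failure. */
static void deliver(int msg)
{
	if (!queue_rcv(msg))
		backlog[backlog_len++] = msg;
}

/* recvmsg() tail: after handing one message to the reader, refill the
 * receive queue from the backlog until queueing fails again. */
static int recv_one(void)
{
	int msg = rcvq[0];

	for (int i = 1; i < rcvq_len; i++)	/* pop the head */
		rcvq[i - 1] = rcvq[i];
	rcvq_len--;

	while (backlog_len && queue_rcv(backlog[0])) {
		for (int i = 1; i < backlog_len; i++)
			backlog[i - 1] = backlog[i];
		backlog_len--;
	}
	return msg;
}

int main(void)
{
	for (int m = 1; m <= 5; m++)	/* five messages, room for two */
		deliver(m);
	printf("backlogged: %d\n", backlog_len);
	while (rcvq_len)
		printf("read %d (backlog now %d)\n", recv_one(), backlog_len);
	return 0;
}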
1444 struct sock *sk; in iucv_accept_poll() local
1447 sk = (struct sock *) isk; in iucv_accept_poll()
1449 if (sk->sk_state == IUCV_CONNECTED) in iucv_accept_poll()
1459 struct sock *sk = sock->sk; in iucv_sock_poll() local
1462 sock_poll_wait(file, sk_sleep(sk), wait); in iucv_sock_poll()
1464 if (sk->sk_state == IUCV_LISTEN) in iucv_sock_poll()
1465 return iucv_accept_poll(sk); in iucv_sock_poll()
1467 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in iucv_sock_poll()
1469 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); in iucv_sock_poll()
1471 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_poll()
1474 if (sk->sk_shutdown == SHUTDOWN_MASK) in iucv_sock_poll()
1477 if (!skb_queue_empty(&sk->sk_receive_queue) || in iucv_sock_poll()
1478 (sk->sk_shutdown & RCV_SHUTDOWN)) in iucv_sock_poll()
1481 if (sk->sk_state == IUCV_CLOSED) in iucv_sock_poll()
1484 if (sk->sk_state == IUCV_DISCONN) in iucv_sock_poll()
1487 if (sock_writeable(sk) && iucv_below_msglim(sk)) in iucv_sock_poll()
1490 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); in iucv_sock_poll()
1497 struct sock *sk = sock->sk; in iucv_sock_shutdown() local
1498 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_shutdown()
1507 lock_sock(sk); in iucv_sock_shutdown()
1508 switch (sk->sk_state) { in iucv_sock_shutdown()
1539 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); in iucv_sock_shutdown()
1542 sk->sk_shutdown |= how; in iucv_sock_shutdown()
1551 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_shutdown()
1555 sk->sk_state_change(sk); in iucv_sock_shutdown()
1558 release_sock(sk); in iucv_sock_shutdown()
1564 struct sock *sk = sock->sk; in iucv_sock_release() local
1567 if (!sk) in iucv_sock_release()
1570 iucv_sock_close(sk); in iucv_sock_release()
1572 sock_orphan(sk); in iucv_sock_release()
1573 iucv_sock_kill(sk); in iucv_sock_release()
1581 struct sock *sk = sock->sk; in iucv_sock_setsockopt() local
1582 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_setsockopt()
1597 lock_sock(sk); in iucv_sock_setsockopt()
1606 switch (sk->sk_state) { in iucv_sock_setsockopt()
1623 release_sock(sk); in iucv_sock_setsockopt()
1631 struct sock *sk = sock->sk; in iucv_sock_getsockopt() local
1632 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getsockopt()
1652 lock_sock(sk); in iucv_sock_getsockopt()
1655 release_sock(sk); in iucv_sock_getsockopt()
1658 if (sk->sk_state == IUCV_OPEN) in iucv_sock_getsockopt()
1684 struct sock *sk, *nsk; in iucv_callback_connreq() local
1693 sk = NULL; in iucv_callback_connreq()
1694 sk_for_each(sk, &iucv_sk_list.head) in iucv_callback_connreq()
1695 if (sk->sk_state == IUCV_LISTEN && in iucv_callback_connreq()
1696 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { in iucv_callback_connreq()
1701 iucv = iucv_sk(sk); in iucv_callback_connreq()
1709 bh_lock_sock(sk); in iucv_callback_connreq()
1715 if (sk->sk_state != IUCV_LISTEN) { in iucv_callback_connreq()
1722 if (sk_acceptq_is_full(sk)) { in iucv_callback_connreq()
1729 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); in iucv_callback_connreq()
1737 iucv_sock_init(nsk, sk); in iucv_callback_connreq()
1762 iucv_accept_enqueue(sk, nsk); in iucv_callback_connreq()
1766 sk->sk_data_ready(sk); in iucv_callback_connreq()
1769 bh_unlock_sock(sk); in iucv_callback_connreq()
1775 struct sock *sk = path->private; in iucv_callback_connack() local
1777 sk->sk_state = IUCV_CONNECTED; in iucv_callback_connack()
1778 sk->sk_state_change(sk); in iucv_callback_connack()
1783 struct sock *sk = path->private; in iucv_callback_rx() local
1784 struct iucv_sock *iucv = iucv_sk(sk); in iucv_callback_rx()
1789 if (sk->sk_shutdown & RCV_SHUTDOWN) { in iucv_callback_rx()
1800 len = atomic_read(&sk->sk_rmem_alloc); in iucv_callback_rx()
1802 if (len > sk->sk_rcvbuf) in iucv_callback_rx()
1809 iucv_process_message(sk, skb, path, msg); in iucv_callback_rx()
1828 struct sock *sk = path->private; in iucv_callback_txdone() local
1830 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; in iucv_callback_txdone()
1834 bh_lock_sock(sk); in iucv_callback_txdone()
1853 iucv_sock_wake_msglim(sk); in iucv_callback_txdone()
1857 if (sk->sk_state == IUCV_CLOSING) { in iucv_callback_txdone()
1858 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { in iucv_callback_txdone()
1859 sk->sk_state = IUCV_CLOSED; in iucv_callback_txdone()
1860 sk->sk_state_change(sk); in iucv_callback_txdone()
1863 bh_unlock_sock(sk); in iucv_callback_txdone()
1869 struct sock *sk = path->private; in iucv_callback_connrej() local
1871 if (sk->sk_state == IUCV_CLOSED) in iucv_callback_connrej()
1874 bh_lock_sock(sk); in iucv_callback_connrej()
1875 iucv_sever_path(sk, 1); in iucv_callback_connrej()
1876 sk->sk_state = IUCV_DISCONN; in iucv_callback_connrej()
1878 sk->sk_state_change(sk); in iucv_callback_connrej()
1879 bh_unlock_sock(sk); in iucv_callback_connrej()
1887 struct sock *sk = path->private; in iucv_callback_shutdown() local
1889 bh_lock_sock(sk); in iucv_callback_shutdown()
1890 if (sk->sk_state != IUCV_CLOSED) { in iucv_callback_shutdown()
1891 sk->sk_shutdown |= SEND_SHUTDOWN; in iucv_callback_shutdown()
1892 sk->sk_state_change(sk); in iucv_callback_shutdown()
1894 bh_unlock_sock(sk); in iucv_callback_shutdown()
1922 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_syn() argument
1929 iucv = iucv_sk(sk); in afiucv_hs_callback_syn()
1939 nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); in afiucv_hs_callback_syn()
1940 bh_lock_sock(sk); in afiucv_hs_callback_syn()
1941 if ((sk->sk_state != IUCV_LISTEN) || in afiucv_hs_callback_syn()
1942 sk_acceptq_is_full(sk) || in afiucv_hs_callback_syn()
1949 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1954 iucv_sock_init(nsk, sk); in afiucv_hs_callback_syn()
1965 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; in afiucv_hs_callback_syn()
1974 iucv_accept_enqueue(sk, nsk); in afiucv_hs_callback_syn()
1976 sk->sk_data_ready(sk); in afiucv_hs_callback_syn()
1979 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1988 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synack() argument
1990 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synack()
1996 if (sk->sk_state != IUCV_BOUND) in afiucv_hs_callback_synack()
1998 bh_lock_sock(sk); in afiucv_hs_callback_synack()
2000 sk->sk_state = IUCV_CONNECTED; in afiucv_hs_callback_synack()
2001 sk->sk_state_change(sk); in afiucv_hs_callback_synack()
2002 bh_unlock_sock(sk); in afiucv_hs_callback_synack()
2011 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synfin() argument
2013 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synfin()
2017 if (sk->sk_state != IUCV_BOUND) in afiucv_hs_callback_synfin()
2019 bh_lock_sock(sk); in afiucv_hs_callback_synfin()
2020 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_synfin()
2021 sk->sk_state_change(sk); in afiucv_hs_callback_synfin()
2022 bh_unlock_sock(sk); in afiucv_hs_callback_synfin()
2031 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_fin() argument
2033 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_fin()
2038 bh_lock_sock(sk); in afiucv_hs_callback_fin()
2039 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_fin()
2040 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_fin()
2041 sk->sk_state_change(sk); in afiucv_hs_callback_fin()
2043 bh_unlock_sock(sk); in afiucv_hs_callback_fin()
2052 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_win() argument
2054 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_win()
2061 if (sk->sk_state != IUCV_CONNECTED) in afiucv_hs_callback_win()
2065 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_win()
2072 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_rx() argument
2074 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_rx()
2081 if (sk->sk_state != IUCV_CONNECTED) { in afiucv_hs_callback_rx()
2086 if (sk->sk_shutdown & RCV_SHUTDOWN) { in afiucv_hs_callback_rx()
2102 if (sock_queue_rcv_skb(sk, skb)) { in afiucv_hs_callback_rx()
2107 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); in afiucv_hs_callback_rx()
2120 struct sock *sk; in afiucv_hs_rcv() local
2134 sk = NULL; in afiucv_hs_rcv()
2136 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_hs_rcv()
2138 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2140 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2142 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && in afiucv_hs_rcv()
2143 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2145 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2149 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2151 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2153 (!memcmp(&iucv_sk(sk)->dst_name, in afiucv_hs_rcv()
2155 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2157 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2164 sk = NULL; in afiucv_hs_rcv()
2178 err = afiucv_hs_callback_syn(sk, skb); in afiucv_hs_rcv()
2182 err = afiucv_hs_callback_synack(sk, skb); in afiucv_hs_rcv()
2186 err = afiucv_hs_callback_synfin(sk, skb); in afiucv_hs_rcv()
2190 err = afiucv_hs_callback_fin(sk, skb); in afiucv_hs_rcv()
2193 err = afiucv_hs_callback_win(sk, skb); in afiucv_hs_rcv()
2205 err = afiucv_hs_callback_rx(sk, skb); in afiucv_hs_rcv()
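afiucv_hs_rcv() first matches the incoming skb to a bound socket by the (name, user id) pairs and then switches on the AF_IUCV flag bits to the handlers above: SYN, SYN|ACK, SYNFIN, FIN, WIN, or plain data. A compact sketch of that dispatch shape; the flag values below are invented, not the real AF_IUCV_FLAG_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits; the real AF_IUCV_FLAG_* values live in the kernel. */
#define FLAG_SYN 0x01
#define FLAG_ACK 0x02
#define FLAG_FIN 0x04
#define FLAG_WIN 0x08

struct pkt {
	uint8_t flags;
};

/* Shape of the switch at the bottom of afiucv_hs_rcv(): combined flag
 * patterns first, plain data (no control flags) as the final case. */
static int dispatch(const struct pkt *p)
{
	switch (p->flags) {
	case FLAG_SYN:			/* afiucv_hs_callback_syn() */
		puts("connection request");
		return 0;
	case FLAG_SYN | FLAG_ACK:	/* afiucv_hs_callback_synack() */
		puts("connection accepted");
		return 0;
	case FLAG_FIN:			/* afiucv_hs_callback_fin() */
		puts("peer shut down");
		return 0;
	case FLAG_WIN:			/* afiucv_hs_callback_win() */
		puts("send window reopened");
		return 0;
	case 0:				/* afiucv_hs_callback_rx() */
		puts("data");
		return 0;
	default:
		fprintf(stderr, "unexpected flags 0x%x\n", p->flags);
		return -1;
	}
}

int main(void)
{
	const struct pkt pkts[] = {
		{ FLAG_SYN }, { FLAG_SYN | FLAG_ACK }, { 0 }, { FLAG_FIN },
	};

	for (size_t i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++)
		dispatch(&pkts[i]);
	return 0;
}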
2221 struct sock *isk = skb->sk; in afiucv_hs_callback_txnotify()
2222 struct sock *sk = NULL; in afiucv_hs_callback_txnotify() local
2230 sk_for_each(sk, &iucv_sk_list.head) in afiucv_hs_callback_txnotify()
2231 if (sk == isk) { in afiucv_hs_callback_txnotify()
2232 iucv = iucv_sk(sk); in afiucv_hs_callback_txnotify()
2237 if (!iucv || sock_flag(sk, SOCK_ZAPPED)) in afiucv_hs_callback_txnotify()
2252 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2261 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2271 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_txnotify()
2272 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_txnotify()
2273 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2285 if (sk->sk_state == IUCV_CLOSING) { in afiucv_hs_callback_txnotify()
2286 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { in afiucv_hs_callback_txnotify()
2287 sk->sk_state = IUCV_CLOSED; in afiucv_hs_callback_txnotify()
2288 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2301 struct sock *sk; in afiucv_netdev_event() local
2307 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_netdev_event()
2308 iucv = iucv_sk(sk); in afiucv_netdev_event()
2310 (sk->sk_state == IUCV_CONNECTED)) { in afiucv_netdev_event()
2312 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in afiucv_netdev_event()
2313 sk->sk_state = IUCV_DISCONN; in afiucv_netdev_event()
2314 sk->sk_state_change(sk); in afiucv_netdev_event()