Lines matching refs: sk
References to the socket pointer sk in the AF_IUCV socket implementation (net/iucv/af_iucv.c in the Linux kernel). Each entry lists the source line number, the code fragment, and the enclosing function; "argument" and "local" mark lines where sk is declared as a function parameter or as a local variable.

55 #define __iucv_sock_wait(sk, condition, timeo, ret)			\  argument
60 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
70 release_sock(sk); \
72 lock_sock(sk); \
73 ret = sock_error(sk); \
77 finish_wait(sk_sleep(sk), &__wait); \
80 #define iucv_sock_wait(sk, condition, timeo) \ argument
84 __iucv_sock_wait(sk, condition, timeo, __ret); \
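The fragments above (lines 55-84) outline the classic socket wait loop: prepare_to_wait() on the socket's wait queue, drop the socket lock around schedule_timeout(), re-take it, and re-check sock_error(). A hedged reconstruction of how those pieces most likely fit together (not the verbatim source; the exact error codes are assumptions):

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;	/* assumed error code */	\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);	/* line 70: sleep unlocked */	\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);		/* line 72 */			\
		ret = sock_error(sk);	/* line 73 */			\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);	/* line 77 */		\
} while (0)

/* line 80: wrapper that only waits when the condition is not yet true */
#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})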
90 static void iucv_sock_kill(struct sock *sk);
91 static void iucv_sock_close(struct sock *sk);
93 static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
149 static int iucv_sock_in_state(struct sock *sk, int state, int state2) in iucv_sock_in_state() argument
151 return (sk->sk_state == state || sk->sk_state == state2); in iucv_sock_in_state()
162 static inline int iucv_below_msglim(struct sock *sk) in iucv_below_msglim() argument
164 struct iucv_sock *iucv = iucv_sk(sk); in iucv_below_msglim()
166 if (sk->sk_state != IUCV_CONNECTED) in iucv_below_msglim()
178 static void iucv_sock_wake_msglim(struct sock *sk) in iucv_sock_wake_msglim() argument
183 wq = rcu_dereference(sk->sk_wq); in iucv_sock_wake_msglim()
186 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in iucv_sock_wake_msglim()
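Lines 178-186 wake senders blocked in iucv_sock_wait() on the message-limit condition. A sketch of the usual pattern, assuming the rcu_read_lock()/skwq_has_sleeper() steps that normally surround an sk_wq dereference:

static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);		/* line 183 */
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);	/* line 186 */
	rcu_read_unlock();
}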
266 struct sock *sk; in __iucv_get_sock_by_name() local
268 sk_for_each(sk, &iucv_sk_list.head) in __iucv_get_sock_by_name()
269 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) in __iucv_get_sock_by_name()
270 return sk; in __iucv_get_sock_by_name()
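Lines 266-270 already give the whole lookup: walk the global socket list and compare the 8-byte src_name. Filled out as a complete function (only the NULL fall-through at the end is assumed):

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}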
275 static void iucv_sock_destruct(struct sock *sk) in iucv_sock_destruct() argument
277 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_destruct()
278 skb_queue_purge(&sk->sk_error_queue); in iucv_sock_destruct()
280 sk_mem_reclaim(sk); in iucv_sock_destruct()
282 if (!sock_flag(sk, SOCK_DEAD)) { in iucv_sock_destruct()
283 pr_err("Attempt to release alive iucv socket %p\n", sk); in iucv_sock_destruct()
287 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in iucv_sock_destruct()
288 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); in iucv_sock_destruct()
289 WARN_ON(sk->sk_wmem_queued); in iucv_sock_destruct()
290 WARN_ON(sk->sk_forward_alloc); in iucv_sock_destruct()
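The destructor fragments (lines 275-290) purge both receive queues, reclaim accounted memory, and then sanity-check that nothing is still charged to the socket. Assembled, with only the early return after the pr_err() assumed:

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		/* socket is being freed while still referenced */
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}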
296 struct sock *sk; in iucv_sock_cleanup_listen() local
299 while ((sk = iucv_accept_dequeue(parent, NULL))) { in iucv_sock_cleanup_listen()
300 iucv_sock_close(sk); in iucv_sock_cleanup_listen()
301 iucv_sock_kill(sk); in iucv_sock_cleanup_listen()
307 static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_link() argument
310 sk_add_node(sk, &l->head); in iucv_sock_link()
314 static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) in iucv_sock_unlink() argument
317 sk_del_node_init(sk); in iucv_sock_unlink()
322 static void iucv_sock_kill(struct sock *sk) in iucv_sock_kill() argument
324 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) in iucv_sock_kill()
327 iucv_sock_unlink(&iucv_sk_list, sk); in iucv_sock_kill()
328 sock_set_flag(sk, SOCK_DEAD); in iucv_sock_kill()
329 sock_put(sk); in iucv_sock_kill()
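Lines 307-329 cover the global list handling and the final teardown: iucv_sock_link()/iucv_sock_unlink() add and remove the sock under the list lock, and iucv_sock_kill() only frees a socket that is already zapped and orphaned. A sketch, assuming the iucv_sk_list rwlock is taken with write_lock_bh():

static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);		/* line 310 */
	write_unlock_bh(&l->lock);
}

static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);			/* line 317 */
	write_unlock_bh(&l->lock);
}

static void iucv_sock_kill(struct sock *sk)
{
	/* only kill sockets that are zapped and no longer attached */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);				/* drop the final reference */
}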
333 static void iucv_sever_path(struct sock *sk, int with_user_data) in iucv_sever_path() argument
336 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sever_path()
353 static int iucv_send_ctrl(struct sock *sk, u8 flags) in iucv_send_ctrl() argument
355 struct iucv_sock *iucv = iucv_sk(sk); in iucv_send_ctrl()
363 if (sk->sk_shutdown & SEND_SHUTDOWN) { in iucv_send_ctrl()
365 shutdown = sk->sk_shutdown; in iucv_send_ctrl()
366 sk->sk_shutdown &= RCV_SHUTDOWN; in iucv_send_ctrl()
368 skb = sock_alloc_send_skb(sk, blen, 1, &err); in iucv_send_ctrl()
371 err = afiucv_hs_send(NULL, sk, skb, flags); in iucv_send_ctrl()
374 sk->sk_shutdown = shutdown; in iucv_send_ctrl()
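iucv_send_ctrl() (lines 353-374) sends a bare control skb (SYN/FIN/WIN/SHT flags) over the HiperSockets transport; note how it temporarily clears SEND_SHUTDOWN so control flags still go out on a half-closed socket. A sketch of that flow; the headroom calculation and the skb_reserve() call are assumptions:

static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	struct sk_buff *skb;
	u8 shutdown = 0;
	int blen, err = 0;

	/* room for the transport header plus link-layer header (assumed) */
	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent even after shutdown */
		shutdown = sk->sk_shutdown;		/* line 365 */
		sk->sk_shutdown &= RCV_SHUTDOWN;	/* line 366 */
	}

	skb = sock_alloc_send_skb(sk, blen, 1, &err);	/* line 368 */
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);	/* line 371 */
	}

	if (shutdown)
		sk->sk_shutdown = shutdown;		/* line 374 */
	return err;
}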
379 static void iucv_sock_close(struct sock *sk) in iucv_sock_close() argument
381 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_close()
385 lock_sock(sk); in iucv_sock_close()
387 switch (sk->sk_state) { in iucv_sock_close()
389 iucv_sock_cleanup_listen(sk); in iucv_sock_close()
394 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in iucv_sock_close()
395 sk->sk_state = IUCV_DISCONN; in iucv_sock_close()
396 sk->sk_state_change(sk); in iucv_sock_close()
401 sk->sk_state = IUCV_CLOSING; in iucv_sock_close()
402 sk->sk_state_change(sk); in iucv_sock_close()
405 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) in iucv_sock_close()
406 timeo = sk->sk_lingertime; in iucv_sock_close()
409 iucv_sock_wait(sk, in iucv_sock_close()
410 iucv_sock_in_state(sk, IUCV_CLOSED, 0), in iucv_sock_close()
416 sk->sk_state = IUCV_CLOSED; in iucv_sock_close()
417 sk->sk_state_change(sk); in iucv_sock_close()
419 sk->sk_err = ECONNRESET; in iucv_sock_close()
420 sk->sk_state_change(sk); in iucv_sock_close()
427 iucv_sever_path(sk, 1); in iucv_sock_close()
433 sk->sk_bound_dev_if = 0; in iucv_sock_close()
437 sock_set_flag(sk, SOCK_ZAPPED); in iucv_sock_close()
439 release_sock(sk); in iucv_sock_close()
442 static void iucv_sock_init(struct sock *sk, struct sock *parent) in iucv_sock_init() argument
445 sk->sk_type = parent->sk_type; in iucv_sock_init()
446 security_sk_clone(parent, sk); in iucv_sock_init()
452 struct sock *sk; in iucv_sock_alloc() local
455 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern); in iucv_sock_alloc()
456 if (!sk) in iucv_sock_alloc()
458 iucv = iucv_sk(sk); in iucv_sock_alloc()
460 sock_init_data(sock, sk); in iucv_sock_alloc()
482 sk->sk_destruct = iucv_sock_destruct; in iucv_sock_alloc()
483 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; in iucv_sock_alloc()
485 sock_reset_flag(sk, SOCK_ZAPPED); in iucv_sock_alloc()
487 sk->sk_protocol = proto; in iucv_sock_alloc()
488 sk->sk_state = IUCV_OPEN; in iucv_sock_alloc()
490 iucv_sock_link(&iucv_sk_list, sk); in iucv_sock_alloc()
491 return sk; in iucv_sock_alloc()
494 static void iucv_accept_enqueue(struct sock *parent, struct sock *sk) in iucv_accept_enqueue() argument
499 sock_hold(sk); in iucv_accept_enqueue()
501 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); in iucv_accept_enqueue()
503 iucv_sk(sk)->parent = parent; in iucv_accept_enqueue()
507 static void iucv_accept_unlink(struct sock *sk) in iucv_accept_unlink() argument
510 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); in iucv_accept_unlink()
513 list_del_init(&iucv_sk(sk)->accept_q); in iucv_accept_unlink()
515 sk_acceptq_removed(iucv_sk(sk)->parent); in iucv_accept_unlink()
516 iucv_sk(sk)->parent = NULL; in iucv_accept_unlink()
517 sock_put(sk); in iucv_accept_unlink()
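Lines 494-517 maintain the listener's accept queue: the child takes a reference and links itself onto the parent's accept_q, and unlink reverses both while adjusting the parent's backlog accounting. A sketch, assuming the queue is protected by a per-socket spinlock (called accept_q_lock here) and that enqueue bumps the backlog with sk_acceptq_added():

static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct iucv_sock *par = iucv_sk(parent);
	unsigned long flags;

	sock_hold(sk);						/* line 499 */
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);	/* line 501 */
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;				/* line 503 */
	sk_acceptq_added(parent);
}

static void iucv_accept_unlink(struct sock *sk)
{
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);	/* line 510 */
	unsigned long flags;

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);			/* line 513 */
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);		/* line 515 */
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);						/* line 517 */
}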
524 struct sock *sk; in iucv_accept_dequeue() local
527 sk = (struct sock *) isk; in iucv_accept_dequeue()
528 lock_sock(sk); in iucv_accept_dequeue()
530 if (sk->sk_state == IUCV_CLOSED) { in iucv_accept_dequeue()
531 iucv_accept_unlink(sk); in iucv_accept_dequeue()
532 release_sock(sk); in iucv_accept_dequeue()
536 if (sk->sk_state == IUCV_CONNECTED || in iucv_accept_dequeue()
537 sk->sk_state == IUCV_DISCONN || in iucv_accept_dequeue()
539 iucv_accept_unlink(sk); in iucv_accept_dequeue()
541 sock_graft(sk, newsock); in iucv_accept_dequeue()
543 release_sock(sk); in iucv_accept_dequeue()
544 return sk; in iucv_accept_dequeue()
547 release_sock(sk); in iucv_accept_dequeue()
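iucv_accept_dequeue() (lines 524-547) walks the accept queue, discards children that closed while queued, and grafts the first usable child onto the new socket. Reconstructed from the fragments; the list_for_each_entry_safe() walk and the !newsock case are inferred:

static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)isk;		/* line 527 */
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			/* child died while waiting: just drop it */
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);	/* line 541 */
			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}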
570 struct sock *sk = sock->sk; in iucv_sock_bind() local
580 lock_sock(sk); in iucv_sock_bind()
581 if (sk->sk_state != IUCV_OPEN) { in iucv_sock_bind()
588 iucv = iucv_sk(sk); in iucv_sock_bind()
613 sk->sk_bound_dev_if = dev->ifindex; in iucv_sock_bind()
616 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
630 sk->sk_state = IUCV_BOUND; in iucv_sock_bind()
632 sk->sk_allocation |= GFP_DMA; in iucv_sock_bind()
643 release_sock(sk); in iucv_sock_bind()
648 static int iucv_sock_autobind(struct sock *sk) in iucv_sock_autobind() argument
650 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_autobind()
658 sk->sk_allocation |= GFP_DMA; in iucv_sock_autobind()
673 struct sock *sk = sock->sk; in afiucv_path_connect() local
674 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_path_connect()
691 sk); in afiucv_path_connect()
720 struct sock *sk = sock->sk; in iucv_sock_connect() local
721 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_connect()
727 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) in iucv_sock_connect()
730 if (sk->sk_state == IUCV_OPEN && in iucv_sock_connect()
734 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) in iucv_sock_connect()
737 if (sk->sk_state == IUCV_OPEN) { in iucv_sock_connect()
738 err = iucv_sock_autobind(sk); in iucv_sock_connect()
743 lock_sock(sk); in iucv_sock_connect()
750 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); in iucv_sock_connect()
756 if (sk->sk_state != IUCV_CONNECTED) in iucv_sock_connect()
757 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, in iucv_sock_connect()
759 sock_sndtimeo(sk, flags & O_NONBLOCK)); in iucv_sock_connect()
761 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) in iucv_sock_connect()
765 iucv_sever_path(sk, 0); in iucv_sock_connect()
768 release_sock(sk); in iucv_sock_connect()
775 struct sock *sk = sock->sk; in iucv_sock_listen() local
778 lock_sock(sk); in iucv_sock_listen()
781 if (sk->sk_state != IUCV_BOUND) in iucv_sock_listen()
787 sk->sk_max_ack_backlog = backlog; in iucv_sock_listen()
788 sk->sk_ack_backlog = 0; in iucv_sock_listen()
789 sk->sk_state = IUCV_LISTEN; in iucv_sock_listen()
793 release_sock(sk); in iucv_sock_listen()
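The listen() handler (lines 775-793) is the usual three steps under lock_sock(): validate state, set the backlog limits, move to IUCV_LISTEN. A sketch; the EINVAL error value and the stream/seqpacket type check are assumptions:

static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)			/* line 781 */
		goto done;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;		/* line 787 */
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;			/* line 789 */
	err = 0;

done:
	release_sock(sk);
	return err;
}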
802 struct sock *sk = sock->sk, *nsk; in iucv_sock_accept() local
806 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
808 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
813 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); in iucv_sock_accept()
816 add_wait_queue_exclusive(sk_sleep(sk), &wait); in iucv_sock_accept()
817 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { in iucv_sock_accept()
824 release_sock(sk); in iucv_sock_accept()
826 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); in iucv_sock_accept()
828 if (sk->sk_state != IUCV_LISTEN) { in iucv_sock_accept()
840 remove_wait_queue(sk_sleep(sk), &wait); in iucv_sock_accept()
848 release_sock(sk); in iucv_sock_accept()
856 struct sock *sk = sock->sk; in iucv_sock_getname() local
857 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getname()
902 struct sock *sk = sock->sk; in iucv_sock_sendmsg() local
903 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_sendmsg()
916 err = sock_error(sk); in iucv_sock_sendmsg()
924 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) in iucv_sock_sendmsg()
927 lock_sock(sk); in iucv_sock_sendmsg()
929 if (sk->sk_shutdown & SEND_SHUTDOWN) { in iucv_sock_sendmsg()
935 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
998 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear, in iucv_sock_sendmsg()
1012 timeo = sock_sndtimeo(sk, noblock); in iucv_sock_sendmsg()
1013 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); in iucv_sock_sendmsg()
1018 if (sk->sk_state != IUCV_CONNECTED) { in iucv_sock_sendmsg()
1029 err = afiucv_hs_send(&txmsg, sk, skb, 0); in iucv_sock_sendmsg()
1100 release_sock(sk); in iucv_sock_sendmsg()
1106 release_sock(sk); in iucv_sock_sendmsg()
1142 static void iucv_process_message(struct sock *sk, struct sk_buff *skb, in iucv_process_message() argument
1191 if (sk_filter(sk, skb)) { in iucv_process_message()
1192 atomic_inc(&sk->sk_drops); /* skb rejected by filter */ in iucv_process_message()
1196 if (__sock_queue_rcv_skb(sk, skb)) /* handle rcv queue full */ in iucv_process_message()
1197 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); in iucv_process_message()
1204 static void iucv_process_message_q(struct sock *sk) in iucv_process_message_q() argument
1206 struct iucv_sock *iucv = iucv_sk(sk); in iucv_process_message_q()
1214 iucv_process_message(sk, skb, p->path, &p->msg); in iucv_process_message_q()
1226 struct sock *sk = sock->sk; in iucv_sock_recvmsg() local
1227 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_recvmsg()
1233 if ((sk->sk_state == IUCV_DISCONN) && in iucv_sock_recvmsg()
1235 skb_queue_empty(&sk->sk_receive_queue) && in iucv_sock_recvmsg()
1244 skb = skb_recv_datagram(sk, flags, noblock, &err); in iucv_sock_recvmsg()
1246 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_recvmsg()
1255 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; in iucv_sock_recvmsg()
1260 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1265 if (sk->sk_type == SOCK_SEQPACKET) { in iucv_sock_recvmsg()
1280 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1288 if (sk->sk_type == SOCK_STREAM) { in iucv_sock_recvmsg()
1291 skb_queue_head(&sk->sk_receive_queue, skb); in iucv_sock_recvmsg()
1301 iucv_sock_close(sk); in iucv_sock_recvmsg()
1311 if (__sock_queue_rcv_skb(sk, rskb)) { in iucv_sock_recvmsg()
1321 iucv_process_message_q(sk); in iucv_sock_recvmsg()
1324 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); in iucv_sock_recvmsg()
1326 sk->sk_state = IUCV_DISCONN; in iucv_sock_recvmsg()
1327 sk->sk_state_change(sk); in iucv_sock_recvmsg()
1336 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) in iucv_sock_recvmsg()
1345 struct sock *sk; in iucv_accept_poll() local
1348 sk = (struct sock *) isk; in iucv_accept_poll()
1350 if (sk->sk_state == IUCV_CONNECTED) in iucv_accept_poll()
1360 struct sock *sk = sock->sk; in iucv_sock_poll() local
1365 if (sk->sk_state == IUCV_LISTEN) in iucv_sock_poll()
1366 return iucv_accept_poll(sk); in iucv_sock_poll()
1368 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in iucv_sock_poll()
1370 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); in iucv_sock_poll()
1372 if (sk->sk_shutdown & RCV_SHUTDOWN) in iucv_sock_poll()
1375 if (sk->sk_shutdown == SHUTDOWN_MASK) in iucv_sock_poll()
1378 if (!skb_queue_empty(&sk->sk_receive_queue) || in iucv_sock_poll()
1379 (sk->sk_shutdown & RCV_SHUTDOWN)) in iucv_sock_poll()
1382 if (sk->sk_state == IUCV_CLOSED) in iucv_sock_poll()
1385 if (sk->sk_state == IUCV_DISCONN) in iucv_sock_poll()
1388 if (sock_writeable(sk) && iucv_below_msglim(sk)) in iucv_sock_poll()
1391 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in iucv_sock_poll()
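iucv_sock_poll() (lines 1360-1391) maps socket state onto an EPOLL mask: listening sockets defer to iucv_accept_poll(), readability follows the receive queue and RCV_SHUTDOWN, and writability additionally requires iucv_below_msglim() so senders stop polling writable once the IUCV message limit is reached. A reconstruction; the sock_poll_wait() call and the exact mask composition are assumed:

static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sock, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);		/* line 1366 */

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)		/* line 1372 */
		mask |= EPOLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)		/* line 1375 */
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)		/* line 1382 */
		mask |= EPOLLHUP;

	if (sk->sk_state == IUCV_DISCONN)		/* line 1385 */
		mask |= EPOLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))	/* line 1388 */
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);	/* line 1391 */

	return mask;
}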
1398 struct sock *sk = sock->sk; in iucv_sock_shutdown() local
1399 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_shutdown()
1408 lock_sock(sk); in iucv_sock_shutdown()
1409 switch (sk->sk_state) { in iucv_sock_shutdown()
1421 sk->sk_state == IUCV_CONNECTED) { in iucv_sock_shutdown()
1441 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); in iucv_sock_shutdown()
1444 sk->sk_shutdown |= how; in iucv_sock_shutdown()
1453 skb_queue_purge(&sk->sk_receive_queue); in iucv_sock_shutdown()
1457 sk->sk_state_change(sk); in iucv_sock_shutdown()
1460 release_sock(sk); in iucv_sock_shutdown()
1466 struct sock *sk = sock->sk; in iucv_sock_release() local
1469 if (!sk) in iucv_sock_release()
1472 iucv_sock_close(sk); in iucv_sock_release()
1474 sock_orphan(sk); in iucv_sock_release()
1475 iucv_sock_kill(sk); in iucv_sock_release()
1483 struct sock *sk = sock->sk; in iucv_sock_setsockopt() local
1484 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_setsockopt()
1499 lock_sock(sk); in iucv_sock_setsockopt()
1508 switch (sk->sk_state) { in iucv_sock_setsockopt()
1525 release_sock(sk); in iucv_sock_setsockopt()
1533 struct sock *sk = sock->sk; in iucv_sock_getsockopt() local
1534 struct iucv_sock *iucv = iucv_sk(sk); in iucv_sock_getsockopt()
1554 lock_sock(sk); in iucv_sock_getsockopt()
1557 release_sock(sk); in iucv_sock_getsockopt()
1560 if (sk->sk_state == IUCV_OPEN) in iucv_sock_getsockopt()
1586 struct sock *sk, *nsk; in iucv_callback_connreq() local
1595 sk = NULL; in iucv_callback_connreq()
1596 sk_for_each(sk, &iucv_sk_list.head) in iucv_callback_connreq()
1597 if (sk->sk_state == IUCV_LISTEN && in iucv_callback_connreq()
1598 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { in iucv_callback_connreq()
1603 iucv = iucv_sk(sk); in iucv_callback_connreq()
1611 bh_lock_sock(sk); in iucv_callback_connreq()
1617 if (sk->sk_state != IUCV_LISTEN) { in iucv_callback_connreq()
1624 if (sk_acceptq_is_full(sk)) { in iucv_callback_connreq()
1631 nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); in iucv_callback_connreq()
1639 iucv_sock_init(nsk, sk); in iucv_callback_connreq()
1666 iucv_accept_enqueue(sk, nsk); in iucv_callback_connreq()
1670 sk->sk_data_ready(sk); in iucv_callback_connreq()
1673 bh_unlock_sock(sk); in iucv_callback_connreq()
1679 struct sock *sk = path->private; in iucv_callback_connack() local
1681 sk->sk_state = IUCV_CONNECTED; in iucv_callback_connack()
1682 sk->sk_state_change(sk); in iucv_callback_connack()
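The path callbacks starting at line 1679 are thin adapters from IUCV path events to socket state. The connection-complete callback is almost fully visible already; completed here, with the ipuser parameter assumed from the usual iucv_handler callback signature:

static void iucv_callback_connack(struct iucv_path *path, u8 *ipuser)
{
	struct sock *sk = path->private;	/* line 1679 */

	sk->sk_state = IUCV_CONNECTED;		/* line 1681 */
	sk->sk_state_change(sk);		/* wake anyone in iucv_sock_wait() */
}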
1687 struct sock *sk = path->private; in iucv_callback_rx() local
1688 struct iucv_sock *iucv = iucv_sk(sk); in iucv_callback_rx()
1693 if (sk->sk_shutdown & RCV_SHUTDOWN) { in iucv_callback_rx()
1704 len = atomic_read(&sk->sk_rmem_alloc); in iucv_callback_rx()
1706 if (len > sk->sk_rcvbuf) in iucv_callback_rx()
1713 iucv_process_message(sk, skb, path, msg); in iucv_callback_rx()
1732 struct sock *sk = path->private; in iucv_callback_txdone() local
1739 iucv = iucv_sk(sk); in iucv_callback_txdone()
1742 bh_lock_sock(sk); in iucv_callback_txdone()
1761 iucv_sock_wake_msglim(sk); in iucv_callback_txdone()
1764 if (sk->sk_state == IUCV_CLOSING) { in iucv_callback_txdone()
1766 sk->sk_state = IUCV_CLOSED; in iucv_callback_txdone()
1767 sk->sk_state_change(sk); in iucv_callback_txdone()
1770 bh_unlock_sock(sk); in iucv_callback_txdone()
1776 struct sock *sk = path->private; in iucv_callback_connrej() local
1778 if (sk->sk_state == IUCV_CLOSED) in iucv_callback_connrej()
1781 bh_lock_sock(sk); in iucv_callback_connrej()
1782 iucv_sever_path(sk, 1); in iucv_callback_connrej()
1783 sk->sk_state = IUCV_DISCONN; in iucv_callback_connrej()
1785 sk->sk_state_change(sk); in iucv_callback_connrej()
1786 bh_unlock_sock(sk); in iucv_callback_connrej()
1794 struct sock *sk = path->private; in iucv_callback_shutdown() local
1796 bh_lock_sock(sk); in iucv_callback_shutdown()
1797 if (sk->sk_state != IUCV_CLOSED) { in iucv_callback_shutdown()
1798 sk->sk_shutdown |= SEND_SHUTDOWN; in iucv_callback_shutdown()
1799 sk->sk_state_change(sk); in iucv_callback_shutdown()
1801 bh_unlock_sock(sk); in iucv_callback_shutdown()
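The peer-shutdown callback (lines 1794-1801) only flags the send side as shut down; receive stays usable so queued data can still be drained. Assembled from the fragments, with the ipuser parameter assumed:

static void iucv_callback_shutdown(struct iucv_path *path, u8 *ipuser)
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;	/* line 1798 */
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}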
1837 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_syn() argument
1844 iucv = iucv_sk(sk); in afiucv_hs_callback_syn()
1853 nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); in afiucv_hs_callback_syn()
1854 bh_lock_sock(sk); in afiucv_hs_callback_syn()
1855 if ((sk->sk_state != IUCV_LISTEN) || in afiucv_hs_callback_syn()
1856 sk_acceptq_is_full(sk) || in afiucv_hs_callback_syn()
1863 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1868 iucv_sock_init(nsk, sk); in afiucv_hs_callback_syn()
1879 nsk->sk_bound_dev_if = sk->sk_bound_dev_if; in afiucv_hs_callback_syn()
1888 iucv_accept_enqueue(sk, nsk); in afiucv_hs_callback_syn()
1890 sk->sk_data_ready(sk); in afiucv_hs_callback_syn()
1893 bh_unlock_sock(sk); in afiucv_hs_callback_syn()
1902 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synack() argument
1904 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synack()
1906 if (!iucv || sk->sk_state != IUCV_BOUND) { in afiucv_hs_callback_synack()
1911 bh_lock_sock(sk); in afiucv_hs_callback_synack()
1913 sk->sk_state = IUCV_CONNECTED; in afiucv_hs_callback_synack()
1914 sk->sk_state_change(sk); in afiucv_hs_callback_synack()
1915 bh_unlock_sock(sk); in afiucv_hs_callback_synack()
1923 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_synfin() argument
1925 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_synfin()
1927 if (!iucv || sk->sk_state != IUCV_BOUND) { in afiucv_hs_callback_synfin()
1932 bh_lock_sock(sk); in afiucv_hs_callback_synfin()
1933 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_synfin()
1934 sk->sk_state_change(sk); in afiucv_hs_callback_synfin()
1935 bh_unlock_sock(sk); in afiucv_hs_callback_synfin()
1943 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_fin() argument
1945 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_fin()
1953 bh_lock_sock(sk); in afiucv_hs_callback_fin()
1954 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_fin()
1955 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_fin()
1956 sk->sk_state_change(sk); in afiucv_hs_callback_fin()
1958 bh_unlock_sock(sk); in afiucv_hs_callback_fin()
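For the HiperSockets transport, a received FIN (lines 1943-1958) simply moves a connected socket to IUCV_DISCONN under the bottom-half lock. Sketch only; freeing the skb and the NET_RX_SUCCESS return are assumptions based on the other afiucv_hs_callback_*() helpers:

static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* the other end of the connection closed */
	if (iucv) {
		bh_lock_sock(sk);
		if (sk->sk_state == IUCV_CONNECTED) {
			sk->sk_state = IUCV_DISCONN;	/* line 1955 */
			sk->sk_state_change(sk);
		}
		bh_unlock_sock(sk);
	}
	kfree_skb(skb);			/* assumed: control skb is consumed */
	return NET_RX_SUCCESS;
}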
1966 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_win() argument
1968 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_win()
1973 if (sk->sk_state != IUCV_CONNECTED) in afiucv_hs_callback_win()
1977 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_win()
1984 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) in afiucv_hs_callback_rx() argument
1986 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_rx()
1993 if (sk->sk_state != IUCV_CONNECTED) { in afiucv_hs_callback_rx()
1998 if (sk->sk_shutdown & RCV_SHUTDOWN) { in afiucv_hs_callback_rx()
2008 if (sk_filter(sk, skb)) { in afiucv_hs_callback_rx()
2009 atomic_inc(&sk->sk_drops); /* skb rejected by filter */ in afiucv_hs_callback_rx()
2016 if (__sock_queue_rcv_skb(sk, skb)) in afiucv_hs_callback_rx()
2020 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); in afiucv_hs_callback_rx()
2033 struct sock *sk; in afiucv_hs_rcv() local
2051 sk = NULL; in afiucv_hs_rcv()
2053 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_hs_rcv()
2055 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2057 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2059 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && in afiucv_hs_rcv()
2060 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2062 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2066 if ((!memcmp(&iucv_sk(sk)->src_name, in afiucv_hs_rcv()
2068 (!memcmp(&iucv_sk(sk)->src_user_id, in afiucv_hs_rcv()
2070 (!memcmp(&iucv_sk(sk)->dst_name, in afiucv_hs_rcv()
2072 (!memcmp(&iucv_sk(sk)->dst_user_id, in afiucv_hs_rcv()
2074 iucv = iucv_sk(sk); in afiucv_hs_rcv()
2081 sk = NULL; in afiucv_hs_rcv()
2095 err = afiucv_hs_callback_syn(sk, skb); in afiucv_hs_rcv()
2099 err = afiucv_hs_callback_synack(sk, skb); in afiucv_hs_rcv()
2103 err = afiucv_hs_callback_synfin(sk, skb); in afiucv_hs_rcv()
2107 err = afiucv_hs_callback_fin(sk, skb); in afiucv_hs_rcv()
2110 err = afiucv_hs_callback_win(sk, skb); in afiucv_hs_rcv()
2122 err = afiucv_hs_callback_rx(sk, skb); in afiucv_hs_rcv()
2135 static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n) in afiucv_hs_callback_txnotify() argument
2137 struct iucv_sock *iucv = iucv_sk(sk); in afiucv_hs_callback_txnotify()
2139 if (sock_flag(sk, SOCK_ZAPPED)) in afiucv_hs_callback_txnotify()
2145 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2153 iucv_sock_wake_msglim(sk); in afiucv_hs_callback_txnotify()
2157 if (sk->sk_state == IUCV_CONNECTED) { in afiucv_hs_callback_txnotify()
2158 sk->sk_state = IUCV_DISCONN; in afiucv_hs_callback_txnotify()
2159 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2163 if (sk->sk_state == IUCV_CLOSING) { in afiucv_hs_callback_txnotify()
2165 sk->sk_state = IUCV_CLOSED; in afiucv_hs_callback_txnotify()
2166 sk->sk_state_change(sk); in afiucv_hs_callback_txnotify()
2178 struct sock *sk; in afiucv_netdev_event() local
2184 sk_for_each(sk, &iucv_sk_list.head) { in afiucv_netdev_event()
2185 iucv = iucv_sk(sk); in afiucv_netdev_event()
2187 (sk->sk_state == IUCV_CONNECTED)) { in afiucv_netdev_event()
2189 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); in afiucv_netdev_event()
2190 sk->sk_state = IUCV_DISCONN; in afiucv_netdev_event()
2191 sk->sk_state_change(sk); in afiucv_netdev_event()
2230 struct sock *sk; in iucv_sock_create() local
2247 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern); in iucv_sock_create()
2248 if (!sk) in iucv_sock_create()
2251 iucv_sock_init(sk, NULL); in iucv_sock_create()