Lines Matching refs:sk (include/net/sock.h)
429 void (*sk_data_ready)(struct sock *sk);
481 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
534 void (*sk_state_change)(struct sock *sk);
535 void (*sk_write_space)(struct sock *sk);
536 void (*sk_error_report)(struct sock *sk);
537 int (*sk_backlog_rcv)(struct sock *sk,
539 void (*sk_destruct)(struct sock *sk);
585 static inline bool sk_user_data_is_nocopy(const struct sock *sk) in sk_user_data_is_nocopy() argument
587 return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY); in sk_user_data_is_nocopy()
590 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) argument
603 __locked_read_sk_user_data_with_flags(const struct sock *sk, in __locked_read_sk_user_data_with_flags() argument
607 (uintptr_t)rcu_dereference_check(__sk_user_data(sk), in __locked_read_sk_user_data_with_flags()
608 lockdep_is_held(&sk->sk_callback_lock)); in __locked_read_sk_user_data_with_flags()
626 __rcu_dereference_sk_user_data_with_flags(const struct sock *sk, in __rcu_dereference_sk_user_data_with_flags() argument
629 uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk)); in __rcu_dereference_sk_user_data_with_flags()
638 #define rcu_dereference_sk_user_data(sk) \ argument
639 __rcu_dereference_sk_user_data_with_flags(sk, 0)
640 #define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \ argument
646 rcu_assign_pointer(__sk_user_data((sk)), \
649 #define rcu_assign_sk_user_data(sk, ptr) \ argument
650 __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
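
The sk_user_data accessors above form a small RCU API: writers publish a pointer under sk_callback_lock, readers dereference it under rcu_read_lock(). A minimal usage sketch (not from sock.h itself), assuming a hypothetical struct my_state as the private per-socket payload:

	#include <net/sock.h>

	struct my_state {			/* hypothetical private state */
		int counter;
	};

	static void my_attach(struct sock *sk, struct my_state *st)
	{
		write_lock_bh(&sk->sk_callback_lock);	/* writers serialize here */
		rcu_assign_sk_user_data(sk, st);
		write_unlock_bh(&sk->sk_callback_lock);
	}

	static int my_read_counter(struct sock *sk)
	{
		struct my_state *st;
		int val = 0;

		rcu_read_lock();		/* lockless reader side */
		st = rcu_dereference_sk_user_data(sk);
		if (st)
			val = st->counter;
		rcu_read_unlock();

		return val;
	}
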
653 struct net *sock_net(const struct sock *sk) in sock_net() argument
655 return read_pnet(&sk->sk_net); in sock_net()
659 void sock_net_set(struct sock *sk, struct net *net) in sock_net_set() argument
661 write_pnet(&sk->sk_net, net); in sock_net_set()
675 int sk_set_peek_off(struct sock *sk, int val);
677 static inline int sk_peek_offset(const struct sock *sk, int flags) in sk_peek_offset() argument
680 return READ_ONCE(sk->sk_peek_off); in sk_peek_offset()
686 static inline void sk_peek_offset_bwd(struct sock *sk, int val) in sk_peek_offset_bwd() argument
688 s32 off = READ_ONCE(sk->sk_peek_off); in sk_peek_offset_bwd()
692 WRITE_ONCE(sk->sk_peek_off, off); in sk_peek_offset_bwd()
696 static inline void sk_peek_offset_fwd(struct sock *sk, int val) in sk_peek_offset_fwd() argument
698 sk_peek_offset_bwd(sk, -val); in sk_peek_offset_fwd()
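
The peek-offset helpers implement SO_PEEK_OFF: sk_peek_offset() tells a MSG_PEEK read where to start, sk_peek_offset_fwd() advances the stored offset after a peek, and sk_peek_offset_bwd() walks it back when data is actually consumed. A simplified sketch, loosely following the datagram receive path (my_recv_one is hypothetical and assumes the offset falls inside the first queued skb):

	#include <net/sock.h>

	static int my_recv_one(struct sock *sk, struct sk_buff *skb,
			       struct msghdr *msg, size_t len, int flags)
	{
		int off = sk_peek_offset(sk, flags); /* nonzero only for MSG_PEEK + SO_PEEK_OFF */
		int err;

		len = min_t(size_t, len, skb->len - off);
		err = skb_copy_datagram_msg(skb, off, msg, len);
		if (err)
			return err;

		if (flags & MSG_PEEK)
			sk_peek_offset_fwd(sk, len);	  /* next peek starts past this data */
		else
			sk_peek_offset_bwd(sk, skb->len); /* data really left the queue */

		return len;
	}
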
729 static inline struct sock *sk_next(const struct sock *sk) in sk_next() argument
731 return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node); in sk_next()
734 static inline struct sock *sk_nulls_next(const struct sock *sk) in sk_nulls_next() argument
736 return (!is_a_nulls(sk->sk_nulls_node.next)) ? in sk_nulls_next()
737 hlist_nulls_entry(sk->sk_nulls_node.next, in sk_nulls_next()
742 static inline bool sk_unhashed(const struct sock *sk) in sk_unhashed() argument
744 return hlist_unhashed(&sk->sk_node); in sk_unhashed()
747 static inline bool sk_hashed(const struct sock *sk) in sk_hashed() argument
749 return !sk_unhashed(sk); in sk_hashed()
757 static inline void __sk_del_node(struct sock *sk) in __sk_del_node() argument
759 __hlist_del(&sk->sk_node); in __sk_del_node()
763 static inline bool __sk_del_node_init(struct sock *sk) in __sk_del_node_init() argument
765 if (sk_hashed(sk)) { in __sk_del_node_init()
766 __sk_del_node(sk); in __sk_del_node_init()
767 sk_node_init(&sk->sk_node); in __sk_del_node_init()
779 static __always_inline void sock_hold(struct sock *sk) in sock_hold() argument
781 refcount_inc(&sk->sk_refcnt); in sock_hold()
787 static __always_inline void __sock_put(struct sock *sk) in __sock_put() argument
789 refcount_dec(&sk->sk_refcnt); in __sock_put()
792 static inline bool sk_del_node_init(struct sock *sk) in sk_del_node_init() argument
794 bool rc = __sk_del_node_init(sk); in sk_del_node_init()
798 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); in sk_del_node_init()
799 __sock_put(sk); in sk_del_node_init()
803 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) argument
805 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) in __sk_nulls_del_node_init_rcu() argument
807 if (sk_hashed(sk)) { in __sk_nulls_del_node_init_rcu()
808 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); in __sk_nulls_del_node_init_rcu()
814 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) in sk_nulls_del_node_init_rcu() argument
816 bool rc = __sk_nulls_del_node_init_rcu(sk); in sk_nulls_del_node_init_rcu()
820 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); in sk_nulls_del_node_init_rcu()
821 __sock_put(sk); in sk_nulls_del_node_init_rcu()
826 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) in __sk_add_node() argument
828 hlist_add_head(&sk->sk_node, list); in __sk_add_node()
831 static inline void sk_add_node(struct sock *sk, struct hlist_head *list) in sk_add_node() argument
833 sock_hold(sk); in sk_add_node()
834 __sk_add_node(sk, list); in sk_add_node()
837 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) in sk_add_node_rcu() argument
839 sock_hold(sk); in sk_add_node_rcu()
840 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && in sk_add_node_rcu()
841 sk->sk_family == AF_INET6) in sk_add_node_rcu()
842 hlist_add_tail_rcu(&sk->sk_node, list); in sk_add_node_rcu()
844 hlist_add_head_rcu(&sk->sk_node, list); in sk_add_node_rcu()
847 static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list) in sk_add_node_tail_rcu() argument
849 sock_hold(sk); in sk_add_node_tail_rcu()
850 hlist_add_tail_rcu(&sk->sk_node, list); in sk_add_node_tail_rcu()
853 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_rcu() argument
855 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_rcu()
858 static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_tail_rcu() argument
860 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_tail_rcu()
863 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in sk_nulls_add_node_rcu() argument
865 sock_hold(sk); in sk_nulls_add_node_rcu()
866 __sk_nulls_add_node_rcu(sk, list); in sk_nulls_add_node_rcu()
869 static inline void __sk_del_bind_node(struct sock *sk) in __sk_del_bind_node() argument
871 __hlist_del(&sk->sk_bind_node); in __sk_del_bind_node()
874 static inline void sk_add_bind_node(struct sock *sk, in sk_add_bind_node() argument
877 hlist_add_head(&sk->sk_bind_node, list); in sk_add_bind_node()
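
The node helpers pair list manipulation with reference counting: sk_add_node() takes a reference on behalf of the table, and sk_del_node_init() drops it again. A sketch under the assumption of a hypothetical single-bucket table guarded by its own spinlock:

	#include <net/sock.h>

	static DEFINE_SPINLOCK(my_table_lock);		/* hypothetical bucket lock */
	static struct hlist_head my_hash_bucket;	/* hypothetical bucket */

	static void my_hash(struct sock *sk)
	{
		spin_lock_bh(&my_table_lock);
		sk_add_node(sk, &my_hash_bucket);	/* sock_hold() + hlist_add_head() */
		spin_unlock_bh(&my_table_lock);
	}

	static void my_unhash(struct sock *sk)
	{
		spin_lock_bh(&my_table_lock);
		sk_del_node_init(sk);			/* unlink and drop the table's ref */
		spin_unlock_bh(&my_table_lock);
	}
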
914 static inline struct user_namespace *sk_user_ns(const struct sock *sk) in sk_user_ns() argument
920 return sk->sk_socket->file->f_cred->user_ns; in sk_user_ns()
965 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) in sock_set_flag() argument
967 __set_bit(flag, &sk->sk_flags); in sock_set_flag()
970 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) in sock_reset_flag() argument
972 __clear_bit(flag, &sk->sk_flags); in sock_reset_flag()
975 static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, in sock_valbool_flag() argument
979 sock_set_flag(sk, bit); in sock_valbool_flag()
981 sock_reset_flag(sk, bit); in sock_valbool_flag()
984 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) in sock_flag() argument
986 return test_bit(flag, &sk->sk_flags); in sock_flag()
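
Note that sock_set_flag()/sock_reset_flag() use the non-atomic __set_bit()/__clear_bit(), so callers are expected to own the socket lock. A typical setsockopt-style sketch (my_set_urginline is hypothetical):

	#include <net/sock.h>

	static void my_set_urginline(struct sock *sk, int val)
	{
		lock_sock(sk);				/* flag helpers are non-atomic */
		sock_valbool_flag(sk, SOCK_URGINLINE, val); /* set or clear the bit */
		release_sock(sk);
	}
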
1008 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) in sk_gfp_mask() argument
1010 return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC); in sk_gfp_mask()
1013 static inline void sk_acceptq_removed(struct sock *sk) in sk_acceptq_removed() argument
1015 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1); in sk_acceptq_removed()
1018 static inline void sk_acceptq_added(struct sock *sk) in sk_acceptq_added() argument
1020 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1); in sk_acceptq_added()
1027 static inline bool sk_acceptq_is_full(const struct sock *sk) in sk_acceptq_is_full() argument
1029 return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog); in sk_acceptq_is_full()
1035 static inline int sk_stream_min_wspace(const struct sock *sk) in sk_stream_min_wspace() argument
1037 return READ_ONCE(sk->sk_wmem_queued) >> 1; in sk_stream_min_wspace()
1040 static inline int sk_stream_wspace(const struct sock *sk) in sk_stream_wspace() argument
1042 return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued); in sk_stream_wspace()
1045 static inline void sk_wmem_queued_add(struct sock *sk, int val) in sk_wmem_queued_add() argument
1047 WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val); in sk_wmem_queued_add()
1050 static inline void sk_forward_alloc_add(struct sock *sk, int val) in sk_forward_alloc_add() argument
1053 WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val); in sk_forward_alloc_add()
1056 void sk_stream_write_space(struct sock *sk);
1059 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) in __sk_add_backlog() argument
1064 if (!sk->sk_backlog.tail) in __sk_add_backlog()
1065 WRITE_ONCE(sk->sk_backlog.head, skb); in __sk_add_backlog()
1067 sk->sk_backlog.tail->next = skb; in __sk_add_backlog()
1069 WRITE_ONCE(sk->sk_backlog.tail, skb); in __sk_add_backlog()
1078 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) in sk_rcvqueues_full() argument
1080 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); in sk_rcvqueues_full()
1086 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, in sk_add_backlog() argument
1089 if (sk_rcvqueues_full(sk, limit)) in sk_add_backlog()
1097 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) in sk_add_backlog()
1100 __sk_add_backlog(sk, skb); in sk_add_backlog()
1101 sk->sk_backlog.len += skb->truesize; in sk_add_backlog()
1105 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1107 INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
1108 INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
1110 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in sk_backlog_rcv() argument
1113 return __sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
1115 return INDIRECT_CALL_INET(sk->sk_backlog_rcv, in sk_backlog_rcv()
1118 sk, skb); in sk_backlog_rcv()
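
Together these helpers support the classic softirq receive pattern (compare tcp_v4_rcv()): if a process currently owns the socket, park the skb on the backlog to be replayed by release_sock(); otherwise process it directly. A simplified sketch with a hypothetical my_do_rcv() handler and sk_rcvbuf alone as the limit:

	#include <net/sock.h>

	static int my_do_rcv(struct sock *sk, struct sk_buff *skb);	/* hypothetical */

	static int my_rcv(struct sock *sk, struct sk_buff *skb)
	{
		int ret = 0;

		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			ret = my_do_rcv(sk, skb);	/* fast path: process in softirq */
		} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
			kfree_skb(skb);			/* backlog limit exceeded */
			ret = -ENOBUFS;
		}
		bh_unlock_sock(sk);

		return ret;
	}
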
1121 static inline void sk_incoming_cpu_update(struct sock *sk) in sk_incoming_cpu_update() argument
1125 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu)) in sk_incoming_cpu_update()
1126 WRITE_ONCE(sk->sk_incoming_cpu, cpu); in sk_incoming_cpu_update()
1130 static inline void sock_rps_save_rxhash(struct sock *sk, in sock_rps_save_rxhash() argument
1137 if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash)) in sock_rps_save_rxhash()
1138 WRITE_ONCE(sk->sk_rxhash, skb->hash); in sock_rps_save_rxhash()
1142 static inline void sock_rps_reset_rxhash(struct sock *sk) in sock_rps_reset_rxhash() argument
1146 WRITE_ONCE(sk->sk_rxhash, 0); in sock_rps_reset_rxhash()
1165 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1166 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1167 void sk_stream_wait_close(struct sock *sk, long timeo_p);
1168 int sk_stream_error(struct sock *sk, int flags, int err);
1169 void sk_stream_kill_queues(struct sock *sk);
1170 void sk_set_memalloc(struct sock *sk);
1171 void sk_clear_memalloc(struct sock *sk);
1173 void __sk_flush_backlog(struct sock *sk);
1175 static inline bool sk_flush_backlog(struct sock *sk) in sk_flush_backlog() argument
1177 if (unlikely(READ_ONCE(sk->sk_backlog.tail))) { in sk_flush_backlog()
1178 __sk_flush_backlog(sk); in sk_flush_backlog()
1184 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
1198 static inline void sk_prot_clear_nulls(struct sock *sk, int size) in sk_prot_clear_nulls() argument
1201 memset(sk, 0, offsetof(struct sock, sk_node.next)); in sk_prot_clear_nulls()
1202 memset(&sk->sk_node.pprev, 0, in sk_prot_clear_nulls()
1217 void (*close)(struct sock *sk,
1219 int (*pre_connect)(struct sock *sk,
1222 int (*connect)(struct sock *sk,
1225 int (*disconnect)(struct sock *sk, int flags);
1227 struct sock * (*accept)(struct sock *sk,
1230 int (*ioctl)(struct sock *sk, int cmd,
1232 int (*init)(struct sock *sk);
1233 void (*destroy)(struct sock *sk);
1234 void (*shutdown)(struct sock *sk, int how);
1235 int (*setsockopt)(struct sock *sk, int level,
1238 int (*getsockopt)(struct sock *sk, int level,
1241 void (*keepalive)(struct sock *sk, int valbool);
1243 int (*compat_ioctl)(struct sock *sk,
1246 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
1248 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
1251 int (*bind)(struct sock *sk,
1253 int (*bind_add)(struct sock *sk,
1256 int (*backlog_rcv) (struct sock *sk,
1261 void (*release_cb)(struct sock *sk);
1264 int (*hash)(struct sock *sk);
1265 void (*unhash)(struct sock *sk);
1266 void (*rehash)(struct sock *sk);
1267 int (*get_port)(struct sock *sk, unsigned short snum);
1268 void (*put_port)(struct sock *sk);
1270 int (*psock_update_sk_prot)(struct sock *sk,
1281 int (*forward_alloc_get)(const struct sock *sk);
1284 bool (*stream_memory_free)(const struct sock *sk, int wake);
1285 bool (*sock_is_readable)(struct sock *sk);
1287 void (*enter_memory_pressure)(struct sock *sk);
1288 void (*leave_memory_pressure)(struct sock *sk);
1335 int (*diag_destroy)(struct sock *sk, int err);
1342 INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
1344 static inline int sk_forward_alloc_get(const struct sock *sk) in sk_forward_alloc_get() argument
1347 if (sk->sk_prot->forward_alloc_get) in sk_forward_alloc_get()
1348 return sk->sk_prot->forward_alloc_get(sk); in sk_forward_alloc_get()
1350 return READ_ONCE(sk->sk_forward_alloc); in sk_forward_alloc_get()
1353 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) in __sk_stream_memory_free() argument
1355 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) in __sk_stream_memory_free()
1358 return sk->sk_prot->stream_memory_free ? in __sk_stream_memory_free()
1359 INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free, in __sk_stream_memory_free()
1360 tcp_stream_memory_free, sk, wake) : true; in __sk_stream_memory_free()
1363 static inline bool sk_stream_memory_free(const struct sock *sk) in sk_stream_memory_free() argument
1365 return __sk_stream_memory_free(sk, 0); in sk_stream_memory_free()
1368 static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake) in __sk_stream_is_writeable() argument
1370 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && in __sk_stream_is_writeable()
1371 __sk_stream_memory_free(sk, wake); in __sk_stream_is_writeable()
1374 static inline bool sk_stream_is_writeable(const struct sock *sk) in sk_stream_is_writeable() argument
1376 return __sk_stream_is_writeable(sk, 0); in sk_stream_is_writeable()
1379 static inline int sk_under_cgroup_hierarchy(struct sock *sk, in sk_under_cgroup_hierarchy() argument
1383 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), in sk_under_cgroup_hierarchy()
1392 static inline void sk_sockets_allocated_dec(struct sock *sk) in sk_sockets_allocated_dec() argument
1394 percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1, in sk_sockets_allocated_dec()
1398 static inline void sk_sockets_allocated_inc(struct sock *sk) in sk_sockets_allocated_inc() argument
1400 percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1, in sk_sockets_allocated_inc()
1405 sk_sockets_allocated_read_positive(struct sock *sk) in sk_sockets_allocated_read_positive() argument
1407 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); in sk_sockets_allocated_read_positive()
1451 static inline int __sk_prot_rehash(struct sock *sk) in __sk_prot_rehash() argument
1453 sk->sk_prot->unhash(sk); in __sk_prot_rehash()
1454 return sk->sk_prot->hash(sk); in __sk_prot_rehash()
1488 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1489 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1490 void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1491 void __sk_mem_reclaim(struct sock *sk, int amount);
1497 static inline long sk_prot_mem_limits(const struct sock *sk, int index) in sk_prot_mem_limits() argument
1499 return READ_ONCE(sk->sk_prot->sysctl_mem[index]); in sk_prot_mem_limits()
1507 static inline bool sk_has_account(struct sock *sk) in sk_has_account() argument
1510 return !!sk->sk_prot->memory_allocated; in sk_has_account()
1513 static inline bool sk_wmem_schedule(struct sock *sk, int size) in sk_wmem_schedule() argument
1517 if (!sk_has_account(sk)) in sk_wmem_schedule()
1519 delta = size - sk->sk_forward_alloc; in sk_wmem_schedule()
1520 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND); in sk_wmem_schedule()
1524 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) in sk_rmem_schedule() argument
1528 if (!sk_has_account(sk)) in sk_rmem_schedule()
1530 delta = size - sk->sk_forward_alloc; in sk_rmem_schedule()
1531 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || in sk_rmem_schedule()
1535 static inline int sk_unused_reserved_mem(const struct sock *sk) in sk_unused_reserved_mem() argument
1539 if (likely(!sk->sk_reserved_mem)) in sk_unused_reserved_mem()
1542 unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued - in sk_unused_reserved_mem()
1543 atomic_read(&sk->sk_rmem_alloc); in sk_unused_reserved_mem()
1548 static inline void sk_mem_reclaim(struct sock *sk) in sk_mem_reclaim() argument
1552 if (!sk_has_account(sk)) in sk_mem_reclaim()
1555 reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); in sk_mem_reclaim()
1558 __sk_mem_reclaim(sk, reclaimable); in sk_mem_reclaim()
1561 static inline void sk_mem_reclaim_final(struct sock *sk) in sk_mem_reclaim_final() argument
1563 sk->sk_reserved_mem = 0; in sk_mem_reclaim_final()
1564 sk_mem_reclaim(sk); in sk_mem_reclaim_final()
1567 static inline void sk_mem_charge(struct sock *sk, int size) in sk_mem_charge() argument
1569 if (!sk_has_account(sk)) in sk_mem_charge()
1571 sk_forward_alloc_add(sk, -size); in sk_mem_charge()
1574 static inline void sk_mem_uncharge(struct sock *sk, int size) in sk_mem_uncharge() argument
1576 if (!sk_has_account(sk)) in sk_mem_uncharge()
1578 sk_forward_alloc_add(sk, size); in sk_mem_uncharge()
1579 sk_mem_reclaim(sk); in sk_mem_uncharge()
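
The charge/uncharge helpers are meant to bracket queue operations. A sketch of the send-side order, loosely following what TCP's queueing code does (caller holds the socket lock); the free path reverses it with sk_wmem_queued_add(sk, -skb->truesize) plus sk_mem_uncharge():

	#include <net/sock.h>

	static int my_queue_for_send(struct sock *sk, struct sk_buff *skb)
	{
		if (!sk_wmem_schedule(sk, skb->truesize))
			return -ENOMEM;			/* over protocol memory limits */

		skb_queue_tail(&sk->sk_write_queue, skb);
		sk_wmem_queued_add(sk, skb->truesize);	/* bytes parked in the queue */
		sk_mem_charge(sk, skb->truesize);	/* consume forward-allocated quota */

		return 0;
	}
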
1589 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ argument
1591 sk->sk_lock.owned = 0; \
1592 init_waitqueue_head(&sk->sk_lock.wq); \
1593 spin_lock_init(&(sk)->sk_lock.slock); \
1594 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1595 sizeof((sk)->sk_lock)); \
1596 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1598 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1601 static inline bool lockdep_sock_is_held(const struct sock *sk) in lockdep_sock_is_held() argument
1603 return lockdep_is_held(&sk->sk_lock) || in lockdep_sock_is_held()
1604 lockdep_is_held(&sk->sk_lock.slock); in lockdep_sock_is_held()
1607 void lock_sock_nested(struct sock *sk, int subclass);
1609 static inline void lock_sock(struct sock *sk) in lock_sock() argument
1611 lock_sock_nested(sk, 0); in lock_sock()
1614 void __lock_sock(struct sock *sk);
1615 void __release_sock(struct sock *sk);
1616 void release_sock(struct sock *sk);
1625 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
1640 static inline bool lock_sock_fast(struct sock *sk) in lock_sock_fast() argument
1643 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); in lock_sock_fast()
1645 return __lock_sock_fast(sk); in lock_sock_fast()
1649 static inline bool lock_sock_fast_nested(struct sock *sk) in lock_sock_fast_nested() argument
1651 mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_); in lock_sock_fast_nested()
1653 return __lock_sock_fast(sk); in lock_sock_fast_nested()
1664 static inline void unlock_sock_fast(struct sock *sk, bool slow) in unlock_sock_fast() argument
1665 __releases(&sk->sk_lock.slock) in unlock_sock_fast()
1668 release_sock(sk); in unlock_sock_fast()
1669 __release(&sk->sk_lock.slock); in unlock_sock_fast()
1671 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in unlock_sock_fast()
1672 spin_unlock_bh(&sk->sk_lock.slock); in unlock_sock_fast()
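
The fast-lock pair is intended for short sections: lock_sock_fast() keeps only the spinlock when no process owns the socket and reports which mode it took, and that bool must be handed back to unlock_sock_fast(). A minimal sketch:

	#include <net/sock.h>

	static int my_peek_state(struct sock *sk)
	{
		bool slow = lock_sock_fast(sk);	/* spinlock-only unless a process owns sk */
		int state = sk->sk_state;

		unlock_sock_fast(sk, slow);	/* must be told which mode was taken */
		return state;
	}
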
1676 void sockopt_lock_sock(struct sock *sk);
1677 void sockopt_release_sock(struct sock *sk);
1695 static inline void sock_owned_by_me(const struct sock *sk) in sock_owned_by_me() argument
1698 WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks); in sock_owned_by_me()
1702 static inline void sock_not_owned_by_me(const struct sock *sk) in sock_not_owned_by_me() argument
1705 WARN_ON_ONCE(lockdep_sock_is_held(sk) && debug_locks); in sock_not_owned_by_me()
1709 static inline bool sock_owned_by_user(const struct sock *sk) in sock_owned_by_user() argument
1711 sock_owned_by_me(sk); in sock_owned_by_user()
1712 return sk->sk_lock.owned; in sock_owned_by_user()
1715 static inline bool sock_owned_by_user_nocheck(const struct sock *sk) in sock_owned_by_user_nocheck() argument
1717 return sk->sk_lock.owned; in sock_owned_by_user_nocheck()
1720 static inline void sock_release_ownership(struct sock *sk) in sock_release_ownership() argument
1722 DEBUG_NET_WARN_ON_ONCE(!sock_owned_by_user_nocheck(sk)); in sock_release_ownership()
1723 sk->sk_lock.owned = 0; in sock_release_ownership()
1726 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in sock_release_ownership()
1732 struct sock *sk = (struct sock *)csk; in sock_allow_reclassification() local
1734 return !sock_owned_by_user_nocheck(sk) && in sock_allow_reclassification()
1735 !spin_is_locked(&sk->sk_lock.slock); in sock_allow_reclassification()
1740 void sk_free(struct sock *sk);
1741 void sk_destruct(struct sock *sk);
1742 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1743 void sk_free_unlock_clone(struct sock *sk);
1745 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1749 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1761 int sk_setsockopt(struct sock *sk, int level, int optname,
1770 int sk_getsockopt(struct sock *sk, int level, int optname,
1774 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1778 static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk, in sock_alloc_send_skb() argument
1782 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); in sock_alloc_send_skb()
1785 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1786 void sock_kfree_s(struct sock *sk, void *mem, int size);
1787 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1788 void sk_send_sigurg(struct sock *sk);
1790 static inline void sock_replace_proto(struct sock *sk, struct proto *proto) in sock_replace_proto() argument
1792 if (sk->sk_socket) in sock_replace_proto()
1793 clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); in sock_replace_proto()
1794 WRITE_ONCE(sk->sk_prot, proto); in sock_replace_proto()
1804 const struct sock *sk) in sockcm_init() argument
1807 .tsflags = READ_ONCE(sk->sk_tsflags) in sockcm_init()
1811 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
1813 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1829 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1845 void sk_common_release(struct sock *sk);
1852 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
1857 void sock_init_data(struct socket *sock, struct sock *sk);
1885 static inline void sock_put(struct sock *sk) in sock_put() argument
1887 if (refcount_dec_and_test(&sk->sk_refcnt)) in sock_put()
1888 sk_free(sk); in sock_put()
1893 void sock_gen_put(struct sock *sk);
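
sock_hold() and sock_put() bracket any use of a socket outside the context that already owns a reference; the last sock_put() frees it through sk_free(). A sketch of the usual hand-off pattern (the worker scheduling itself is elided and hypothetical):

	#include <net/sock.h>

	static void my_start_work(struct sock *sk)
	{
		sock_hold(sk);		/* keep sk alive until the worker has run */
		/* ... schedule the hypothetical worker with sk ... */
	}

	static void my_worker(struct sock *sk)
	{
		/* ... use sk; it cannot be freed while we hold the reference ... */
		sock_put(sk);		/* may end up in sk_free() on the last ref */
	}
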
1895 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1897 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, in sk_receive_skb() argument
1900 return __sk_receive_skb(sk, skb, nested, 1, true); in sk_receive_skb()
1903 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) in sk_tx_queue_set() argument
1911 WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue); in sk_tx_queue_set()
1916 static inline void sk_tx_queue_clear(struct sock *sk) in sk_tx_queue_clear() argument
1921 WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING); in sk_tx_queue_clear()
1924 static inline int sk_tx_queue_get(const struct sock *sk) in sk_tx_queue_get() argument
1926 if (sk) { in sk_tx_queue_get()
1930 int val = READ_ONCE(sk->sk_tx_queue_mapping); in sk_tx_queue_get()
1938 static inline void __sk_rx_queue_set(struct sock *sk, in __sk_rx_queue_set() argument
1947 unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) in __sk_rx_queue_set()
1948 WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); in __sk_rx_queue_set()
1953 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) in sk_rx_queue_set() argument
1955 __sk_rx_queue_set(sk, skb, true); in sk_rx_queue_set()
1958 static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb) in sk_rx_queue_update() argument
1960 __sk_rx_queue_set(sk, skb, false); in sk_rx_queue_update()
1963 static inline void sk_rx_queue_clear(struct sock *sk) in sk_rx_queue_clear() argument
1966 WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING); in sk_rx_queue_clear()
1970 static inline int sk_rx_queue_get(const struct sock *sk) in sk_rx_queue_get() argument
1973 if (sk) { in sk_rx_queue_get()
1974 int res = READ_ONCE(sk->sk_rx_queue_mapping); in sk_rx_queue_get()
1984 static inline void sk_set_socket(struct sock *sk, struct socket *sock) in sk_set_socket() argument
1986 sk->sk_socket = sock; in sk_set_socket()
1989 static inline wait_queue_head_t *sk_sleep(struct sock *sk) in sk_sleep() argument
1992 return &rcu_dereference_raw(sk->sk_wq)->wait; in sk_sleep()
2001 static inline void sock_orphan(struct sock *sk) in sock_orphan() argument
2003 write_lock_bh(&sk->sk_callback_lock); in sock_orphan()
2004 sock_set_flag(sk, SOCK_DEAD); in sock_orphan()
2005 sk_set_socket(sk, NULL); in sock_orphan()
2006 sk->sk_wq = NULL; in sock_orphan()
2007 write_unlock_bh(&sk->sk_callback_lock); in sock_orphan()
2010 static inline void sock_graft(struct sock *sk, struct socket *parent) in sock_graft() argument
2012 WARN_ON(parent->sk); in sock_graft()
2013 write_lock_bh(&sk->sk_callback_lock); in sock_graft()
2014 rcu_assign_pointer(sk->sk_wq, &parent->wq); in sock_graft()
2015 parent->sk = sk; in sock_graft()
2016 sk_set_socket(sk, parent); in sock_graft()
2017 sk->sk_uid = SOCK_INODE(parent)->i_uid; in sock_graft()
2018 security_sock_graft(sk, parent); in sock_graft()
2019 write_unlock_bh(&sk->sk_callback_lock); in sock_graft()
2022 kuid_t sock_i_uid(struct sock *sk);
2023 unsigned long __sock_i_ino(struct sock *sk);
2024 unsigned long sock_i_ino(struct sock *sk);
2026 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk) in sock_net_uid() argument
2028 return sk ? sk->sk_uid : make_kuid(net->user_ns, 0); in sock_net_uid()
2038 static inline void sk_set_txhash(struct sock *sk) in sk_set_txhash() argument
2041 WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()); in sk_set_txhash()
2044 static inline bool sk_rethink_txhash(struct sock *sk) in sk_rethink_txhash() argument
2046 if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) { in sk_rethink_txhash()
2047 sk_set_txhash(sk); in sk_rethink_txhash()
2054 __sk_dst_get(const struct sock *sk) in __sk_dst_get() argument
2056 return rcu_dereference_check(sk->sk_dst_cache, in __sk_dst_get()
2057 lockdep_sock_is_held(sk)); in __sk_dst_get()
2061 sk_dst_get(const struct sock *sk) in sk_dst_get() argument
2066 dst = rcu_dereference(sk->sk_dst_cache); in sk_dst_get()
2073 static inline void __dst_negative_advice(struct sock *sk) in __dst_negative_advice() argument
2075 struct dst_entry *dst = __sk_dst_get(sk); in __dst_negative_advice()
2078 dst->ops->negative_advice(sk, dst); in __dst_negative_advice()
2081 static inline void dst_negative_advice(struct sock *sk) in dst_negative_advice() argument
2083 sk_rethink_txhash(sk); in dst_negative_advice()
2084 __dst_negative_advice(sk); in dst_negative_advice()
2088 __sk_dst_set(struct sock *sk, struct dst_entry *dst) in __sk_dst_set() argument
2092 sk_tx_queue_clear(sk); in __sk_dst_set()
2093 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); in __sk_dst_set()
2094 old_dst = rcu_dereference_protected(sk->sk_dst_cache, in __sk_dst_set()
2095 lockdep_sock_is_held(sk)); in __sk_dst_set()
2096 rcu_assign_pointer(sk->sk_dst_cache, dst); in __sk_dst_set()
2101 sk_dst_set(struct sock *sk, struct dst_entry *dst) in sk_dst_set() argument
2105 sk_tx_queue_clear(sk); in sk_dst_set()
2106 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); in sk_dst_set()
2107 old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst))); in sk_dst_set()
2112 __sk_dst_reset(struct sock *sk) in __sk_dst_reset() argument
2114 __sk_dst_set(sk, NULL); in __sk_dst_reset()
2118 sk_dst_reset(struct sock *sk) in sk_dst_reset() argument
2120 sk_dst_set(sk, NULL); in sk_dst_reset()
2123 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2125 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
2127 static inline void sk_dst_confirm(struct sock *sk) in sk_dst_confirm() argument
2129 if (!READ_ONCE(sk->sk_dst_pending_confirm)) in sk_dst_confirm()
2130 WRITE_ONCE(sk->sk_dst_pending_confirm, 1); in sk_dst_confirm()
2136 struct sock *sk = skb->sk; in sock_confirm_neigh() local
2138 if (sk && READ_ONCE(sk->sk_dst_pending_confirm)) in sock_confirm_neigh()
2139 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); in sock_confirm_neigh()
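
The dst helpers implement per-socket route caching: __sk_dst_check() (socket lock held) validates the cached entry against a cookie, and on failure the protocol looks the route up again and re-caches it, typically via sk_setup_caps() (declared a few lines below). A sketch with a hypothetical my_route_lookup() and the IPv4-style cookie of 0:

	#include <net/dst.h>
	#include <net/sock.h>

	static struct dst_entry *my_route_lookup(struct sock *sk);	/* hypothetical */

	static struct dst_entry *my_get_route(struct sock *sk)
	{
		struct dst_entry *dst = __sk_dst_check(sk, 0);	/* socket lock held */

		if (!dst) {
			dst = my_route_lookup(sk);
			if (!dst)
				return NULL;
			sk_setup_caps(sk, dst);	/* cache dst, derive sk_route_caps */
		}

		return dst;
	}
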
2144 bool sk_mc_loop(const struct sock *sk);
2146 static inline bool sk_can_gso(const struct sock *sk) in sk_can_gso() argument
2148 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); in sk_can_gso()
2151 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2153 static inline void sk_gso_disable(struct sock *sk) in sk_gso_disable() argument
2155 sk->sk_gso_disabled = 1; in sk_gso_disable()
2156 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; in sk_gso_disable()
2159 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_do_copy_data_nocache() argument
2168 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { in skb_do_copy_data_nocache()
2177 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_add_data_nocache() argument
2182 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), in skb_add_data_nocache()
2190 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, in skb_copy_to_page_nocache() argument
2197 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, in skb_copy_to_page_nocache()
2203 sk_wmem_queued_add(sk, copy); in skb_copy_to_page_nocache()
2204 sk_mem_charge(sk, copy); in skb_copy_to_page_nocache()
2214 static inline int sk_wmem_alloc_get(const struct sock *sk) in sk_wmem_alloc_get() argument
2216 return refcount_read(&sk->sk_wmem_alloc) - 1; in sk_wmem_alloc_get()
2225 static inline int sk_rmem_alloc_get(const struct sock *sk) in sk_rmem_alloc_get() argument
2227 return atomic_read(&sk->sk_rmem_alloc); in sk_rmem_alloc_get()
2236 static inline bool sk_has_allocations(const struct sock *sk) in sk_has_allocations() argument
2238 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); in sk_has_allocations()
2299 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) in skb_set_hash_from_sk() argument
2302 u32 txhash = READ_ONCE(sk->sk_txhash); in skb_set_hash_from_sk()
2310 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2320 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) in skb_set_owner_r() argument
2323 skb->sk = sk; in skb_set_owner_r()
2325 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in skb_set_owner_r()
2326 sk_mem_charge(sk, skb->truesize); in skb_set_owner_r()
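
skb_set_owner_r() is the receive-side charging point. A sketch of the order used around it (compare __sock_queue_rcv_skb(): account first, then assign ownership, then queue and wake readers); my_queue_rcv is hypothetical and skips socket filters:

	#include <net/sock.h>

	static int my_queue_rcv(struct sock *sk, struct sk_buff *skb)
	{
		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >
		    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
			return -ENOMEM;			/* receive buffer full */

		if (!sk_rmem_schedule(sk, skb, skb->truesize))
			return -ENOBUFS;		/* over protocol memory limits */

		skb_set_owner_r(skb, sk);		/* charge sk_rmem_alloc + fwd alloc */
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk);			/* wake up readers */

		return 0;
	}
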
2329 static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk) in skb_set_owner_sk_safe() argument
2331 if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { in skb_set_owner_sk_safe()
2334 skb->sk = sk; in skb_set_owner_sk_safe()
2340 static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk) in skb_clone_and_charge_r() argument
2342 skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); in skb_clone_and_charge_r()
2344 if (sk_rmem_schedule(sk, skb, skb->truesize)) { in skb_clone_and_charge_r()
2345 skb_set_owner_r(skb, sk); in skb_clone_and_charge_r()
2362 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2365 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2367 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
2369 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2371 void (*destructor)(struct sock *sk,
2373 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2375 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
2378 static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_rcv_skb() argument
2380 return sock_queue_rcv_skb_reason(sk, skb, NULL); in sock_queue_rcv_skb()
2383 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2384 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2390 static inline int sock_error(struct sock *sk) in sock_error() argument
2397 if (likely(data_race(!sk->sk_err))) in sock_error()
2400 err = xchg(&sk->sk_err, 0); in sock_error()
2404 void sk_error_report(struct sock *sk);
2406 static inline unsigned long sock_wspace(struct sock *sk) in sock_wspace() argument
2410 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in sock_wspace()
2411 amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc); in sock_wspace()
2422 static inline void sk_set_bit(int nr, struct sock *sk) in sk_set_bit() argument
2425 !sock_flag(sk, SOCK_FASYNC)) in sk_set_bit()
2428 set_bit(nr, &sk->sk_wq_raw->flags); in sk_set_bit()
2431 static inline void sk_clear_bit(int nr, struct sock *sk) in sk_clear_bit() argument
2434 !sock_flag(sk, SOCK_FASYNC)) in sk_clear_bit()
2437 clear_bit(nr, &sk->sk_wq_raw->flags); in sk_clear_bit()
2440 static inline void sk_wake_async(const struct sock *sk, int how, int band) in sk_wake_async() argument
2442 if (sock_flag(sk, SOCK_FASYNC)) { in sk_wake_async()
2444 sock_wake_async(rcu_dereference(sk->sk_wq), how, band); in sk_wake_async()
2449 static inline void sk_wake_async_rcu(const struct sock *sk, int how, int band) in sk_wake_async_rcu() argument
2451 if (unlikely(sock_flag(sk, SOCK_FASYNC))) in sk_wake_async_rcu()
2452 sock_wake_async(rcu_dereference(sk->sk_wq), how, band); in sk_wake_async_rcu()
2465 static inline void sk_stream_moderate_sndbuf(struct sock *sk) in sk_stream_moderate_sndbuf() argument
2469 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) in sk_stream_moderate_sndbuf()
2472 val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); in sk_stream_moderate_sndbuf()
2473 val = max_t(u32, val, sk_unused_reserved_mem(sk)); in sk_stream_moderate_sndbuf()
2475 WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF)); in sk_stream_moderate_sndbuf()
2494 static inline struct page_frag *sk_page_frag(struct sock *sk) in sk_page_frag() argument
2496 if (sk->sk_use_task_frag) in sk_page_frag()
2499 return &sk->sk_frag; in sk_page_frag()
2502 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
2507 static inline bool sock_writeable(const struct sock *sk) in sock_writeable() argument
2509 return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1); in sock_writeable()
2522 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) in sock_rcvtimeo() argument
2524 return noblock ? 0 : sk->sk_rcvtimeo; in sock_rcvtimeo()
2527 static inline long sock_sndtimeo(const struct sock *sk, bool noblock) in sock_sndtimeo() argument
2529 return noblock ? 0 : sk->sk_sndtimeo; in sock_sndtimeo()
2532 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) in sock_rcvlowat() argument
2534 int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len); in sock_rcvlowat()
2565 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) in sock_skb_set_dropcount() argument
2567 SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ? in sock_skb_set_dropcount()
2568 atomic_read(&sk->sk_drops) : 0; in sock_skb_set_dropcount()
2571 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb) in sk_drops_add() argument
2575 atomic_add(segs, &sk->sk_drops); in sk_drops_add()
2578 static inline ktime_t sock_read_timestamp(struct sock *sk) in sock_read_timestamp() argument
2585 seq = read_seqbegin(&sk->sk_stamp_seq); in sock_read_timestamp()
2586 kt = sk->sk_stamp; in sock_read_timestamp()
2587 } while (read_seqretry(&sk->sk_stamp_seq, seq)); in sock_read_timestamp()
2591 return READ_ONCE(sk->sk_stamp); in sock_read_timestamp()
2595 static inline void sock_write_timestamp(struct sock *sk, ktime_t kt) in sock_write_timestamp() argument
2598 write_seqlock(&sk->sk_stamp_seq); in sock_write_timestamp()
2599 sk->sk_stamp = kt; in sock_write_timestamp()
2600 write_sequnlock(&sk->sk_stamp_seq); in sock_write_timestamp()
2602 WRITE_ONCE(sk->sk_stamp, kt); in sock_write_timestamp()
2606 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2608 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2612 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) in sock_recv_timestamp() argument
2615 u32 tsflags = READ_ONCE(sk->sk_tsflags); in sock_recv_timestamp()
2623 if (sock_flag(sk, SOCK_RCVTSTAMP) || in sock_recv_timestamp()
2628 __sock_recv_timestamp(msg, sk, skb); in sock_recv_timestamp()
2630 sock_write_timestamp(sk, kt); in sock_recv_timestamp()
2632 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb_wifi_acked_valid(skb)) in sock_recv_timestamp()
2633 __sock_recv_wifi_status(msg, sk, skb); in sock_recv_timestamp()
2636 void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
2640 static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, in sock_recv_cmsgs() argument
2649 if (sk->sk_flags & FLAGS_RECV_CMSGS || in sock_recv_cmsgs()
2650 READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY) in sock_recv_cmsgs()
2651 __sock_recv_cmsgs(msg, sk, skb); in sock_recv_cmsgs()
2652 else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) in sock_recv_cmsgs()
2653 sock_write_timestamp(sk, skb->tstamp); in sock_recv_cmsgs()
2654 else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP)) in sock_recv_cmsgs()
2655 sock_write_timestamp(sk, 0); in sock_recv_cmsgs()
2669 static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags, in _sock_tx_timestamp() argument
2676 *tskey = atomic_inc_return(&sk->sk_tskey) - 1; in _sock_tx_timestamp()
2678 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) in _sock_tx_timestamp()
2682 static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags, in sock_tx_timestamp() argument
2685 _sock_tx_timestamp(sk, tsflags, tx_flags, NULL); in sock_tx_timestamp()
2690 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags, in skb_setup_tx_timestamp()
2694 static inline bool sk_is_inet(const struct sock *sk) in sk_is_inet() argument
2696 int family = READ_ONCE(sk->sk_family); in sk_is_inet()
2701 static inline bool sk_is_tcp(const struct sock *sk) in sk_is_tcp() argument
2703 return sk_is_inet(sk) && in sk_is_tcp()
2704 sk->sk_type == SOCK_STREAM && in sk_is_tcp()
2705 sk->sk_protocol == IPPROTO_TCP; in sk_is_tcp()
2708 static inline bool sk_is_udp(const struct sock *sk) in sk_is_udp() argument
2710 return sk_is_inet(sk) && in sk_is_udp()
2711 sk->sk_type == SOCK_DGRAM && in sk_is_udp()
2712 sk->sk_protocol == IPPROTO_UDP; in sk_is_udp()
2715 static inline bool sk_is_stream_unix(const struct sock *sk) in sk_is_stream_unix() argument
2717 return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM; in sk_is_stream_unix()
2720 static inline bool sk_is_vsock(const struct sock *sk) in sk_is_vsock() argument
2722 return sk->sk_family == AF_VSOCK; in sk_is_vsock()
2733 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) in sk_eat_skb() argument
2735 __skb_unlink(skb, &sk->sk_receive_queue); in sk_eat_skb()
2752 static inline bool sk_fullsock(const struct sock *sk) in sk_fullsock() argument
2754 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); in sk_fullsock()
2758 sk_is_refcounted(struct sock *sk) in sk_is_refcounted() argument
2761 return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE); in sk_is_refcounted()
2772 struct sock *sk = skb->sk; in sk_validate_xmit_skb() local
2774 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { in sk_validate_xmit_skb()
2775 skb = sk->sk_validate_xmit_skb(sk, dev, skb); in sk_validate_xmit_skb()
2789 static inline bool sk_listener(const struct sock *sk) in sk_listener() argument
2791 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); in sk_listener()
2794 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
2795 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2798 bool sk_ns_capable(const struct sock *sk,
2800 bool sk_capable(const struct sock *sk, int cap);
2801 bool sk_net_capable(const struct sock *sk, int cap);
2803 void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2826 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) in sk_get_wmem0() argument
2830 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset)); in sk_get_wmem0()
2835 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) in sk_get_rmem0() argument
2839 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset)); in sk_get_rmem0()
2848 static inline void sk_pacing_shift_update(struct sock *sk, int val) in sk_pacing_shift_update() argument
2850 if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val) in sk_pacing_shift_update()
2852 WRITE_ONCE(sk->sk_pacing_shift, val); in sk_pacing_shift_update()
2860 static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) in sk_dev_equal_l3scope() argument
2862 int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); in sk_dev_equal_l3scope()
2868 mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif); in sk_dev_equal_l3scope()
2875 void sock_def_readable(struct sock *sk);
2877 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
2878 void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
2879 int sock_set_timestamping(struct sock *sk, int optname,
2882 void sock_enable_timestamps(struct sock *sk);
2883 void sock_no_linger(struct sock *sk);
2884 void sock_set_keepalive(struct sock *sk);
2885 void sock_set_priority(struct sock *sk, u32 priority);
2886 void sock_set_rcvbuf(struct sock *sk, int val);
2887 void sock_set_mark(struct sock *sk, u32 val);
2888 void sock_set_reuseaddr(struct sock *sk);
2889 void sock_set_reuseport(struct sock *sk);
2890 void sock_set_sndtimeo(struct sock *sk, s64 secs);
2892 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
2898 int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
2900 int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
2901 static inline bool sk_is_readable(struct sock *sk) in sk_is_readable() argument
2903 if (sk->sk_prot->sock_is_readable) in sk_is_readable()
2904 return sk->sk_prot->sock_is_readable(sk); in sk_is_readable()