/include/net/

sock.h
  1473  sk->sk_prot->unhash(sk);  in __sk_prot_rehash()
  1474  return sk->sk_prot->hash(sk);  in __sk_prot_rehash()
  1818  skb->sk = sk;  in skb_set_owner_edemux()
  2088  parent->sk = sk;  in sock_graft()
  2214  struct sock *sk = skb->sk;  in sock_confirm_neigh()  local
  2398  skb->sk = sk;  in skb_set_owner_r()
  2409  skb->sk = sk;  in skb_set_owner_sk_safe()
  2866  return sk && sk_fullsock(sk) && sock_flag(sk, SOCK_WIFI_STATUS);  in sk_requests_wifi_status()
  2877  struct sock *sk = skb->sk;  in sk_validate_xmit_skb()  local
  2879  if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {  in sk_validate_xmit_skb()
  [all …]
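
Several of these sock.h hits share one guard pattern: skb->sk may be NULL or point at a request/timewait mini-socket, so full-socket fields are only read after an `sk && sk_fullsock(sk)` check, as in sk_requests_wifi_status() and sk_validate_xmit_skb(). A minimal sketch of that pattern; the helper name my_skb_wants_tx_status is invented for illustration:

```c
#include <net/sock.h>
#include <linux/skbuff.h>

/* Hypothetical helper: report whether the skb's owning socket asked for
 * wifi TX status.  skb->sk can be NULL (e.g. forwarded traffic) or a
 * request/timewait socket, so guard with sk_fullsock() before reading
 * flags that only exist on a full struct sock. */
static bool my_skb_wants_tx_status(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	return sk && sk_fullsock(sk) && sock_flag(sk, SOCK_WIFI_STATUS);
}
```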
|
llc_c_ev.h
  134  int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
  141  int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
  144  int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
  146  int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
  164  int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
  166  int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
  170  int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
  172  int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
  212  int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
  220  return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <  in llc_conn_space()
  [all …]
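
The llc_conn_space() hit is the classic receive-buffer admission test: an incoming skb is only accepted if its truesize still fits under sk_rcvbuf given the current sk_rmem_alloc accounting. A minimal sketch of that check with an invented helper name:

```c
#include <net/sock.h>
#include <linux/skbuff.h>

/* Hypothetical receive-space check in the style of llc_conn_space():
 * accept the skb only if charging its true size would stay below the
 * socket's receive buffer limit. */
static bool my_rx_fits(const struct sock *sk, const struct sk_buff *skb)
{
	unsigned int rcvbuf = READ_ONCE(sk->sk_rcvbuf);

	return atomic_read(&sk->sk_rmem_alloc) + skb->truesize < rcvbuf;
}
```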
|
llc_c_ac.h
  100  int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
  102  int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
  103  int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
  104  int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
  106  int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
  142  int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
  168  int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
  169  int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
  170  int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
  171  int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
  [all …]
|
inet_connection_sock.h
  38   int (*rebuild_header)(struct sock *sk);
  51   void (*mtu_reduced)(struct sock *sk);
  149  return (void *)inet_csk(sk)->icsk_ca_priv;  in inet_csk_ca()
  184  memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));  in inet_csk_delack_init()
  230  sk, what, when, (void *)_THIS_IP_);  in inet_csk_reset_xmit_timer()
  288  return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);  in inet_csk_reqsk_queue_is_full()
  305  sock_set_flag(sk, SOCK_DEAD);  in inet_csk_prepare_for_destroy_sock()
  326  struct sock *sk);
  332  inet_csk(sk)->icsk_ack.pingpong =  in inet_csk_enter_pingpong_mode()
  338  inet_csk(sk)->icsk_ack.pingpong = 0;  in inet_csk_exit_pingpong_mode()
  [all …]
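
inet_csk_ca() simply hands back the icsk_ca_priv scratch area of the inet connection socket; TCP congestion-control modules keep their per-connection state there. A sketch of that use with a made-up state structure, which must fit in ICSK_CA_PRIV_SIZE:

```c
#include <net/inet_connection_sock.h>
#include <net/tcp.h>

/* Hypothetical per-connection state for a congestion-control module;
 * it lives in icsk_ca_priv, which is what inet_csk_ca(sk) returns. */
struct my_ca_state {
	u32 epoch_start;
	u32 cnt;
};

static void my_ca_init(struct sock *sk)
{
	struct my_ca_state *ca = inet_csk_ca(sk);

	BUILD_BUG_ON(sizeof(struct my_ca_state) > ICSK_CA_PRIV_SIZE);
	ca->epoch_start = 0;
	ca->cnt = 0;
}
```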
|
tcp.h
  806   inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));  in tcp_bound_rto()
  836   atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&  in tcp_fast_path_check()
  1634  return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -  in tcp_space()
  1657  __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);  in tcp_adjust_rcv_ssthresh()
  2204  tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);  in tcp_highest_sack_reset()
  2431  out->md5_key = tp->af_specific->md5_lookup(sk, sk);  in tcp_get_current_key()
  2505  tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,  in tcp_warn_once()
  2506  tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,  in tcp_warn_once()
  2507  tcp_sk(sk)->tlp_high_seq, sk->sk_state,  in tcp_warn_once()
  2509  tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,  in tcp_warn_once()
  [all …]
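
The tcp_space() hit derives the remaining receive budget from sk_rcvbuf minus what is already queued, scaled by tcp_win_from_space() to account for per-skb overhead. A sketch of the same computation as a standalone helper (helper name invented, assumes a full TCP socket):

```c
#include <net/tcp.h>

/* Sketch in the spirit of tcp_space(): take the configured receive buffer,
 * subtract what is already charged to sk_rmem_alloc, and let
 * tcp_win_from_space() scale the result by the kernel's overhead estimate. */
static int my_estimate_rcv_space(const struct sock *sk)
{
	int unused = READ_ONCE(sk->sk_rcvbuf) -
		     atomic_read(&sk->sk_rmem_alloc);

	return tcp_win_from_space(sk, unused);
}
```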
|
inet_sock.h
  214  struct sock sk;  member
  330  if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)  in sk_to_full_sk()
  331  sk = inet_reqsk(sk)->rsk_listener;  in sk_to_full_sk()
  332  if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)  in sk_to_full_sk()
  333  sk = NULL;  in sk_to_full_sk()
  335  return sk;  in sk_to_full_sk()
  342  if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)  in sk_const_to_full_sk()
  343  sk = ((const struct request_sock *)sk)->rsk_listener;  in sk_const_to_full_sk()
  344  if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)  in sk_const_to_full_sk()
  345  sk = NULL;  in sk_const_to_full_sk()
  [all …]
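
sk_to_full_sk() maps a NEW_SYN_RECV request socket to its listener and a TIME_WAIT socket to NULL, so callers that may be handed a mini-socket (early demux, BPF hooks) convert first and re-check the result. A sketch with an invented helper that reads sk_mark safely:

```c
#include <net/inet_sock.h>
#include <net/sock.h>

/* Sketch: map whatever socket we were handed to the owning full socket
 * before touching fields that only exist on a full struct sock.
 * sk_to_full_sk() can return NULL (timewait), hence the re-check. */
static u32 my_sk_mark_or_zero(struct sock *sk)
{
	struct sock *full = sk_to_full_sk(sk);

	if (!full || !sk_fullsock(full))
		return 0;

	return READ_ONCE(full->sk_mark);
}
```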
|
busy_poll.h
  69   static inline bool sk_can_busy_loop(struct sock *sk)  in sk_can_busy_loop()  argument
  105  unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);  in sk_busy_loop_timeout()
  120  unsigned int napi_id = READ_ONCE(sk->sk_napi_id);  in sk_busy_loop()
  124  READ_ONCE(sk->sk_prefer_busy_poll),  in sk_busy_loop()
  153  WRITE_ONCE(sk->sk_napi_id, skb->napi_id);  in sk_mark_napi_id()
  155  sk_rx_queue_update(sk, skb);  in sk_mark_napi_id()
  166  WRITE_ONCE(sk->sk_napi_id, skb->napi_id);  in sk_mark_napi_id_set()
  168  sk_rx_queue_set(sk, skb);  in sk_mark_napi_id_set()
  174  if (!READ_ONCE(sk->sk_napi_id))  in __sk_mark_napi_id_once()
  175  WRITE_ONCE(sk->sk_napi_id, napi_id);  in __sk_mark_napi_id_once()
  [all …]
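
These hits show the two per-socket inputs to busy polling: the microsecond budget sk_ll_usec and the NAPI id recorded by sk_mark_napi_id() on the receive path, both accessed with READ_ONCE()/WRITE_ONCE(). A rough sketch of the "is busy polling worth trying" test, assuming CONFIG_NET_RX_BUSY_POLL (both fields only exist under that config):

```c
#include <net/busy_poll.h>
#include <net/sock.h>

/* Sketch in the spirit of sk_can_busy_loop()/sk_busy_loop(): the socket is
 * a busy-poll candidate only if it has a nonzero polling budget and its
 * receive path has already recorded a valid NAPI id via sk_mark_napi_id().
 * Both fields exist only under CONFIG_NET_RX_BUSY_POLL and can change
 * concurrently, hence READ_ONCE(). */
static bool my_sk_busy_poll_candidate(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
}
```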
|
proto_memory.h
  13  return sk->sk_prot->memory_pressure != NULL;  in sk_has_memory_pressure()
  26  return proto_memory_pressure(sk->sk_prot);  in sk_under_global_memory_pressure()
  31  if (!sk->sk_prot->memory_pressure)  in sk_under_memory_pressure()
  34  if (mem_cgroup_sockets_enabled && sk->sk_memcg &&  in sk_under_memory_pressure()
  35  mem_cgroup_under_socket_pressure(sk->sk_memcg))  in sk_under_memory_pressure()
  38  return !!READ_ONCE(*sk->sk_prot->memory_pressure);  in sk_under_memory_pressure()
  48  sk_memory_allocated(const struct sock *sk)  in sk_memory_allocated()  argument
  50  return proto_memory_allocated(sk->sk_prot);  in sk_memory_allocated()
  62  sk_memory_allocated_add(const struct sock *sk, int val)  in sk_memory_allocated_add()  argument
  64  struct proto *proto = sk->sk_prot;  in sk_memory_allocated_add()
  [all …]
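
sk_under_memory_pressure() layers two signals: memcg-level socket pressure for the socket's cgroup, then the protocol's global memory_pressure flag (some protocols do not track one at all). The hits above contain essentially the whole function; a self-contained restatement of the same logic, under an invented name:

```c
#include <net/proto_memory.h>
#include <net/sock.h>
#include <linux/memcontrol.h>

/* Sketch of the two-level check done by sk_under_memory_pressure():
 * consult memcg socket pressure first, then the protocol's global
 * memory_pressure flag, if the protocol tracks one at all. */
static bool my_sk_feels_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;	/* protocol does not track memory pressure */

	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return !!READ_ONCE(*sk->sk_prot->memory_pressure);
}
```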
|
inet6_hashtables.h
  99   if (sk)  in __inet6_lookup()
  100  return sk;  in __inet6_lookup()
  116  if (!sk)  in inet6_steal_sock()
  120  return sk;  in inet6_steal_sock()
  124  return sk;  in inet6_steal_sock()
  127  return sk;  in inet6_steal_sock()
  129  return sk;  in inet6_steal_sock()
  136  return sk;  in inet6_steal_sock()
  159  if (IS_ERR(sk))  in __inet6_lookup_skb()
  161  if (sk)  in __inet6_lookup_skb()
  [all …]
|
sock_reuseport.h
  32  extern int reuseport_alloc(struct sock *sk, bool bind_inany);
  33  extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
  35  extern void reuseport_detach_sock(struct sock *sk);
  36  void reuseport_stop_listen_sock(struct sock *sk);
  37  extern struct sock *reuseport_select_sock(struct sock *sk,
  41  struct sock *reuseport_migrate_sock(struct sock *sk,
  45  extern int reuseport_detach_prog(struct sock *sk);
  47  static inline bool reuseport_has_conns(struct sock *sk)  in reuseport_has_conns()  argument
  53  reuse = rcu_dereference(sk->sk_reuseport_cb);  in reuseport_has_conns()
  61  void reuseport_has_conns_set(struct sock *sk);
  [all …]
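
reuseport_has_conns() shows the access rule for the group pointer: sk_reuseport_cb is RCU-managed, so it is sampled under rcu_read_lock() and only simple fields are read before unlocking. A sketch of that pattern with a hypothetical wrapper:

```c
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <linux/rcupdate.h>

/* Sketch of the reuseport_has_conns() access pattern: dereference the
 * RCU-protected group under rcu_read_lock() and only sample a flag
 * before dropping the lock. */
static bool my_reuseport_group_has_conns(struct sock *sk)
{
	struct sock_reuseport *reuse;
	bool ret = false;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (reuse && reuse->has_conns)
		ret = true;
	rcu_read_unlock();

	return ret;
}
```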
|
tls.h
  92   struct sock *sk;  member
  110  struct sock *sk;  member
  163  void (*sk_destruct)(struct sock *sk);
  255  struct sock *sk;  member
  356  struct sock *sk = skb->sk;  in tls_is_skb_tx_device_offloaded()  local
  358  return sk && sk_fullsock(sk) &&  in tls_is_skb_tx_device_offloaded()
  398  if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))  in tls_sw_has_ctx_tx()
  401  ctx = tls_get_ctx(sk);  in tls_sw_has_ctx_tx()
  411  if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))  in tls_sw_has_ctx_rx()
  414  ctx = tls_get_ctx(sk);  in tls_sw_has_ctx_rx()
  [all …]
|
inet_hashtables.h
  411  if (sk)  in __inet_lookup()
  412  return sk;  in __inet_lookup()
  432  sk = NULL;  in inet_lookup()
  433  return sk;  in inet_lookup()
  446  if (!sk)  in inet_steal_sock()
  450  return sk;  in inet_steal_sock()
  454  return sk;  in inet_steal_sock()
  459  return sk;  in inet_steal_sock()
  466  return sk;  in inet_steal_sock()
  492  if (sk)  in __inet_lookup_skb()
  [all …]
|
espintcp.h
  10  int espintcp_push_skb(struct sock *sk, struct sk_buff *skb);
  11  int espintcp_queue_out(struct sock *sk, struct sk_buff *skb);
  12  bool tcp_is_ulp_esp(struct sock *sk);
  26  void (*saved_data_ready)(struct sock *sk);
  27  void (*saved_write_space)(struct sock *sk);
  28  void (*saved_destruct)(struct sock *sk);
  33  static inline struct espintcp_ctx *espintcp_getctx(const struct sock *sk)  in espintcp_getctx()  argument
  35  const struct inet_connection_sock *icsk = inet_csk(sk);  in espintcp_getctx()
|
tcp_ao.h
  182  const struct sock *sk, const struct sk_buff *skb,
  194  void tcp_ao_destroy_sock(struct sock *sk, bool twsk);
  201  enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
  227  const struct sock *sk,
  244  const struct sock *sk, __be32 sisn,
  260  void tcp_ao_established(struct sock *sk);
  262  void tcp_ao_connect_init(struct sock *sk);
  304  static inline void tcp_ao_established(struct sock *sk)  in tcp_ao_established()  argument
  317  static inline void tcp_ao_connect_init(struct sock *sk)  in tcp_ao_connect_init()  argument
  331  static inline int tcp_ao_get_repair(struct sock *sk,  in tcp_ao_get_repair()  argument
  [all …]
|
udp.h
  289  struct udp_sock *up = udp_sk(sk);  in udp_lib_init_sock()
  311  sk_common_release(sk);  in udp_lib_close()
  325  void udp4_hash4(struct sock *sk);
  371  return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);  in udp_rqueue_get()
  412  int udp_init_sock(struct sock *sk);
  532  #define __UDPX_MIB(sk, ipv4) \  argument
  534  ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
  536  (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
  542  IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
  548  __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
  [all …]
|
route.h
  138  const struct sock *sk;  in inet_sk_init_flowi4()  local
  151  sk = &inet->sk;  in inet_sk_init_flowi4()
  152  flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),  in inet_sk_init_flowi4()
  153  ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),  in inet_sk_init_flowi4()
  154  sk->sk_protocol, inet_sk_flowi_flags(sk), daddr,  in inet_sk_init_flowi4()
  207  flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,  in ip_route_output_ports()
  208  sk ? ip_sock_rt_scope(sk) : RT_SCOPE_UNIVERSE,  in ip_route_output_ports()
  209  proto, sk ? inet_sk_flowi_flags(sk) : 0,  in ip_route_output_ports()
  211  if (sk)  in ip_route_output_ports()
  332  flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),  in ip_route_connect_init()
  [all …]
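
The flowi4_init_output() calls above follow a "socket or defaults" convention: with a socket, the routing key inherits its fwmark, routing scope and flow flags; without one, neutral defaults are used. A reduced sketch that fills only those fields directly (an invented helper; a real lookup would still go through flowi4_init_output()/ip_route_output_ports()):

```c
#include <net/route.h>
#include <net/inet_sock.h>
#include <net/sock.h>

/* Sketch of the "socket or defaults" pattern seen in ip_route_output_ports():
 * only the fields visible in the hits above are filled in here. */
static void my_fill_flow_from_sk(struct flowi4 *fl4, const struct sock *sk,
				 int oif, __u8 proto)
{
	fl4->flowi4_oif = oif;
	fl4->flowi4_mark = sk ? READ_ONCE(sk->sk_mark) : 0;
	fl4->flowi4_scope = sk ? ip_sock_rt_scope(sk) : RT_SCOPE_UNIVERSE;
	fl4->flowi4_flags = sk ? inet_sk_flowi_flags(sk) : 0;
	fl4->flowi4_proto = proto;
}
```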
|
/include/linux/

udp.h
  83   void (*encap_destroy)(struct sock *sk);
  89   int (*gro_complete)(struct sock *sk,
  113  #define udp_test_bit(nr, sk) \  argument
  115  #define udp_set_bit(nr, sk) \  argument
  117  #define udp_test_and_set_bit(nr, sk) \  argument
  119  #define udp_clear_bit(nr, sk) \  argument
  121  #define udp_assign_bit(nr, sk, val) \  argument
  130  sk_set_peek_off(sk, val);  in udp_set_peek_off()
  190  !udp_test_bit(ACCEPT_L4, sk))  in udp_unexpected_gso()
  212  udp_set_bit(ACCEPT_L4, sk);  in udp_allow_gso()
  [all …]
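
udp_test_bit()/udp_set_bit() and friends address per-socket UDP behaviour toggles by name, as udp_unexpected_gso() and udp_allow_gso() do with ACCEPT_L4 in the hits above. A minimal sketch of a caller (the helper name is invented):

```c
#include <linux/udp.h>
#include <net/sock.h>

/* Sketch: flip a per-socket UDP behaviour flag the way udp_allow_gso()
 * does.  ACCEPT_L4 is one of the flags visible in the hits above. */
static void my_udp_accept_l4_gso(struct sock *sk)
{
	if (!udp_test_bit(ACCEPT_L4, sk))
		udp_set_bit(ACCEPT_L4, sk);
}
```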
|
bpf-cgroup.h
  115  int __cgroup_bpf_run_filter_skb(struct sock *sk,
  119  int __cgroup_bpf_run_filter_sk(struct sock *sk,
  195  cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && sk && \
  196  sk_fullsock(sk)) \
  207  typeof(sk) __sk = sk_to_full_sk(sk); \
  250  lock_sock(sk); \
  253  release_sock(sk); \
  268  lock_sock(sk); \
  271  release_sock(sk); \
  281  (sk)->sk_prot->pre_connect)
  [all …]
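
The BPF_CGROUP_RUN_PROG_* wrappers above bracket the setsockopt/getsockopt hooks with lock_sock()/release_sock(), since the programs may touch socket state from process context. A generic sketch of that bracketing (the wrapper is hypothetical):

```c
#include <net/sock.h>

/* Sketch of the lock_sock()/release_sock() bracketing seen in the
 * BPF_CGROUP_RUN_PROG_* wrappers: take the socket lock around any
 * callback that may modify socket state from process context. */
static int my_locked_sk_op(struct sock *sk, int (*op)(struct sock *sk))
{
	int ret;

	lock_sock(sk);
	ret = op(sk);
	release_sock(sk);

	return ret;
}
```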
|
sock_diag.h
  19  int (*get_info)(struct sk_buff *skb, struct sock *sk);
  34  u64 __sock_gen_cookie(struct sock *sk);
  36  static inline u64 sock_gen_cookie(struct sock *sk)  in sock_gen_cookie()  argument
  41  cookie = __sock_gen_cookie(sk);  in sock_gen_cookie()
  57  switch (sk->sk_family) {  in sock_diag_destroy_group()
  59  if (sk->sk_type == SOCK_RAW)  in sock_diag_destroy_group()
  62  switch (sk->sk_protocol) {  in sock_diag_destroy_group()
  71  if (sk->sk_type == SOCK_RAW)  in sock_diag_destroy_group()
  74  switch (sk->sk_protocol) {  in sock_diag_destroy_group()
  90  const struct net *n = sock_net(sk);  in sock_diag_has_destroy_listeners()
  [all …]
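
sock_gen_cookie() (visible in the hits) tolerates a NULL socket and yields a stable 64-bit cookie, which diag and BPF code use instead of raw kernel pointers. A small usage sketch with an invented reporting helper:

```c
#include <linux/sock_diag.h>
#include <linux/printk.h>
#include <linux/socket.h>
#include <net/sock.h>

/* Sketch: identify a socket by its cookie rather than its address.
 * sock_gen_cookie() handles sk == NULL by returning 0. */
static u64 my_report_sk_cookie(struct sock *sk)
{
	u64 cookie = sock_gen_cookie(sk);

	pr_debug("socket cookie %llu, family %d\n",
		 (unsigned long long)cookie,
		 sk ? sk->sk_family : AF_UNSPEC);
	return cookie;
}
```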
|
/include/crypto/

if_alg.h
  26   struct sock sk;  member
  48   int (*accept)(void *private, struct sock *sk);
  96   struct sock *sk;  member
  168  void af_alg_release_parent(struct sock *sk);
  176  return (struct alg_sock *)sk;  in alg_sk()
  185  static inline int af_alg_sndbuf(struct sock *sk)  in af_alg_sndbuf()  argument
  187  struct alg_sock *ask = alg_sk(sk);  in af_alg_sndbuf()
  202  return PAGE_SIZE <= af_alg_sndbuf(sk);  in af_alg_writable()
  213  struct alg_sock *ask = alg_sk(sk);  in af_alg_rcvbuf()
  228  return PAGE_SIZE <= af_alg_rcvbuf(sk);  in af_alg_readable()
  [all …]
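
alg_sk() works because struct alg_sock embeds struct sock as its first member ("struct sock sk;  member" above), so the plain cast is valid for any AF_ALG socket. A sketch of the downcast, returning the accept()-side parent socket that alg_sock tracks:

```c
#include <crypto/if_alg.h>
#include <net/sock.h>

/* Sketch of the alg_sk() idiom: struct alg_sock embeds struct sock as its
 * first member, so an AF_ALG socket pointer can be cast to the containing
 * structure, exactly as alg_sk() does. */
static struct sock *my_alg_parent(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);

	/* The parent is the listening AF_ALG socket accept() was called on. */
	return ask->parent;
}
```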
|
/include/trace/events/

handshake.h
  94   const struct sock *sk
  96   TP_ARGS(net, req, sk),
  104  __entry->sk = sk;
  118  TP_ARGS(net, req, sk))
  124  const struct sock *sk,
  158  const struct sock *sk,
  170  __entry->sk = sk;
  190  const struct sock *sk,
  246  const struct sock *sk,
  258  __entry->sk = sk;
  [all …]
|
sock.h
  75   TP_ARGS(sk, skb),
  164  __entry->skaddr = sk;
  180  sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
  197  TP_ARGS(sk),
  228  sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
  244  TP_ARGS(sk),
  254  __entry->skaddr = sk;
  274  __field(void *, sk)
  282  __entry->sk = sk;
  301  TP_ARGS(sk, ret, flags)
  [all …]
|
tcp.h
  20   TP_ARGS(sk, skb, err),
  55   sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
  103  __entry->state = sk ? sk->sk_state : 0;
  108  if (sk && sk_fullsock(sk)) {
  142  TP_ARGS(sk),
  173  sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
  190  TP_ARGS(sk)
  197  TP_ARGS(sk)
  204  TP_ARGS(sk)
  260  sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
  [all …]
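
The trace headers record the socket as an opaque address and only sample fields such as sk_state when the socket is known to be a full socket, as the tcp.h hits show. A minimal, self-contained sketch of such an event under an invented trace system and event name (my_subsys / my_sk_event):

```c
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_subsys

#if !defined(_TRACE_MY_SUBSYS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MY_SUBSYS_H

#include <linux/tracepoint.h>
#include <net/sock.h>

/* Record the socket as an opaque address; sk_state is only sampled when
 * the socket is non-NULL and a full socket, mirroring the hits above. */
TRACE_EVENT(my_sk_event,
	TP_PROTO(const struct sock *sk),

	TP_ARGS(sk),

	TP_STRUCT__entry(
		__field(const void *, skaddr)
		__field(int, state)
	),

	TP_fast_assign(
		__entry->skaddr = sk;
		__entry->state = (sk && sk_fullsock(sk)) ? sk->sk_state : 0;
	),

	TP_printk("skaddr=%p state=%d", __entry->skaddr, __entry->state)
);

#endif /* _TRACE_MY_SUBSYS_H */

#include <trace/define_trace.h>
```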
|
/include/net/phonet/

phonet.h
  28   struct sock sk;  member
  34   static inline struct pn_sock *pn_sk(struct sock *sk)  in pn_sk()  argument
  36   return (struct pn_sock *)sk;  in pn_sk()
  45   int pn_sock_hash(struct sock *sk);
  46   void pn_sock_unhash(struct sock *sk);
  51   int pn_sock_unbind_res(struct sock *sk, u8 res);
  52   void pn_sock_unbind_all_res(struct sock *sk);
  54   int pn_skb_send(struct sock *sk, struct sk_buff *skb,
  112  static inline bool sk_is_phonet(struct sock *sk)  in sk_is_phonet()  argument
  114  return sk->sk_family == PF_PHONET;  in sk_is_phonet()
  [all …]
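
pn_sk() is the usual embedded-first-member downcast, and sk_is_phonet() is the family check that makes it safe. A small sketch combining the two (wrapper name invented):

```c
#include <net/phonet/phonet.h>
#include <net/sock.h>

/* Sketch: only PF_PHONET sockets embed struct pn_sock, so check the
 * address family with sk_is_phonet() before downcasting via pn_sk(). */
static struct pn_sock *my_pn_sk_or_null(struct sock *sk)
{
	if (!sk_is_phonet(sk))
		return NULL;

	return pn_sk(sk);
}
```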
|
gprs.h
  18  int pep_writeable(struct sock *sk);
  19  int pep_write(struct sock *sk, struct sk_buff *skb);
  20  struct sk_buff *pep_read(struct sock *sk);
  22  int gprs_attach(struct sock *sk);
  23  void gprs_detach(struct sock *sk);
|