/net/mac80211/

chan.c
      232  return ctx;    in ieee80211_find_reservation_chanctx()
      411  ctx->conf.min_def = ctx->conf.def;    in _ieee80211_recalc_chanctx_min_def()
      639  ieee80211_change_chanctx(local, ctx, ctx, compat);    in ieee80211_find_chanctx()
      641  return ctx;    in ieee80211_find_chanctx()
      716  if (!ctx)    in ieee80211_alloc_chanctx()
      731  return ctx;    in ieee80211_alloc_chanctx()
      771  if (!ctx)    in ieee80211_new_chanctx()
      783  return ctx;    in ieee80211_new_chanctx()
      876  ieee80211_change_chanctx(local, ctx, ctx, compat);    in ieee80211_recalc_chanctx_chantype()
     1132  WARN_ON(ctx->replace_ctx->replace_ctx != ctx);    in ieee80211_link_unreserve_chanctx()
    [all …]

tkip.c
       86  u16 *p1k = ctx->p1k;    in tkip_mixing_phase1()
      102  ctx->state = TKIP_STATE_PHASE1_DONE;    in tkip_mixing_phase1()
      103  ctx->p1k_iv32 = tsc_IV32;    in tkip_mixing_phase1()
      110  const u16 *p1k = ctx->p1k;    in tkip_mixing_phase2()
      155  struct tkip_ctx *ctx = &key->u.tkip.tx;    in ieee80211_compute_tkip_p1k() local
      167  if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)    in ieee80211_compute_tkip_p1k()
      176  struct tkip_ctx *ctx = &key->u.tkip.tx;    in ieee80211_get_tkip_p1k_iv() local
      180  memcpy(p1k, ctx->p1k, sizeof(ctx->p1k));    in ieee80211_get_tkip_p1k_iv()
      189  struct tkip_ctx ctx;    in ieee80211_get_tkip_rx_p1k() local
      192  memcpy(p1k, ctx.p1k, sizeof(ctx.p1k));    in ieee80211_get_tkip_rx_p1k()
    [all …]
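
The tkip.c hits above are the phase-1/phase-2 key-mixing state kept in struct tkip_ctx; drivers that offload TKIP fetch the mixed keys through the exported helpers named in the listing. A minimal, hypothetical driver-side sketch (not from the tree; the five-word p1k buffer size and the register-programming step are assumptions):

```c
#include <net/mac80211.h>

/* Hypothetical driver helpers, assuming TKIP key offload: fetch the
 * 80-bit phase-1 key (five 16-bit words) for the current IV32 window
 * and hand it to hardware. */
static void demo_load_tx_p1k(struct ieee80211_key_conf *keyconf, u32 iv32)
{
	u16 p1k[5];

	ieee80211_get_tkip_p1k_iv(keyconf, iv32, p1k);
	/* ... program p1k[0..4] into the device's TX key slot ... */
}

static void demo_load_rx_p1k(struct ieee80211_key_conf *keyconf,
			     const u8 *ta, u32 iv32)
{
	u16 p1k[5];

	ieee80211_get_tkip_rx_p1k(keyconf, ta, iv32, p1k);
	/* ... program p1k[0..4] into the RX key slot for transmitter ta ... */
}
```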

/net/sunrpc/auth_gss/

gss_krb5_mech.c
      348  ctx->initiator_sign = gss_krb5_alloc_hash_v2(ctx, &keyout);    in gss_krb5_import_ctx_v2()
      356  ctx->acceptor_sign = gss_krb5_alloc_hash_v2(ctx, &keyout);    in gss_krb5_import_ctx_v2()
      365  ctx->initiator_integ = gss_krb5_alloc_hash_v2(ctx, &keyout);    in gss_krb5_import_ctx_v2()
      373  ctx->acceptor_integ = gss_krb5_alloc_hash_v2(ctx, &keyout);    in gss_krb5_import_ctx_v2()
      403  p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));    in gss_import_v2_context()
      406  ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR;    in gss_import_v2_context()
      428  ctx->gk5e = gss_krb5_lookup_enctype(ctx->enctype);    in gss_import_v2_context()
      431  ctx->enctype);    in gss_import_v2_context()
      476  ctx = kzalloc(sizeof(*ctx), gfp_mask);    in gss_krb5_import_sec_context()
      481  memzero_explicit(&ctx->Ksess, sizeof(ctx->Ksess));    in gss_krb5_import_sec_context()
    [all …]

auth_gss.c
      117  return ctx;    in gss_get_ctx()
      154  if (ctx)    in gss_cred_get_ctx()
      157  return ctx;    in gss_cred_get_ctx()
      165  ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);    in gss_alloc_context()
      172  return ctx;    in gss_alloc_context()
      247  ctx->gc_acceptor.len, ctx->gc_acceptor.data);    in gss_fill_context()
      796  gss_msg->ctx = gss_get_ctx(ctx);    in gss_pipe_downcall()
     1344  if (ctx)    in gss_destroy_nullcred()
     1422  if (!ctx)    in gss_stringify_acceptor()
     1440  if (!ctx || !ctx->gc_acceptor.len) {    in gss_stringify_acceptor()
    [all …]

gss_krb5_seal.c
       75  setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)    in setup_token_v2() argument
       81  if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)    in setup_token_v2()
       83  if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)    in setup_token_v2()
      100  token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;    in setup_token_v2()
      105  gss_krb5_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,    in gss_krb5_get_mic_v2() argument
      108  struct crypto_ahash *tfm = ctx->initiate ?    in gss_krb5_get_mic_v2()
      109  ctx->initiator_sign : ctx->acceptor_sign;    in gss_krb5_get_mic_v2()
      111  .len = ctx->gk5e->cksumlength,    in gss_krb5_get_mic_v2()
      119  krb5_hdr = setup_token_v2(ctx, token);    in gss_krb5_get_mic_v2()
      123  seq_send_be64 = cpu_to_be64(atomic64_fetch_inc(&ctx->seq_send64));    in gss_krb5_get_mic_v2()
    [all …]

/net/tls/

tls_main.c
      136  &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);    in update_sk_prot()
      138  &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);    in update_sk_prot()
      330  if (!ctx)    in tls_ctx_free()
      340  kfree(ctx);    in tls_ctx_free()
      394  if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)    in tls_sk_proto_close()
      456  if (!ctx) {    in do_tls_getsockopt_conf()
      534  if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)    in do_tls_getsockopt_no_pad()
      865  ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);    in tls_ctx_create()
      866  if (!ctx)    in tls_ctx_create()
      881  return ctx;    in tls_ctx_create()
    [all …]
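
tls_ctx_create(), update_sk_prot() and the getsockopt helpers listed above are the kernel half of the kernel-TLS socket API. For orientation, a hedged userspace sketch of how a socket acquires such a context, loosely following Documentation/networking/tls.rst; the fallback defines and the demo_ name are assumptions, not part of the listing:

```c
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef TCP_ULP
#define TCP_ULP 31	/* assumption: older libc headers may lack these */
#endif
#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/* Attach the "tls" ULP and install an AES-128-GCM TX key on a connected
 * TCP socket; this is the path that ends up allocating a tls_context. */
static int demo_enable_ktls_tx(int sk, const unsigned char key[16],
			       const unsigned char iv[8],
			       const unsigned char salt[4],
			       const unsigned char rec_seq[8])
{
	struct tls12_crypto_info_aes_gcm_128 ci = {
		.info.version = TLS_1_2_VERSION,
		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
	};

	if (setsockopt(sk, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	return setsockopt(sk, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}
```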

tls_device.c
       60  if (ctx->tx_conf == TLS_HW)    in tls_device_free_ctx()
       66  tls_ctx_free(NULL, ctx);    in tls_device_free_ctx()
       73  struct tls_context *ctx = offload_ctx->ctx;    in tls_device_tx_del_task() local
       84  ctx->netdev = NULL;    in tls_device_tx_del_task()
       85  tls_device_free_ctx(ctx);    in tls_device_tx_del_task()
      203  if (ctx->open_record)    in tls_device_sk_destruct()
      546  ctx,    in tls_push_data()
      850  if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)    in tls_device_core_ctrl_rx_resync()
     1052  offload_ctx->ctx = ctx;    in alloc_offload_ctx_tx()
     1069  ctx = tls_get_ctx(sk);    in tls_set_device_offload()
    [all …]

tls_sw.c
     1067  if (ctx->open_rec)    in tls_sw_sendmsg_locked()
     1217  if (ctx->open_rec)    in tls_sw_sendmsg_locked()
     1284  if (!ctx->open_rec)    in tls_sw_splice_eof()
     1639  err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);    in tls_decrypt_sg()
     2062  ctx->zc_capable;    in tls_sw_recvmsg()
     2554  kfree(ctx);    in tls_sw_free_ctx_tx()
     2589  kfree(ctx);    in tls_sw_free_ctx_rx()
     2776  ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);    in tls_set_sw_offload()
     2780  ctx->priv_ctx_rx = init_ctx_rx(ctx);    in tls_set_sw_offload()
     2789  cctx = &ctx->tx;    in tls_set_sw_offload()
    [all …]

tls.h
      200  int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
      213  DEBUG_NET_WARN_ON_ONCE(!ctx->strp.msg_ready || !ctx->strp.anchor->len);    in tls_strp_msg()
      214  return ctx->strp.anchor;    in tls_strp_msg()
      219  return READ_ONCE(ctx->strp.msg_ready);    in tls_strp_msg_ready()
      224  return ctx->strp.mixed_decrypted;    in tls_strp_msg_mixed_decrypted()
      265  int tls_push_sg(struct sock *sk, struct tls_context *ctx,
      274  return !!ctx->partially_sent_record;    in tls_is_partially_sent_record()
      309  struct cipher_context *ctx)    in tls_advance_record_sn() argument
      316  tls_bigint_increment(ctx->iv + prot->salt_size,    in tls_advance_record_sn()
      336  struct tls_prot_info *prot = &ctx->prot_info;    in tls_fill_prepend()
    [all …]
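
tls_advance_record_sn() above bumps the per-record sequence/IV counter via tls_bigint_increment(). A self-contained sketch of that operation (an in-place big-endian increment that reports wraparound); the demo_ name is made up and this is not the kernel's implementation:

```c
#include <stddef.h>
#include <stdint.h>

/* Increment a big-endian multi-byte integer in place.  Returns 1 if the
 * counter wrapped back to zero (carry out of the most significant byte),
 * which is how the caller can detect sequence-number exhaustion. */
static int demo_bigint_increment(uint8_t *seq, size_t len)
{
	size_t i;

	for (i = len; i-- > 0; ) {
		if (++seq[i] != 0)	/* no carry: done */
			return 0;
	}
	return 1;			/* carried out of the top byte */
}
```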

tls_toe.c
       51  ctx->sk_destruct(sk);    in tls_toe_sk_destruct()
       54  tls_ctx_free(sk, ctx);    in tls_toe_sk_destruct()
       60  struct tls_context *ctx;    in tls_toe_bypass() local
       66  ctx = tls_ctx_create(sk);    in tls_toe_bypass()
       67  if (!ctx)    in tls_toe_bypass()
       70  ctx->sk_destruct = sk->sk_destruct;    in tls_toe_bypass()
       72  ctx->rx_conf = TLS_HW_RECORD;    in tls_toe_bypass()
       73  ctx->tx_conf = TLS_HW_RECORD;    in tls_toe_bypass()
       74  update_sk_prot(sk, ctx);    in tls_toe_bypass()
      100  ctx->sk_proto->unhash(sk);    in tls_toe_unhash()
    [all …]

/net/6lowpan/

debugfs.c
       19  struct lowpan_iphc_ctx *ctx = data;    in lowpan_ctx_flag_active_set() local
       44  struct lowpan_iphc_ctx *ctx = data;    in lowpan_ctx_flag_c_set() local
       68  struct lowpan_iphc_ctx *ctx = data;    in lowpan_ctx_plen_set() local
       70  container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);    in lowpan_ctx_plen_set()
       76  ctx->plen = val;    in lowpan_ctx_plen_set()
       84  struct lowpan_iphc_ctx *ctx = data;    in lowpan_ctx_plen_get() local
       86  container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);    in lowpan_ctx_plen_get()
       89  *val = ctx->plen;    in lowpan_ctx_plen_get()
      101  container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);    in lowpan_ctx_pfx_show()
      131  container_of(ctx, struct lowpan_iphc_ctx_table, table[ctx->id]);    in lowpan_ctx_pfx_write()
    [all …]

/net/xfrm/

espintcp.c
       29  ctx->saved_data_ready(sk);    in handle_nonesp()
      270  if (ctx->tx_running)    in espintcp_push_msgs()
      272  ctx->tx_running = 1;    in espintcp_push_msgs()
      279  ctx->tx_running = 0;    in espintcp_push_msgs()
      285  ctx->tx_running = 0;    in espintcp_push_msgs()
      423  if (!ctx->tx_running)    in espintcp_tx_work()
      441  kfree(ctx);    in espintcp_destruct()
      468  ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);    in espintcp_init_sk()
      469  if (!ctx)    in espintcp_init_sk()
      510  kfree(ctx);    in espintcp_init_sk()
    [all …]

/net/netfilter/

nfnetlink_osf.c
       92  ctx->optsize != foptsize)    in nf_osf_match_one()
      104  switch (*ctx->optp) {    in nf_osf_match_one()
      106  mss = ctx->optp[3];    in nf_osf_match_one()
      108  mss |= ctx->optp[2];    in nf_osf_match_one()
      116  ctx->optp = optend;    in nf_osf_match_one()
      160  ctx->optp = optpinit;    in nf_osf_match_one()
      189  if (!ctx->optp)    in nf_osf_hdr_ctx_init()
      208  struct nf_osf_hdr_ctx ctx;    in nf_osf_match() local
      212  memset(&ctx, 0, sizeof(ctx));    in nf_osf_match()
      269  struct nf_osf_hdr_ctx ctx;    in nf_osf_find() local
    [all …]

nf_tables_api.c
      567  nft_activate_next(ctx->net, ctx->table);    in nft_trans_table_add()
      581  nft_deactivate_next(ctx->net, ctx->table);    in nft_deltable()
      611  nft_activate_next(ctx->net, ctx->chain);    in nft_trans_chain_add()
     1225  err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,    in nf_tables_table_notify()
     1473  ctx->table->nlpid = ctx->portid;    in nf_tables_updtable()
     2097  err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,    in nf_tables_chain_notify()
     3727  err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,    in nf_tables_rule_notify()
     3830  if (ctx->table && ctx->chain) {    in nf_tables_dump_rules()
     6475  err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,    in nft_get_set_elem()
     8757  __nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid,    in nf_tables_obj_notify()
    [all …]

nft_synproxy.c
      171  err = nf_ct_netns_get(ctx->net, ctx->family);    in nft_synproxy_do_init()
      175  switch (ctx->family) {    in nft_synproxy_do_init()
      203  nf_ct_netns_put(ctx->net, ctx->family);    in nft_synproxy_do_init()
      211  switch (ctx->family) {    in nft_synproxy_do_destroy()
      213  nf_synproxy_ipv4_fini(snet, ctx->net);    in nft_synproxy_do_destroy()
      225  nf_ct_netns_put(ctx->net, ctx->family);    in nft_synproxy_do_destroy()
      253  if (ctx->family != NFPROTO_IPV4 &&    in nft_synproxy_validate()
      254  ctx->family != NFPROTO_IPV6 &&    in nft_synproxy_validate()
      255  ctx->family != NFPROTO_INET)    in nft_synproxy_validate()
      274  nft_synproxy_do_destroy(ctx);    in nft_synproxy_destroy()
    [all …]

nft_compat.c
      148  par->net = ctx->net;    in nft_target_set_tgchk_param()
      150  switch (ctx->family) {    in nft_target_set_tgchk_param()
      181  par->family = ctx->family;    in nft_target_set_tgchk_param()
      306  par.net = ctx->net;    in nft_target_destroy()
      309  par.family = ctx->family;    in nft_target_destroy()
      448  par->net = ctx->net;    in nft_match_set_mtchk_param()
      450  switch (ctx->family) {    in nft_match_set_mtchk_param()
      556  par.net = ctx->net;    in __nft_match_destroy()
      559  par.family = ctx->family;    in __nft_match_destroy()
      800  family = ctx->family;    in nft_match_select_ops()
    [all …]

nft_immediate.c
       46  static int nft_immediate_init(const struct nft_ctx *ctx,    in nft_immediate_init() argument
       79  err = nf_tables_bind_chain(ctx, chain);    in nft_immediate_init()
      112  chain_ctx = *ctx;    in nft_immediate_activate()
      118  nft_clear(ctx->net, chain);    in nft_immediate_activate()
      135  chain_ctx = *ctx;    in nft_immediate_chain_deactivate()
      160  nf_tables_unbind_chain(ctx, chain);    in nft_immediate_deactivate()
      161  nft_deactivate_next(ctx->net, chain);    in nft_immediate_deactivate()
      165  nft_deactivate_next(ctx->net, chain);    in nft_immediate_deactivate()
      215  chain_ctx = *ctx;    in nft_immediate_destroy()
      250  struct nft_ctx *pctx = (struct nft_ctx *)ctx;    in nft_immediate_validate()
    [all …]

nft_inner.c
       27  struct nft_inner_tun_ctx ctx;    member
       93  ctx->inner_lloff = off;    in nft_inner_parse_l2l3()
      116  ctx->llproto = llproto;    in nft_inner_parse_l2l3()
      133  ctx->inner_nhoff = nhoff;    in nft_inner_parse_l2l3()
      139  ctx->inner_thoff = thoff;    in nft_inner_parse_l2l3()
      140  ctx->l4proto = iph->protocol;    in nft_inner_parse_l2l3()
      157  ctx->inner_nhoff = nhoff;    in nft_inner_parse_l2l3()
      168  ctx->inner_thoff = thoff;    in nft_inner_parse_l2l3()
      169  ctx->l4proto = l4proto;    in nft_inner_parse_l2l3()
      185  ctx->inner_tunoff = pkt->thoff;    in nft_inner_parse_tunhdr()
    [all …]

/net/ipv4/netfilter/

nf_nat_snmp_basic_main.c
       73  memcpy(&s[1], &ctx->from, 4);    in fast_csum()
       74  memcpy(&s[7], &ctx->to, 4);    in fast_csum()
       84  memcpy(&s[4], &ctx->to, 4);    in fast_csum()
      114  if (*pdata == ctx->from) {    in snmp_helper()
      116  (void *)&ctx->from, (void *)&ctx->to);    in snmp_helper()
      118  if (*ctx->check)    in snmp_helper()
      119  fast_csum(ctx, (unsigned char *)data - ctx->begin);    in snmp_helper()
      120  *pdata = ctx->to;    in snmp_helper()
      132  struct snmp_ctx ctx;    in snmp_translate() local
      143  if (ctx.from == ctx.to)    in snmp_translate()
    [all …]
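
The fast_csum()/snmp_helper() lines above rewrite an IPv4 address embedded in an SNMP payload and patch the UDP checksum incrementally rather than recomputing it over the whole datagram. A standalone sketch of the same RFC 1624-style adjustment (the demo_ helpers are hypothetical, not the kernel's implementation; 16-bit values are taken in host order):

```c
#include <stdint.h>

/* HC' = ~(~HC + ~m + m')  (RFC 1624, eqn. 3): update a ones'-complement
 * checksum when one 16-bit word changes from @from to @to. */
static uint16_t demo_csum_replace16(uint16_t check, uint16_t from, uint16_t to)
{
	uint32_t sum = (uint16_t)~check + (uint16_t)~from + to;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Apply it to a 32-bit field (e.g. an embedded IPv4 address) by treating
 * it as two 16-bit words. */
static uint16_t demo_csum_replace32(uint16_t check, uint32_t from, uint32_t to)
{
	check = demo_csum_replace16(check, from >> 16, to >> 16);
	return demo_csum_replace16(check, from & 0xffff, to & 0xffff);
}
```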

/net/sctp/

outqueue.c
      863  ctx->packet = &ctx->transport->packet;    in sctp_outq_select_transport()
      915  ctx->gfp);    in sctp_outq_flush_ctrl()
      925  ctx->packet->vtag = ctx->asoc->c.my_vtag;    in sctp_outq_flush_ctrl()
      999  if (ctx->transport != ctx->asoc->peer.retran_path) {    in sctp_outq_flush_rtx()
     1001  ctx->transport = ctx->asoc->peer.retran_path;    in sctp_outq_flush_rtx()
     1002  ctx->packet = &ctx->transport->packet;    in sctp_outq_flush_rtx()
     1008  sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,    in sctp_outq_flush_rtx()
     1012  error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,    in sctp_outq_flush_rtx()
     1049  if (!ctx->packet || !ctx->packet->has_cookie_echo)    in sctp_outq_flush_data()
     1080  if (ctx->transport)    in sctp_outq_flush_data()
    [all …]

/net/8021q/

vlan_dev.c
      756  path->dev = ctx->dev;    in vlan_dev_fill_forward_path()
      757  ctx->dev = vlan->real_dev;    in vlan_dev_fill_forward_path()
      758  if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))    in vlan_dev_fill_forward_path()
      761  ctx->vlan[ctx->num_vlans].id = vlan->vlan_id;    in vlan_dev_fill_forward_path()
      762  ctx->vlan[ctx->num_vlans].proto = vlan->vlan_proto;    in vlan_dev_fill_forward_path()
      763  ctx->num_vlans++;    in vlan_dev_fill_forward_path()
      776  struct macsec_context *ctx)    in vlan_macsec_offload() argument
      781  return (*func)(ctx);    in vlan_macsec_offload()
      791  return vlan_macsec_offload(ops->mdo_dev_open, ctx);    in vlan_macsec_dev_open()
      801  return vlan_macsec_offload(ops->mdo_dev_stop, ctx);    in vlan_macsec_dev_stop()
    [all …]

/net/ethtool/

tsinfo.c
      299  struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;    in ethnl_tsinfo_prepare_dump() local
      346  struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;    in ethnl_tsinfo_dump_one_phydev() local
      384  struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;    in ethnl_tsinfo_dump_one_netdev() local
      432  struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;    in ethnl_tsinfo_dump_one_net_topo() local
      469  struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;    in ethnl_tsinfo_dumpit() local
      500  struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;    in ethnl_tsinfo_start() local
      506  BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));    in ethnl_tsinfo_start()
      526  ctx->pos_ifindex = 0;    in ethnl_tsinfo_start()
      527  ctx->pos_phyindex = 0;    in ethnl_tsinfo_start()
      543  struct ethnl_tsinfo_dump_ctx *ctx = (void *)cb->ctx;    in ethnl_tsinfo_done() local
    [all …]

rss.c
      212  if (!ctx) {    in rss_prepare_ctx()
      380  ctx->ctx_idx = ctx->start_ctx;    in ethnl_rss_dump_start()
      389  ctx->ifindex = ctx->match_ifindex;    in ethnl_rss_dump_start()
      459  ctx->ctx_idx = ctx->start_ctx;    in rss_dump_one_dev()
      473  if (ctx->match_ifindex && ctx->match_ifindex != ctx->ifindex)    in ethnl_rss_dumpit()
      875  if (!ctx) {    in ethnl_rss_set()
      897  if (ctx)    in ethnl_rss_set()
     1056  if (!ctx) {    in ethnl_rss_create_doit()
     1132  kfree(ctx);    in ethnl_rss_create_doit()
     1182  if (!ctx) {    in ethnl_rss_delete_doit()
    [all …]

/net/ipv4/

tcp_fastopen.c
       36  kfree_sensitive(ctx);    in tcp_fastopen_ctx_free()
       45  if (ctx)    in tcp_fastopen_destroy_cipher()
       66  ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);    in tcp_fastopen_reset_cipher()
       67  if (!ctx) {    in tcp_fastopen_reset_cipher()
       77  ctx->num = 2;    in tcp_fastopen_reset_cipher()
       79  ctx->num = 1;    in tcp_fastopen_reset_cipher()
       84  octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx)));    in tcp_fastopen_reset_cipher()
      104  ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);    in tcp_fastopen_get_cipher()
      107  if (ctx) {    in tcp_fastopen_get_cipher()
      163  if (ctx)    in tcp_fastopen_cookie_gen()
    [all …]
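
tcp_fastopen_reset_cipher() and tcp_fastopen_get_cipher() sit behind the TCP_FASTOPEN_KEY socket option and the tcp_fastopen_key sysctl. A hedged server-side sketch of how the cookie key and the Fast Open request queue are configured from userspace (the fallback define, the queue length and the demo_ name are assumptions):

```c
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_FASTOPEN_KEY
#define TCP_FASTOPEN_KEY 33	/* assumption: older libc headers may lack it */
#endif

/* On a listening (or about-to-listen) TCP socket: install a 16-byte TFO
 * cookie key, which lands in tcp_fastopen_reset_cipher() above, and
 * enable a queue of pending Fast Open requests. */
static int demo_enable_tfo(int lsk, const unsigned char key[16])
{
	int qlen = 64;		/* max pending TFO requests (example value) */

	if (setsockopt(lsk, IPPROTO_TCP, TCP_FASTOPEN_KEY, key, 16))
		return -1;
	return setsockopt(lsk, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
}
```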

/net/bpf/

test_run.c
      314  ctx = &head->ctx;    in xdp_test_run_batch()
      736  void *ctx;    member
     1034  kfree(ctx);    in bpf_prog_test_run_skb()
     1042  kfree(ctx);    in bpf_prog_test_run_skb()
     1051  if (ctx && ctx->ifindex > 1) {    in bpf_prog_test_run_skb()
     1143  kfree(ctx);    in bpf_prog_test_run_skb()
     1247  if (ctx) {    in bpf_prog_test_run_xdp()
     1249  if (ctx->data_meta || ctx->data_end != size ||    in bpf_prog_test_run_xdp()
     1250  ctx->data > ctx->data_end ||    in bpf_prog_test_run_xdp()
     1348  kfree(ctx);    in bpf_prog_test_run_xdp()
    [all …]
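
The bpf_prog_test_run_xdp() checks listed above validate a user-supplied struct xdp_md context passed in with BPF_PROG_TEST_RUN. A sketch of the userspace side, assuming a reasonably recent libbpf and an already-loaded XDP program fd; the demo_ wrapper is hypothetical:

```c
#include <linux/bpf.h>	/* struct xdp_md */
#include <bpf/bpf.h>	/* bpf_prog_test_run_opts(), LIBBPF_OPTS() */

/* Run an XDP program once over @pkt, passing a struct xdp_md as ctx_in.
 * Per the checks above, data and data_meta are left at 0 and data_end
 * must equal the packet length. */
static int demo_xdp_test_run(int prog_fd, void *pkt, __u32 pkt_len)
{
	struct xdp_md ctx = {
		.data_end = pkt_len,
	};
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	return err ? err : (int)opts.retval;	/* e.g. XDP_PASS, XDP_DROP */
}
```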