/net/sched/

em_meta.c
   859  switch (meta->lvalue.hdr.op) {  in em_meta_match()
   873  if (meta) {  in meta_delete()
   877  ops->destroy(&meta->lvalue);  in meta_delete()
   878  ops->destroy(&meta->rvalue);  in meta_delete()
   882  kfree(meta);  in meta_delete()
   912  struct meta_match *meta = NULL;  in em_meta_change() (local)
   930  meta = kzalloc(sizeof(*meta), GFP_KERNEL);  in em_meta_change()
   931  if (meta == NULL) {  in em_meta_change()
   949  m->datalen = sizeof(*meta);  in em_meta_change()
   954  if (err && meta)  in em_meta_change()
  [all …]
|
cls_flower.c
    55  struct flow_dissector_key_meta meta;  (member)
  1881  key->meta.ingress_ifindex = err;  in fl_set_key()
  1882  mask->meta.ingress_ifindex = 0xffffffff;  in fl_set_key()
  1886  &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,  in fl_set_key()
  1887  sizeof(key->meta.l2_miss));  in fl_set_key()
  2172  FLOW_DISSECTOR_KEY_META, meta);  in fl_init_dissector()
  2332  return mask->meta.l2_miss;  in fl_needs_tc_skb_ext()
  3442  if (mask->meta.ingress_ifindex) {  in fl_dump_key()
  3450  if (fl_dump_key_val(skb, &key->meta.l2_miss,  in fl_dump_key()
  3451  TCA_FLOWER_L2_MISS, &mask->meta.l2_miss,  in fl_dump_key()
  [all …]
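The flower meta key populated in fl_set_key() above ends up in FLOW_DISSECTOR_KEY_META, which hardware-offload drivers read back through the flow_rule match helpers. Below is a minimal sketch of the driver side, assuming a flow rule is already in hand; the helper name example_rule_ingress_ifindex() is illustrative, not kernel API.

```c
#include <net/flow_offload.h>

/* Sketch: pull the ingress ifindex that fl_set_key() stored in the
 * FLOW_DISSECTOR_KEY_META key/mask pair. Returns false if the rule
 * does not match on it. */
static bool example_rule_ingress_ifindex(const struct flow_rule *rule, int *ifindex)
{
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return false;

	flow_rule_match_meta(rule, &match);
	if (!match.mask->ingress_ifindex)
		return false;

	*ifindex = match.key->ingress_ifindex;
	return true;
}
```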
|
/net/dsa/

tag_sja1105.c
   103  struct sja1105_meta *meta)  in sja1105_meta_unpack() (argument)
   347  const struct sja1105_meta *meta)  in sja1105_transfer_meta() (argument)
   351  hdr->h_dest[3] = meta->dmac_byte_3;  in sja1105_transfer_meta()
   352  hdr->h_dest[4] = meta->dmac_byte_4;  in sja1105_transfer_meta()
   353  SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;  in sja1105_transfer_meta()
   378  struct sja1105_meta *meta,  in sja1105_rcv_meta_state_machine() (argument)
   454  sja1105_transfer_meta(skb, meta);  in sja1105_rcv_meta_state_machine()
   479  struct sja1105_meta meta = {0};  in sja1105_rcv() (local)
   496  sja1105_meta_unpack(skb, &meta);  in sja1105_rcv()
   497  source_port = meta.source_port;  in sja1105_rcv()
  [all …]
|
/net/netfilter/

nft_meta.c
   422  const struct nft_meta *meta = nft_expr_priv(expr);  in nft_meta_set_eval() (local)
   424  u32 *sreg = &regs->data[meta->sreg];  in nft_meta_set_eval()
   428  switch (meta->key) {  in nft_meta_set_eval()
   731  NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,  in nft_meta_get_offload()
   735  NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,  in nft_meta_get_offload()
   749  const struct nft_meta *meta;  in nft_meta_get_reduce() (local)
   756  meta = nft_expr_priv(track->regs[priv->dreg].selector);  in nft_meta_get_reduce()
   757  if (priv->key != meta->key ||  in nft_meta_get_reduce()
   758  priv->dreg != meta->dreg) {  in nft_meta_get_reduce()
|
nf_flow_table_offload.c
    99  NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);  in nf_flow_rule_match()
   113  key->meta.ingress_ifindex = tuple->tc.iifidx;  in nf_flow_rule_match()
   115  key->meta.ingress_ifindex = tuple->iifidx;  in nf_flow_rule_match()
   117  mask->meta.ingress_ifindex = 0xffffffff;  in nf_flow_rule_match()
|
nft_inner.c
    40  struct nft_meta meta;  (member)
|
nft_cmp.c
   169  reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&  in __nft_cmp_offload()
|
/net/bridge/netfilter/

nft_meta_bridge.c
   112  const struct nft_meta *meta = nft_expr_priv(expr);  in nft_meta_bridge_set_eval() (local)
   113  u32 *sreg = &regs->data[meta->sreg];  in nft_meta_bridge_set_eval()
   117  switch (meta->key) {  in nft_meta_bridge_set_eval()
|
Kconfig
    14  tristate "Netfilter nf_table bridge meta support"
    16  Add support for bridge dedicated meta key.
|
/net/core/

bpf_sk_storage.c
   780  __bpf_md_ptr(struct bpf_iter_meta *, meta);
   786  DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,  in DEFINE_BPF_ITER_FUNC() (argument)
   796  struct bpf_iter_meta meta;  in DEFINE_BPF_ITER_FUNC() (local)
   800  meta.seq = seq;  in DEFINE_BPF_ITER_FUNC()
   801  prog = bpf_iter_get_info(&meta, selem == NULL);  in DEFINE_BPF_ITER_FUNC()
   803  ctx.meta = &meta;  in DEFINE_BPF_ITER_FUNC()
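The bpf_sk_storage_map, sockmap, netlink, ipv6_route, unix, tcp and udp hits in this listing all repeat the same kernel-side BPF iterator pattern: the seq_file show/stop callback fills a struct bpf_iter_meta, asks bpf_iter_get_info() for the attached program, and runs it against an iterator-specific context. A minimal sketch of that pattern follows, assuming a hypothetical iterator named "example"; the context struct and callback names are illustrative, not existing kernel code.

```c
#include <linux/bpf.h>
#include <linux/seq_file.h>

/* Hypothetical iterator context; real iterators define their own
 * bpf_iter__<name> struct, as the __bpf_md_ptr(..., meta) members above show. */
struct bpf_iter__example {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(void *, cur);
};

static int example_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter__example ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	/* The second argument flags the final (stop) call; v == NULL marks the
	 * end of the walk, matching the selem == NULL / !v checks above. */
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.cur = v;
	return bpf_iter_run_prog(prog, &ctx);
}
```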
|
sock_map.c
   711  __bpf_md_ptr(struct bpf_iter_meta *, meta);
   717  DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,  in DEFINE_BPF_ITER_FUNC() (argument)
   761  struct bpf_iter_meta meta;  in sock_map_seq_show() (local)
   764  meta.seq = seq;  in sock_map_seq_show()
   765  prog = bpf_iter_get_info(&meta, !v);  in sock_map_seq_show()
   769  ctx.meta = &meta;  in sock_map_seq_show()
  1374  struct bpf_iter_meta meta;  in sock_hash_seq_show() (local)
  1377  meta.seq = seq;  in sock_hash_seq_show()
  1378  prog = bpf_iter_get_info(&meta, !elem);  in sock_hash_seq_show()
  1382  ctx.meta = &meta;  in sock_hash_seq_show()
|
flow_dissector.c
   280  struct flow_dissector_key_meta *meta;  in skb_flow_dissect_meta() (local)
   285  meta = skb_flow_dissector_target(flow_dissector,  in skb_flow_dissect_meta()
   288  meta->ingress_ifindex = skb->skb_iif;  in skb_flow_dissect_meta()
   295  meta->l2_miss = ext->l2_miss;  in skb_flow_dissect_meta()
|
filter.c
  4258  void *meta = xdp->data_meta + offset;  in BPF_CALL_2() (local)
  4259  unsigned long metalen = xdp->data - meta;  in BPF_CALL_2()
  4263  if (unlikely(meta < xdp_frame_end ||  in BPF_CALL_2()
  4264  meta > xdp->data))  in BPF_CALL_2()
  4269  xdp->data_meta = meta;  in BPF_CALL_2()
  4655  u64, flags, void *, meta, u64, meta_size)  in BPF_CALL_5() (argument)
  4664  return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,  in BPF_CALL_5()
  5068  u64, flags, void *, meta, u64, meta_size)  in BPF_CALL_5() (argument)
  5078  return bpf_event_output(map, flags, meta, meta_size, xdp,  in BPF_CALL_5()
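The BPF_CALL_2 hits at 4258-4269 are the kernel side of the bpf_xdp_adjust_meta() helper, which grows or shrinks the data_meta area sitting in front of the packet payload. A minimal sketch of how an XDP program might use it, assuming a 4-byte mark consumed by some later stage; the program name and mark value are illustrative.

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_store_mark(struct xdp_md *ctx)
{
	void *data;
	__u32 *mark;

	/* Reserve 4 bytes of metadata in front of the packet payload. */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*mark)))
		return XDP_PASS;

	mark = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;
	if ((void *)(mark + 1) > data)	/* bounds check required by the verifier */
		return XDP_PASS;

	*mark = 0x1234;	/* illustrative value, read e.g. by a TC program later */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```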
|
skbuff.c
  6195  void *meta;  in skb_reorder_vlan_header() (local)
  6210  meta = skb_metadata_end(skb) - meta_len;  in skb_reorder_vlan_header()
  6211  memmove(meta + VLAN_HLEN, meta, meta_len);  in skb_reorder_vlan_header()
|
/net/xdp/

xsk.c
   663  struct xsk_tx_metadata *meta = NULL;  in xsk_build_skb() (local)
   727  meta = buffer - xs->pool->tx_metadata_len;  in xsk_build_skb()
   728  if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {  in xsk_build_skb()
   733  if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {  in xsk_build_skb()
   734  if (unlikely(meta->request.csum_start +  in xsk_build_skb()
   735  meta->request.csum_offset +  in xsk_build_skb()
   741  skb->csum_start = hr + meta->request.csum_start;  in xsk_build_skb()
   742  skb->csum_offset = meta->request.csum_offset;  in xsk_build_skb()
   752  if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)  in xsk_build_skb()
   753  skb->skb_mstamp_ns = meta->request.launch_time;  in xsk_build_skb()
  [all …]
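xsk_build_skb() reads the AF_XDP TX metadata from just before the frame payload (buffer - tx_metadata_len). A minimal userspace sketch of filling a checksum-offload request follows, assuming the UMEM's tx_metadata_len was configured to sizeof(struct xsk_tx_metadata) and that XDP_TX_METADATA is set in the TX descriptor options elsewhere; the helper name is illustrative.

```c
#include <string.h>
#include <linux/if_xdp.h>

/* Sketch: place a checksum-offload request in the metadata slot that
 * precedes the packet data. 'frame' points at the start of the payload
 * inside the UMEM; the metadata layout assumption is noted above. */
static void request_tx_checksum(void *frame, __u16 csum_start, __u16 csum_offset)
{
	struct xsk_tx_metadata *meta = (struct xsk_tx_metadata *)frame - 1;

	memset(meta, 0, sizeof(*meta));
	meta->flags = XDP_TXMD_FLAGS_CHECKSUM;   /* ask the driver/NIC to fill the checksum */
	meta->request.csum_start = csum_start;   /* where checksumming begins in the frame */
	meta->request.csum_offset = csum_offset; /* where to store the 16-bit result */
}
```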
|
xsk_buff_pool.c
   758  ret.meta = __xsk_buff_get_metadata(pool, __xp_raw_get_data(pool, addr));  in xp_raw_get_ctx()
|
/net/netlink/

af_netlink.c
  2724  __bpf_md_ptr(struct bpf_iter_meta *, meta);
  2731  struct bpf_iter_meta *meta,  in DEFINE_BPF_ITER_FUNC()
  2736  meta->seq_num--; /* skip SEQ_START_TOKEN */  in DEFINE_BPF_ITER_FUNC()
  2737  ctx.meta = meta;  in DEFINE_BPF_ITER_FUNC()
  2744  struct bpf_iter_meta meta;  in netlink_seq_show() (local)
  2747  meta.seq = seq;  in netlink_seq_show()
  2748  prog = bpf_iter_get_info(&meta, false);  in netlink_seq_show()
  2753  return netlink_prog_seq_show(prog, &meta, v);  in netlink_seq_show()
  2760  struct bpf_iter_meta meta;  in netlink_seq_stop() (local)
  2764  meta.seq = seq;  in netlink_seq_stop()
  [all …]
|
/net/ipv6/

ip6_fib.c
  2762  struct bpf_iter_meta *meta,  in ipv6_route_prog_seq_show() (argument)
  2767  ctx.meta = meta;  in ipv6_route_prog_seq_show()
  2775  struct bpf_iter_meta meta;  in ipv6_route_seq_show() (local)
  2779  meta.seq = seq;  in ipv6_route_seq_show()
  2780  prog = bpf_iter_get_info(&meta, false);  in ipv6_route_seq_show()
  2784  ret = ipv6_route_prog_seq_show(prog, &meta, v);  in ipv6_route_seq_show()
  2792  struct bpf_iter_meta meta;  in ipv6_route_seq_stop() (local)
  2796  meta.seq = seq;  in ipv6_route_seq_stop()
  2797  prog = bpf_iter_get_info(&meta, true);  in ipv6_route_seq_stop()
  2799  (void)ipv6_route_prog_seq_show(prog, &meta, v);  in ipv6_route_seq_stop()
|
route.c
  6822  DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
|
/net/unix/

af_unix.c
  3635  __bpf_md_ptr(struct bpf_iter_meta *, meta);
  3645  meta->seq_num--; /* skip SEQ_START_TOKEN */  in unix_prog_seq_show()
  3646  ctx.meta = meta;  in unix_prog_seq_show()
  3770  struct bpf_iter_meta meta;  in bpf_iter_unix_seq_show() (local)
  3788  meta.seq = seq;  in bpf_iter_unix_seq_show()
  3789  prog = bpf_iter_get_info(&meta, false);  in bpf_iter_unix_seq_show()
  3790  ret = unix_prog_seq_show(prog, &meta, v, uid);  in bpf_iter_unix_seq_show()
  3799  struct bpf_iter_meta meta;  in bpf_iter_unix_seq_stop() (local)
  3803  meta.seq = seq;  in bpf_iter_unix_seq_stop()
  3804  prog = bpf_iter_get_info(&meta, true);  in bpf_iter_unix_seq_stop()
  [all …]
|
/net/ipv4/

tcp_ipv4.c
  3036  __bpf_md_ptr(struct bpf_iter_meta *, meta);
  3046  meta->seq_num--; /* skip SEQ_START_TOKEN */
  3047  ctx.meta = meta;
  3363  struct bpf_iter_meta meta;  (local)
  3391  meta.seq = seq;
  3392  prog = bpf_iter_get_info(&meta, false);
  3393  ret = tcp_prog_seq_show(prog, &meta, v, uid);
  3405  struct bpf_iter_meta meta;  (local)
  3409  meta.seq = seq;
  3410  prog = bpf_iter_get_info(&meta, true);
  [all …]
|
udp.c
  3410  __bpf_md_ptr(struct bpf_iter_meta *, meta);
  3602  meta->seq_num--; /* skip SEQ_START_TOKEN */  in udp_prog_seq_show()
  3603  ctx.meta = meta;  in udp_prog_seq_show()
  3613  struct bpf_iter_meta meta;  in bpf_iter_udp_seq_show() (local)
  3630  meta.seq = seq;  in bpf_iter_udp_seq_show()
  3631  prog = bpf_iter_get_info(&meta, false);  in bpf_iter_udp_seq_show()
  3659  struct bpf_iter_meta meta;  in bpf_iter_udp_seq_stop() (local)
  3663  meta.seq = seq;  in bpf_iter_udp_seq_stop()
  3664  prog = bpf_iter_get_info(&meta, true);  in bpf_iter_udp_seq_stop()
  3666  (void)udp_prog_seq_show(prog, &meta, v, 0, 0);  in bpf_iter_udp_seq_stop()
  [all …]
|