/linux/drivers/net/ethernet/google/gve/

gve_flow_rule.c
    14   struct gve_adminq_flow_rule *flow_rule = &rule->flow_rule;   in gve_fill_ethtool_flow_spec() [local]
    44   fsp->h_u.tcp_ip4_spec.psrc = flow_rule->key.src_port;   in gve_fill_ethtool_flow_spec()
    46   fsp->h_u.tcp_ip4_spec.tos = flow_rule->key.tos;   in gve_fill_ethtool_flow_spec()
    51   fsp->m_u.tcp_ip4_spec.tos = flow_rule->mask.tos;   in gve_fill_ethtool_flow_spec()
    57   fsp->h_u.ah_ip4_spec.spi = flow_rule->key.spi;   in gve_fill_ethtool_flow_spec()
    58   fsp->h_u.ah_ip4_spec.tos = flow_rule->key.tos;   in gve_fill_ethtool_flow_spec()
    61   fsp->m_u.ah_ip4_spec.spi = flow_rule->mask.spi;   in gve_fill_ethtool_flow_spec()
    62   fsp->m_u.ah_ip4_spec.tos = flow_rule->mask.tos;   in gve_fill_ethtool_flow_spec()
    88   fsp->h_u.ah_ip6_spec.spi = flow_rule->key.spi;   in gve_fill_ethtool_flow_spec()
    94   fsp->m_u.ah_ip6_spec.spi = flow_rule->mask.spi;   in gve_fill_ethtool_flow_spec()
    [all …]

/linux/net/netfilter/

nf_flow_table_offload.c
    743   flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);   in nf_flow_offload_rule_alloc()
    744   if (!flow_rule)   in nf_flow_offload_rule_alloc()
    748   if (!flow_rule->rule)   in nf_flow_offload_rule_alloc()
    751   flow_rule->rule->match.dissector = &flow_rule->match.dissector;   in nf_flow_offload_rule_alloc()
    752   flow_rule->rule->match.mask = &flow_rule->match.mask;   in nf_flow_offload_rule_alloc()
    753   flow_rule->rule->match.key = &flow_rule->match.key;   in nf_flow_offload_rule_alloc()
    768   return flow_rule;   in nf_flow_offload_rule_alloc()
    773   kfree(flow_rule);   in nf_flow_offload_rule_alloc()
    791   kfree(flow_rule);   in __nf_flow_offload_destroy()
    809   if (!flow_rule[0])   in nf_flow_offload_alloc()
    [all …]

nf_flow_table_inet.c
    48   struct nf_flow_rule *flow_rule)   in nf_flow_rule_route_inet() [argument]
    55   err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule);   in nf_flow_rule_route_inet()
    58   err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule);   in nf_flow_rule_route_inet()

/linux/include/net/

flow_offload.h
    95    struct flow_rule;
    97    void flow_rule_match_meta(const struct flow_rule *rule,
    99    void flow_rule_match_basic(const struct flow_rule *rule,
    105   void flow_rule_match_vlan(const struct flow_rule *rule,
    109   void flow_rule_match_arp(const struct flow_rule *rule,
    115   void flow_rule_match_ip(const struct flow_rule *rule,
    121   void flow_rule_match_tcp(const struct flow_rule *rule,
    143   void flow_rule_match_ct(const struct flow_rule *rule,
    439   struct flow_rule {   [struct definition]
    696   struct flow_rule *rule;
    [all …]

/linux/net/core/

flow_offload.c
    10    struct flow_rule *flow_rule_alloc(unsigned int num_actions)   in flow_rule_alloc()
    12    struct flow_rule *rule;   in flow_rule_alloc()
    58    void flow_rule_match_meta(const struct flow_rule *rule,   in flow_rule_match_meta()
    65    void flow_rule_match_basic(const struct flow_rule *rule,   in flow_rule_match_basic()
    86    void flow_rule_match_vlan(const struct flow_rule *rule,   in flow_rule_match_vlan()
    93    void flow_rule_match_cvlan(const struct flow_rule *rule,   in flow_rule_match_cvlan()
    100   void flow_rule_match_arp(const struct flow_rule *rule,   in flow_rule_match_arp()
    121   void flow_rule_match_ip(const struct flow_rule *rule,   in flow_rule_match_ip()
    142   void flow_rule_match_tcp(const struct flow_rule *rule,   in flow_rule_match_tcp()
    156   void flow_rule_match_icmp(const struct flow_rule *rule,   in flow_rule_match_icmp()
    [all …]

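The flow_offload.h declarations and their net/core/flow_offload.c implementations above are the consumer side of TC classifier offload: a driver takes the struct flow_rule carried by a flow_cls_offload request and reads individual match keys through the flow_rule_match_*() helpers. The sketch below shows that pattern under stated assumptions: the function name example_parse_cls_flower() and the choice of supported keys are illustrative, not code from any file in this listing.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <net/flow_offload.h>

/* Hypothetical cls_flower parse step: accept only plain IPv4 filters and
 * read the IPv4 addresses the filter asked to match on. */
static int example_parse_cls_flower(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		/* match.key holds the values, match.mask the valid bits */
		flow_rule_match_basic(rule, &match);
		if (match.mask->n_proto &&
		    match.key->n_proto != htons(ETH_P_IP))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		/* program match.key->src / match.key->dst into hardware,
		 * honouring match.mask->src / match.mask->dst */
	}

	return 0;
}

The mlxsw, nfp, ksz9477 and mlx5 tunnel-parsing entries later in this listing start from the same flow_cls_offload_flow_rule() call before dispatching to per-key helpers.
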
/linux/drivers/net/ethernet/netronome/nfp/flower/

main.h
    561   struct flow_rule *rule);
    575   struct flow_rule *rule,
    580   struct flow_rule *rule);
    584   struct flow_rule *rule);
    596   struct flow_rule *rule);
    600   struct flow_rule *rule);
    604   struct flow_rule *rule);
    608   struct flow_rule *rule);
    610   struct flow_rule *rule,
    617   struct flow_rule *rule,
    [all …]

match.c
    25    struct flow_rule *rule)   in nfp_flower_compile_tci()
    121   struct flow_rule *rule,   in nfp_flower_compile_mpls()
    176   struct flow_rule *rule,   in nfp_flower_compile_mac_mpls()
    190   struct flow_rule *rule)   in nfp_flower_compile_tport()
    320   struct flow_rule *rule)   in nfp_flower_compile_vlan()
    403   struct flow_rule *rule)   in nfp_flower_compile_tun_ipv4_addrs()
    419   struct flow_rule *rule)   in nfp_flower_compile_tun_ipv6_addrs()
    489   struct flow_rule *rule)   in nfp_flower_compile_ipv4_gre_tun()
    504   struct flow_rule *rule)   in nfp_flower_compile_ipv4_udp_tun()
    514   struct flow_rule *rule)   in nfp_flower_compile_ipv6_udp_tun()
    [all …]

conntrack.c
    26     static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
    58     struct flow_rule *rule = flow_cls_offload_flow_rule(flow);   in is_pre_ct_flow()
    90     struct flow_rule *rule = flow_cls_offload_flow_rule(flow);   in is_post_ct_flow()
    135    static void *get_mangled_key(struct flow_rule *rule, void *buf,   in get_mangled_key()
    172    static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,   in get_mangled_tos_ttl()
    484    struct flow_rule *rule)   in nfp_ct_check_vlan_merge()
    718    struct flow_rule *a_rule;   in nfp_fl_merge_actions_offload()
    815    struct flow_rule *rules[NFP_MAX_ENTRY_RULES];   in nfp_fl_ct_add_offload()
    1885   struct flow_rule *rule = flow_cls_offload_flow_rule(flow);   in nfp_fl_ct_handle_post_ct()
    2149   struct flow_rule *flow_rule = flow->rule;   in nfp_fl_ct_offload_nft_supported() [local]
    [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc/

ct_fs_smfs.c
    245   mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)   in mlx5_ct_fs_smfs_ct_validate_flow_rule() [argument]
    254   if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {   in mlx5_ct_fs_smfs_ct_validate_flow_rule()
    256   flow_rule->match.dissector->used_keys);   in mlx5_ct_fs_smfs_ct_validate_flow_rule()
    260   flow_rule_match_basic(flow_rule, &basic);   in mlx5_ct_fs_smfs_ct_validate_flow_rule()
    261   flow_rule_match_control(flow_rule, &control);   in mlx5_ct_fs_smfs_ct_validate_flow_rule()
    262   flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);   in mlx5_ct_fs_smfs_ct_validate_flow_rule()
    263   flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);   in mlx5_ct_fs_smfs_ct_validate_flow_rule()
    265   flow_rule_match_ports(flow_rule, &ports);   in mlx5_ct_fs_smfs_ct_validate_flow_rule()
    267   flow_rule_match_tcp(flow_rule, &tcp);   in mlx5_ct_fs_smfs_ct_validate_flow_rule()
    297   struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)   in mlx5_ct_fs_smfs_ct_rule_add() [argument]
    [all …]

ct_fs.h
    26    struct flow_rule *flow_rule);

int_port.c
    68    struct mlx5_flow_handle *flow_rule;   in mlx5e_int_port_create_rx_rule() [local]
    93    flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,   in mlx5e_int_port_create_rx_rule()
    95    if (IS_ERR(flow_rule))   in mlx5e_int_port_create_rx_rule()
    97    PTR_ERR(flow_rule));   in mlx5e_int_port_create_rx_rule()
    101   return flow_rule;   in mlx5e_int_port_create_rx_rule()

ct_fs_dmfs.c
    31    struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)   in mlx5_ct_fs_dmfs_ct_rule_add() [argument]

/linux/drivers/net/ethernet/mellanox/mlx5/core/

rdma.c
    26    struct mlx5_flow_handle *flow_rule = NULL;   in mlx5_rdma_enable_roce_steering() [local]
    78    flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);   in mlx5_rdma_enable_roce_steering()
    79    if (IS_ERR(flow_rule)) {   in mlx5_rdma_enable_roce_steering()
    80    err = PTR_ERR(flow_rule);   in mlx5_rdma_enable_roce_steering()
    90    roce->allow_rule = flow_rule;   in mlx5_rdma_enable_roce_steering()

en_rep.c
    483    if (IS_ERR(flow_rule)) {   in mlx5e_sqs2vport_add_peers_rules()
    485    return PTR_ERR(flow_rule);   in mlx5e_sqs2vport_add_peers_rules()
    488    sq_peer->rule = flow_rule;   in mlx5e_sqs2vport_add_peers_rules()
    531    if (IS_ERR(flow_rule)) {   in mlx5e_sqs2vport_start()
    532    err = PTR_ERR(flow_rule);   in mlx5e_sqs2vport_start()
    646    if (IS_ERR(flow_rule))   in mlx5e_rep_add_meta_tunnel_rule()
    647    return PTR_ERR(flow_rule);   in mlx5e_rep_add_meta_tunnel_rule()
    1041   if (IS_ERR(flow_rule))   in mlx5e_create_rep_vport_rx_rule()
    1042   return PTR_ERR(flow_rule);   in mlx5e_create_rep_vport_rx_rule()
    1679   if (IS_ERR(flow_rule)) {   in mlx5e_vport_rep_event_pair()
    [all …]

eswitch_offloads.c
    1020   if (IS_ERR(flow_rule))   in mlx5_eswitch_add_send_to_vport_rule()
    1022   PTR_ERR(flow_rule));   in mlx5_eswitch_add_send_to_vport_rule()
    1025   return flow_rule;   in mlx5_eswitch_add_send_to_vport_rule()
    1069   if (IS_ERR(flow_rule))   in mlx5_eswitch_add_send_to_vport_meta_rule()
    1074   return flow_rule;   in mlx5_eswitch_add_send_to_vport_meta_rule()
    1386   if (IS_ERR(flow_rule)) {   in esw_add_fdb_miss_rule()
    1401   if (IS_ERR(flow_rule)) {   in esw_add_fdb_miss_rule()
    1454   if (IS_ERR(flow_rule))   in esw_add_restore_rule()
    1459   return flow_rule;   in esw_add_restore_rule()
    2165   return flow_rule;   in mlx5_eswitch_create_vport_rx_rule()
    [all …]

eswitch.c
    64    struct mlx5_flow_handle *flow_rule;   [struct member]
    255   flow_rule =   in __esw_fdb_set_vport_rule()
    258   if (IS_ERR(flow_rule)) {   in __esw_fdb_set_vport_rule()
    262   flow_rule = NULL;   in __esw_fdb_set_vport_rule()
    266   return flow_rule;   in __esw_fdb_set_vport_rule()
    359   if (vaddr->flow_rule)   in esw_del_uc_addr()
    361   vaddr->flow_rule = NULL;   in esw_del_uc_addr()
    399   iter_vaddr->flow_rule =   in update_allmulti_vports()
    450   vport, mac, vaddr->flow_rule,   in esw_add_mc_addr()
    477   if (vaddr->flow_rule)   in esw_del_mc_addr()
    [all …]

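The mlx5 core entries above (rdma.c, en_rep.c, eswitch_offloads.c, eswitch.c), like the eswitch legacy and ACL entries further down, all revolve around the same mlx5_add_flow_rules() pattern: build a mlx5_flow_act, add the rule, and treat the returned handle as an error pointer via IS_ERR()/PTR_ERR(). Below is a minimal sketch of that pattern, assuming a caller-supplied flow table and a simple drop action; example_add_drop_rule() is an illustrative name, not a function from the listing.

#include <linux/err.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Add a match-all drop rule to @ft. A NULL spec matches every packet and a
 * drop action needs no forwarding destination, hence dest = NULL and
 * num_dest = 0. The handle is an ERR_PTR on failure, so callers check it
 * with IS_ERR() rather than testing for NULL. */
static int example_add_drop_rule(struct mlx5_flow_table *ft,
				 struct mlx5_flow_handle **out_rule)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

	flow_rule = mlx5_add_flow_rules(ft, NULL, &flow_act, NULL, 0);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	*out_rule = flow_rule;
	return 0;
}

A rule created this way is released later with mlx5_del_flow_rules() on the stored handle.
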
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/

tc_ct.c
    290    struct flow_rule *rule)   in mlx5_tc_ct_rule_to_tuple_nat()
    378    struct flow_rule *rule)   in mlx5_tc_ct_set_tuple_match()
    548    mlx5_tc_ct_get_ct_metadata_action(struct flow_rule *flow_rule)   in mlx5_tc_ct_get_ct_metadata_action() [argument]
    680    struct flow_rule *flow_rule,   in mlx5_tc_ct_entry_create_nat() [argument]
    718    struct flow_rule *flow_rule,   in mlx5_tc_ct_entry_create_mod_hdr() [argument]
    803    struct flow_rule *flow_rule,   in mlx5_tc_ct_entry_add_rule() [argument]
    880    struct flow_rule *flow_rule,   in mlx5_tc_ct_entry_update_rule() [argument]
    1098   struct flow_rule *flow_rule,   in mlx5_tc_ct_entry_add_rules() [argument]
    1141   struct flow_rule *flow_rule,   in mlx5_tc_ct_entry_update_rules() [argument]
    1193   struct flow_rule *flow_rule = flow_cls_offload_flow_rule(flow);   in mlx5_tc_ct_block_flow_offload_add() [local]
    [all …]

tc_tun_vxlan.c
    22    struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlx5e_tc_tun_check_udp_dport_vxlan()
    114   struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlx5e_tc_tun_parse_vxlan_gbp_option()
    163   struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlx5e_tc_tun_parse_vxlan()

tc_tun_geneve.c
    25    struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlx5e_tc_tun_check_udp_dport_geneve()
    128   struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlx5e_tc_tun_parse_geneve_vni()
    162   struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlx5e_tc_tun_parse_geneve_options()

/linux/drivers/net/ethernet/mellanox/mlx5/core/esw/

legacy.c
    255   struct mlx5_flow_handle *flow_rule;   in _mlx5_eswitch_set_vepa_locked() [local]
    283   flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,   in _mlx5_eswitch_set_vepa_locked()
    285   if (IS_ERR(flow_rule)) {   in _mlx5_eswitch_set_vepa_locked()
    286   err = PTR_ERR(flow_rule);   in _mlx5_eswitch_set_vepa_locked()
    289   esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;   in _mlx5_eswitch_set_vepa_locked()
    296   flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,   in _mlx5_eswitch_set_vepa_locked()
    298   if (IS_ERR(flow_rule)) {   in _mlx5_eswitch_set_vepa_locked()
    299   err = PTR_ERR(flow_rule);   in _mlx5_eswitch_set_vepa_locked()
    302   esw->fdb_table.legacy.vepa_star_rule = flow_rule;   in _mlx5_eswitch_set_vepa_locked()

/linux/drivers/net/dsa/microchip/

ksz9477_tc_flower.c
    29    struct flow_rule *rule,   in ksz9477_flower_parse_key_l2()
    112   struct flow_rule *rule,   in ksz9477_flower_parse_key()
    162   struct flow_rule *rule = flow_cls_offload_flow_rule(cls);   in ksz9477_flower_parse_action()
    218   struct flow_rule *rule = flow_cls_offload_flow_rule(cls);   in ksz9477_cls_flower_add()

/linux/include/net/netfilter/

nf_flow_table.h
    49    struct flow_rule *rule;
    63    struct nf_flow_rule *flow_rule);
    346   struct nf_flow_rule *flow_rule);
    349   struct nf_flow_rule *flow_rule);

/linux/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/

ingress_ofld.c
    128   struct mlx5_flow_handle *flow_rule;   in esw_acl_ingress_src_port_drop_create() [local]
    141   flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);   in esw_acl_ingress_src_port_drop_create()
    142   if (IS_ERR(flow_rule)) {   in esw_acl_ingress_src_port_drop_create()
    143   err = PTR_ERR(flow_rule);   in esw_acl_ingress_src_port_drop_create()
    147   vport->ingress.offloads.drop_rule = flow_rule;   in esw_acl_ingress_src_port_drop_create()

/linux/drivers/net/ethernet/microchip/vcap/

vcap_tc.h
    11   struct flow_rule *frule;

/linux/drivers/net/ethernet/mellanox/mlxsw/

spectrum_flower.c
    341   struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlxsw_sp_flower_parse_meta()
    409   const struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlxsw_sp_flower_parse_ports()
    436   const struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlxsw_sp_flower_parse_ports_range()
    498   const struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlxsw_sp_flower_parse_tcp()
    529   const struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlxsw_sp_flower_parse_ip()
    562   struct flow_rule *rule = flow_cls_offload_flow_rule(f);   in mlxsw_sp_flower_parse()