| /net/netfilter/ |
| A D | nf_flow_table_core.c |
      54  struct flow_offload *flow;  in flow_offload_alloc() local
      59  flow = kzalloc(sizeof(*flow), GFP_ATOMIC);  in flow_offload_alloc()
      60  if (!flow)  in flow_offload_alloc()
      64  flow->ct = ct;  in flow_offload_alloc()
      74  return flow;  in flow_offload_alloc()
     254  switch (flow->type) {  in flow_offload_free()
     261  nf_ct_put(flow->ct);  in flow_offload_free()
     262  kfree_rcu(flow, rcu_head);  in flow_offload_free()
     323  flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);  in flow_offload_add()
     380  flow_offload_free(flow);  in flow_offload_del()
     [all …]
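
The matches above trace the lifetime of a flow_offload object: flow_offload_alloc() zero-allocates it with GFP_ATOMIC and stores a counted reference to the conntrack entry, while flow_offload_free() drops that reference with nf_ct_put() and defers the actual release via kfree_rcu(). The user-space sketch below mirrors just that ownership pattern with a C11 atomic refcount; all type and function names are stand-ins, and the RCU-deferred free is collapsed to a plain free().

```c
#include <stdatomic.h>
#include <stdlib.h>

struct ct_entry {
	atomic_int refcnt;
};

struct flow_entry {
	struct ct_entry *ct;
};

static void ct_get(struct ct_entry *ct)
{
	atomic_fetch_add(&ct->refcnt, 1);
}

static void ct_put(struct ct_entry *ct)
{
	/* Last reference frees the object; kernel: nf_ct_put(). */
	if (atomic_fetch_sub(&ct->refcnt, 1) == 1)
		free(ct);
}

static struct flow_entry *flow_alloc(struct ct_entry *ct)
{
	struct flow_entry *flow = calloc(1, sizeof(*flow)); /* kzalloc() analogue */

	if (!flow)
		return NULL;
	ct_get(ct);       /* the flow holds its own reference */
	flow->ct = ct;
	return flow;
}

static void flow_free(struct flow_entry *flow)
{
	ct_put(flow->ct); /* kernel: nf_ct_put(flow->ct) */
	free(flow);       /* kernel defers this via kfree_rcu() */
}

int main(void)
{
	struct ct_entry *ct = calloc(1, sizeof(*ct));

	if (!ct)
		return 1;
	atomic_init(&ct->refcnt, 1);      /* caller's reference */

	struct flow_entry *flow = flow_alloc(ct);

	if (flow)
		flow_free(flow);          /* drops the flow's reference */
	ct_put(ct);                       /* drops the caller's reference */
	return 0;
}
```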
|
| A D | nf_flow_table_offload.c |
      24  struct flow_offload *flow;  member
     224  const struct flow_offload *flow,  in flow_offload_eth_src() argument
     271  const struct flow_offload *flow,  in flow_offload_eth_dst() argument
     738  struct flow_offload *flow = offload->flow;  in nf_flow_offload_rule_alloc() local
     836  struct flow_offload *flow,  in nf_flow_offload_tuple() argument
     956  offload->flow->timeout = max_t(u64, offload->flow->timeout,  in flow_offload_work_stats()
     961  nf_ct_acct_add(offload->flow->ct,  in flow_offload_work_stats()
    1031  offload->flow = flow;  in nf_flow_offload_work_alloc()
    1040  struct flow_offload *flow)  in nf_flow_offload_add() argument
    1052  struct flow_offload *flow)  in nf_flow_offload_del() argument
     [all …]
|
| A D | nf_flow_table_ip.c |
      32  flow_offload_teardown(flow);  in nf_flow_state_check()
      37  !test_bit(NF_FLOW_CLOSING, &flow->flags))  in nf_flow_state_check()
      38  set_bit(NF_FLOW_CLOSING, &flow->flags);  in nf_flow_state_check()
     376  struct flow_offload *flow;  in nf_flow_offload_forward() local
     393  flow_offload_teardown(flow);  in nf_flow_offload_forward()
     427  struct flow_offload *flow;  in nf_flow_offload_ip_hook() local
     467  flow_offload_teardown(flow);  in nf_flow_offload_ip_hook()
     655  struct flow_offload *flow;  in nf_flow_offload_ipv6_forward() local
     672  flow_offload_teardown(flow);  in nf_flow_offload_ipv6_forward()
     723  struct flow_offload *flow;  in nf_flow_offload_ipv6_hook() local
     [all …]
|
| A D | nf_tables_offload.c |
      15  if (!flow)  in nft_flow_rule_alloc()
      20  kfree(flow);  in nft_flow_rule_alloc()
      24  flow->rule->match.dissector = &flow->match.dissector;  in nft_flow_rule_alloc()
      25  flow->rule->match.mask = &flow->match.mask;  in nft_flow_rule_alloc()
      26  flow->rule->match.key = &flow->match.key;  in nft_flow_rule_alloc()
      28  return flow;  in nft_flow_rule_alloc()
     109  if (!flow)  in nft_flow_rule_create()
     138  return flow;  in nft_flow_rule_create()
     162  kfree(flow);  in nft_flow_rule_destroy()
     257  if (flow)  in nft_flow_cls_offload_setup()
     [all …]
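
One detail worth noting at lines 24-26: nft_flow_rule_alloc() wires the freshly created rule's match pointers back into match storage embedded in the enclosing object, so the key and mask bytes live in one place and every consumer reads them through the rule. The sketch below collapses the kernel's two allocations into a single struct to show just the pointer wiring; all type names are illustrative stand-ins, not the kernel's.

```c
#include <stdlib.h>

struct match_storage {
	unsigned char key[64];
	unsigned char mask[64];
};

struct match_view {
	unsigned char *key;   /* -> storage.key  */
	unsigned char *mask;  /* -> storage.mask */
};

struct flow_rule {
	struct match_view rule;      /* what matching code reads */
	struct match_storage match;  /* the backing bytes */
};

static struct flow_rule *flow_rule_alloc(void)
{
	struct flow_rule *flow = calloc(1, sizeof(*flow));

	if (!flow)
		return NULL;
	/* Point the view at the embedded storage, as the excerpt does
	 * with flow->rule->match.key = &flow->match.key; etc. */
	flow->rule.key = flow->match.key;
	flow->rule.mask = flow->match.mask;
	return flow;
}

int main(void)
{
	struct flow_rule *flow = flow_rule_alloc();

	if (!flow)
		return 1;
	free(flow);  /* one free releases view and storage together here */
	return 0;
}
```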
|
| A D | nft_cmp.c |
     140  struct nft_flow_rule *flow,  in __nft_cmp_offload() argument
     145  u8 *mask = (u8 *)&flow->match.mask;  in __nft_cmp_offload()
     146  u8 *key = (u8 *)&flow->match.key;  in __nft_cmp_offload()
     165  flow->match.dissector.used_keys |= BIT_ULL(reg->key);  in __nft_cmp_offload()
     166  flow->match.dissector.offset[reg->key] = reg->base_offset;  in __nft_cmp_offload()
     179  struct nft_flow_rule *flow,  in nft_cmp_offload() argument
     184  return __nft_cmp_offload(ctx, flow, priv);  in nft_cmp_offload()
     239  struct nft_flow_rule *flow,  in nft_cmp_fast_offload() argument
     254  return __nft_cmp_offload(ctx, flow, &cmp);  in nft_cmp_fast_offload()
     338  struct nft_flow_rule *flow,  in nft_cmp16_fast_offload() argument
     [all …]
|
| A D | nft_payload.c |
     307  struct nft_flow_rule *flow,  in nft_payload_offload_ll() argument
     376  struct nft_flow_rule *flow,  in nft_payload_offload_ip() argument
     416  struct nft_flow_rule *flow,  in nft_payload_offload_ip6() argument
     456  struct nft_flow_rule *flow,  in nft_payload_offload_nh() argument
     463  err = nft_payload_offload_ip(ctx, flow, priv);  in nft_payload_offload_nh()
     476  struct nft_flow_rule *flow,  in nft_payload_offload_tcp() argument
     504  struct nft_flow_rule *flow,  in nft_payload_offload_udp() argument
     532  struct nft_flow_rule *flow,  in nft_payload_offload_th() argument
     552  struct nft_flow_rule *flow,  in nft_payload_offload() argument
     560  err = nft_payload_offload_ll(ctx, flow, priv);  in nft_payload_offload()
     [all …]
|
| A D | xt_addrtype.c |
      36  struct flowi6 flow;  in match_lookup_rt6() local
      41  memset(&flow, 0, sizeof(flow));  in match_lookup_rt6()
      42  flow.daddr = *addr;  in match_lookup_rt6()
      44  flow.flowi6_oif = dev->ifindex;  in match_lookup_rt6()
      52  flowi6_to_flowi(&flow), false);  in match_lookup_rt6()
|
| A D | nf_flow_table_inet.c |
      46  struct flow_offload *flow,  in nf_flow_rule_route_inet() argument
      50  const struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;  in nf_flow_rule_route_inet()
      55  err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule);  in nf_flow_rule_route_inet()
      58  err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule);  in nf_flow_rule_route_inet()
|
| /net/openvswitch/ |
| A D | flow_table.c |
      78  if (!flow)  in ovs_flow_alloc()
      97  return flow;  in ovs_flow_alloc()
     137  if (!flow)  in ovs_flow_free()
     491  flow);  in table_instance_flow_flush()
     826  if (!flow)  in ovs_flow_tbl_lookup_stats()
     840  if (flow)  in ovs_flow_tbl_lookup_stats()
     844  return flow;  in ovs_flow_tbl_lookup_stats()
     864  return flow;  in ovs_flow_tbl_lookup()
     885  if (flow && ovs_identifier_is_key(&flow->id) &&  in ovs_flow_tbl_lookup_exact()
    1040  flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);  in flow_key_insert()
     [all …]
|
| A D | datapath.c |
     250  struct sw_flow *flow;  in ovs_dp_process_packet() local
     264  if (unlikely(!flow)) {  in ovs_dp_process_packet()
     617  struct sw_flow *flow;  in ovs_packet_cmd_execute() local
     661  err = PTR_ERR(flow);  in ovs_packet_cmd_execute()
     662  if (IS_ERR(flow))  in ovs_packet_cmd_execute()
    1077  if (!flow)  in ovs_flow_cmd_new()
    1079  if (likely(!flow)) {  in ovs_flow_cmd_new()
    1121  flow = NULL;  in ovs_flow_cmd_new()
    1122  if (!flow) {  in ovs_flow_cmd_new()
    1384  if (!flow) {  in ovs_flow_cmd_get()
     [all …]
|
| A D | flow.c |
      68  stats = rcu_dereference(flow->stats[cpu]);  in ovs_flow_stats_update()
      75  flow->stats_last_writer = cpu;  in ovs_flow_stats_update()
      83  if (unlikely(flow->stats_last_writer != cpu)) {  in ovs_flow_stats_update()
      89  if (likely(flow->stats_last_writer != -1) &&  in ovs_flow_stats_update()
     108  rcu_assign_pointer(flow->stats[cpu],  in ovs_flow_stats_update()
     111  flow->cpu_used_mask);  in ovs_flow_stats_update()
     115  flow->stats_last_writer = cpu;  in ovs_flow_stats_update()
     128  void ovs_flow_stats_get(const struct sw_flow *flow,  in ovs_flow_stats_get() argument
     140  cpu = cpumask_next(cpu, flow->cpu_used_mask)) {  in ovs_flow_stats_get()
     159  void ovs_flow_stats_clear(struct sw_flow *flow)  in ovs_flow_stats_clear() argument
     [all …]
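
ovs_flow_stats_update() shows a scalability trick: statistics live in per-CPU slots that are allocated lazily the first time a given CPU writes to the flow, and stats_last_writer remembers the most recent writer so the common single-writer case stays cheap. The sketch below keeps only the lazy-slot and last-writer bookkeeping; the kernel's RCU publication, spinlocks, cpumask tracking, and CPU-0 fallback on allocation failure are all omitted, and the names are stand-ins.

```c
#include <stdlib.h>

#define NR_CPUS 4  /* stand-in for the real CPU count */

struct flow_stats {
	unsigned long packets;
	unsigned long bytes;
};

struct sw_flow {
	struct flow_stats *stats[NR_CPUS];  /* lazily filled per-CPU slots */
	int stats_last_writer;              /* -1 until the first write */
};

static void flow_stats_update(struct sw_flow *flow, int cpu, unsigned long len)
{
	struct flow_stats *stats = flow->stats[cpu];

	if (!stats) {
		/* First write from this CPU: allocate its private slot.
		 * The kernel additionally falls back to the CPU-0 slot
		 * when this allocation fails; that path is omitted. */
		stats = calloc(1, sizeof(*stats));
		if (!stats)
			return;
		flow->stats[cpu] = stats;
	}
	stats->packets++;
	stats->bytes += len;
	flow->stats_last_writer = cpu;
}

int main(void)
{
	struct sw_flow flow = { .stats_last_writer = -1 };

	flow_stats_update(&flow, 1, 1500);
	flow_stats_update(&flow, 3, 60);
	return flow.stats_last_writer == 3 ? 0 : 1;
}
```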
|
| A D | flow_netlink.h |
      41  int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
      42  int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
      43  int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
|
| /net/sched/ |
| A D | sch_fq_codel.c |
     120  flow->head = skb->next;  in dequeue_head()
     129  if (flow->head == NULL)  in flow_queue_add()
     130  flow->head = skb;  in flow_queue_add()
     133  flow->tail = skb;  in flow_queue_add()
     164  flow = &q->flows[idx];  in fq_codel_drop()
     205  flow = &q->flows[idx];  in fq_codel_enqueue()
     264  if (flow->head) {  in dequeue_func()
     330  rtnl_kfree_skbs(flow->head, flow->tail);  in fq_codel_flow_purge()
     331  flow->head = NULL;  in fq_codel_flow_purge()
     669  if (flow->head) {  in fq_codel_dump_class_stats()
     [all …]
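
dequeue_head() and flow_queue_add() operate on the classic head/tail singly-linked packet queue: append at the tail in O(1), pop from the head in O(1), with emptiness signalled by head == NULL (the stale tail pointer is never consulted). A self-contained sketch of the same structure, with a plain struct standing in for sk_buff:

```c
#include <assert.h>

struct pkt {
	struct pkt *next;
	int len;
};

struct flow_q {
	struct pkt *head;
	struct pkt *tail;
};

static void flow_queue_add(struct flow_q *flow, struct pkt *skb)
{
	skb->next = NULL;
	if (flow->head == NULL)     /* empty queue: new skb is the head... */
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;           /* ...and always the new tail */
}

static struct pkt *dequeue_head(struct flow_q *flow)
{
	struct pkt *skb = flow->head;

	if (!skb)
		return NULL;
	flow->head = skb->next;     /* tail goes stale; head == NULL marks empty */
	skb->next = NULL;
	return skb;
}

int main(void)
{
	struct flow_q q = { 0 };
	struct pkt a = { .len = 100 }, b = { .len = 200 };

	flow_queue_add(&q, &a);
	flow_queue_add(&q, &b);
	assert(dequeue_head(&q) == &a);  /* FIFO order preserved */
	assert(dequeue_head(&q) == &b);
	assert(dequeue_head(&q) == NULL);
	return 0;
}
```

The same queue shape recurs in the sch_fq_pie.c, sch_cake.c, and sch_fq.c entries below.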
|
| A D | sch_fq_pie.c |
     122  if (!flow->head)  in flow_queue_add()
     123  flow->head = skb;  in flow_queue_add()
     125  flow->tail->next = skb;  in flow_queue_add()
     126  flow->tail = skb;  in flow_queue_add()
     233  flow->head = skb->next;  in dequeue_head()
     242  struct fq_pie_flow *flow;  in fq_pie_qdisc_dequeue() local
     262  if (flow->head) {  in fq_pie_qdisc_dequeue()
     279  flow->qlen--;  in fq_pie_qdisc_dequeue()
     283  pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);  in fq_pie_qdisc_dequeue()
     544  rtnl_kfree_skbs(flow->head, flow->tail);  in fq_pie_reset()
     [all …]
|
| A D | sch_hhf.c |
     200  kfree(flow);  in seek_list()
     203  return flow;  in seek_list()
     215  struct hh_flow_state *flow;  in alloc_new_hh() local
     224  return flow;  in alloc_new_hh()
     234  if (!flow)  in alloc_new_hh()
     241  return flow;  in alloc_new_hh()
     252  struct hh_flow_state *flow;  in hhf_classify() local
     273  flow->hit_timestamp = now;  in hhf_classify()
     310  flow->hash_id = hash;  in hhf_classify()
     311  flow->hit_timestamp = now;  in hhf_classify()
     [all …]
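
seek_list() and alloc_new_hh() maintain per-flow heavy-hitter state keyed by hash: the lookup walk opportunistically evicts entries whose hit_timestamp has gone stale, and a miss allocates fresh state stamped with the current time. A simplified single-bucket sketch; the kernel hashes into many such lists and works in jiffies rather than time(), and the names below only echo the excerpt.

```c
#include <stdlib.h>
#include <time.h>

struct hh_flow_state {
	struct hh_flow_state *next;
	unsigned int hash_id;
	time_t hit_timestamp;
};

#define HH_EVICT_TIMEOUT 2  /* seconds; stand-in for hhf_evict_timeout */

static struct hh_flow_state *seek_list(unsigned int hash,
				       struct hh_flow_state **head,
				       time_t now)
{
	struct hh_flow_state **pp = head, *flow;

	while ((flow = *pp) != NULL) {
		if (now - flow->hit_timestamp > HH_EVICT_TIMEOUT) {
			*pp = flow->next;          /* unlink and evict stale state */
			free(flow);
			continue;
		}
		if (flow->hash_id == hash) {
			flow->hit_timestamp = now; /* refresh on hit */
			return flow;
		}
		pp = &flow->next;
	}
	return NULL;
}

static struct hh_flow_state *alloc_new_hh(unsigned int hash,
					  struct hh_flow_state **head,
					  time_t now)
{
	struct hh_flow_state *flow = calloc(1, sizeof(*flow));

	if (!flow)
		return NULL;
	flow->hash_id = hash;
	flow->hit_timestamp = now;
	flow->next = *head;                        /* push onto the bucket */
	*head = flow;
	return flow;
}

int main(void)
{
	struct hh_flow_state *bucket = NULL;
	time_t now = time(NULL);

	alloc_new_hh(42, &bucket, now);
	return seek_list(42, &bucket, now) ? 0 : 1;
}
```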
|
| A D | sch_cake.c |
     915  if (!flow->head)  in flow_queue_add()
     919  flow->tail = skb;  in flow_queue_add()
    1216  if (flow->head == flow->tail)  in cake_ack_filter()
    1219  skb = flow->tail;  in cake_ack_filter()
    1592  flow->dropped++;  in cake_drop()
    1903  if (!flow->set || flow->set == CAKE_SET_DECAYING) {  in cake_enqueue()
    1913  flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);  in cake_enqueue()
    1949  if (flow->head) {  in cake_dequeue_one()
    2109  flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);  in cake_dequeue()
    2123  if (flow->cvars.p_drop || flow->cvars.count ||  in cake_dequeue()
     [all …]
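
The cake_enqueue()/cake_dequeue() lines show CAKE's deficit round-robin core: a newly active flow starts with one quantum of deficit (line 1913), each dequeued packet spends deficit equal to its length, and a flow whose deficit cannot cover its head packet is topped up by another quantum and skipped (line 2109). A toy single-flow sketch of that accounting, with made-up sizes and no real scheduler around it:

```c
#include <stdio.h>

struct drr_flow {
	int deficit;
	int backlog[8];  /* packet sizes, 0 = empty slot */
	int head;
};

/* Returns bytes sent, 0 when the queue is empty, or -1 when the
 * deficit ran out and was refilled (the scheduler would rotate on). */
static int drr_dequeue_one(struct drr_flow *flow, int quantum)
{
	int len = flow->backlog[flow->head];

	if (!len)
		return 0;
	if (flow->deficit < len) {
		flow->deficit += quantum;  /* top up, as at line 2109 */
		return -1;
	}
	flow->deficit -= len;              /* spend deficit on this packet */
	flow->backlog[flow->head++] = 0;
	return len;
}

int main(void)
{
	struct drr_flow f = { .deficit = 300, .backlog = { 200, 200 } };

	for (;;) {
		int len = drr_dequeue_one(&f, 300);

		if (len == 0)
			break;
		if (len < 0)
			printf("deficit exhausted, refilled\n");
		else
			printf("sent %d bytes\n", len);
	}
	return 0;
}
```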
|
| A D | cls_flow.c |
      69  __be32 src = flow_get_u32_src(flow);  in flow_get_src()
      88  const struct flow_keys *flow)  in flow_get_proto() argument
      90  return flow->basic.ip_proto;  in flow_get_proto()
      96  if (flow->ports.ports)  in flow_get_proto_src()
      97  return ntohs(flow->ports.src);  in flow_get_proto_src()
     105  if (flow->ports.ports)  in flow_get_proto_dst()
     106  return ntohs(flow->ports.dst);  in flow_get_proto_dst()
     162  return flow_get_src(skb, flow);  in flow_get_nfct_src()
     175  return flow_get_dst(skb, flow);  in flow_get_nfct_dst()
     245  return flow_get_src(skb, flow);  in flow_key_get()
     [all …]
|
| A D | sch_fq.c |
     206  head->last->next = flow;  in fq_flow_add_tail()
     208  head->first = flow;  in fq_flow_add_tail()
     209  head->last = flow;  in fq_flow_add_tail()
     210  flow->next = NULL;  in fq_flow_add_tail()
     482  if (skb == flow->head) {  in fq_erase_head()
     507  head = flow->head;  in flow_queue_add()
     511  flow->head = skb;  in flow_queue_add()
     514  flow->tail = skb;  in flow_queue_add()
     794  rtnl_kfree_skbs(flow->head, flow->tail);  in fq_flow_purge()
     795  flow->head = NULL;  in fq_flow_purge()
     [all …]
|
| A D | act_ct.c |
     243  struct flow_offload *flow,  in tcf_ct_flow_table_fill_actions() argument
     249  struct nf_conn *ct = flow->ct;  in tcf_ct_flow_table_fill_actions()
     260  set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);  in tcf_ct_flow_table_fill_actions()
     290  !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&  in tcf_ct_flow_is_outdated()
     291  !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);  in tcf_ct_flow_is_outdated()
     662  struct flow_offload *flow;  in tcf_ct_flow_table_lookup() local
     685  ct = flow->ct;  in tcf_ct_flow_table_lookup()
     693  set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);  in tcf_ct_flow_table_lookup()
     703  flow_offload_teardown(flow);  in tcf_ct_flow_table_lookup()
     714  tcf_ct_flow_ct_ext_ifidx_update(flow);  in tcf_ct_flow_table_lookup()
     [all …]
|
| /net/ipv4/netfilter/ |
| A D | ipt_rpfilter.c |
      58  struct flowi4 flow;  in rpfilter_mt() local
      74  memset(&flow, 0, sizeof(flow));  in rpfilter_mt()
      75  flow.flowi4_iif = LOOPBACK_IFINDEX;  in rpfilter_mt()
      76  flow.daddr = iph->saddr;  in rpfilter_mt()
      77  flow.saddr = rpfilter_get_saddr(iph->daddr);  in rpfilter_mt()
      78  flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;  in rpfilter_mt()
      79  flow.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));  in rpfilter_mt()
      80  flow.flowi4_scope = RT_SCOPE_UNIVERSE;  in rpfilter_mt()
      81  flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par));  in rpfilter_mt()
      82  flow.flowi4_uid = sock_net_uid(xt_net(par), NULL);  in rpfilter_mt()
     [all …]
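
rpfilter_mt() builds its flowi4 key by zeroing the whole struct and then filling only the relevant fields, with the crucial inversion at line 76: the packet's source address becomes the lookup destination, so the FIB answers "which interface would we use to reach the sender", and the match compares that against the actual ingress device. The same zero-then-fill idiom appears in the xt_addrtype.c entry above. A stand-alone sketch of the reverse-path idea; fib_lookup() here is a hypothetical toy, not the kernel routine.

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct flow_key {
	uint32_t daddr;   /* filled with the packet's saddr */
	uint32_t saddr;   /* left zero here, as memset leaves it */
	uint32_t mark;
};

/* Hypothetical FIB: returns the ifindex a packet to daddr would
 * leave through, or -1 if unreachable. Toy rule: 10/8 via ifindex 2. */
static int fib_lookup(const struct flow_key *key)
{
	return (key->daddr >> 24) == 10 ? 2 : -1;
}

static bool rpfilter_match(uint32_t pkt_saddr, int in_ifindex, uint32_t mark)
{
	struct flow_key flow;

	memset(&flow, 0, sizeof(flow));  /* as in the excerpt: zero first */
	flow.daddr = pkt_saddr;          /* reverse direction: src as dst */
	flow.mark = mark;
	/* Reply route must point back out the ingress device. */
	return fib_lookup(&flow) == in_ifindex;
}

int main(void)
{
	return rpfilter_match(0x0a000001 /* 10.0.0.1 */, 2, 0) ? 0 : 1;
}
```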
|
| /net/qrtr/ |
| A D | af_qrtr.c |
     193  flow = *slot;  in __qrtr_node_release()
     195  kfree(flow);  in __qrtr_node_release()
     234  if (flow) {  in qrtr_tx_resume()
     273  if (!flow) {  in qrtr_tx_wait()
     274  flow = kzalloc(sizeof(*flow), GFP_KERNEL);  in qrtr_tx_wait()
     275  if (flow) {  in qrtr_tx_wait()
     278  kfree(flow);  in qrtr_tx_wait()
     279  flow = NULL;  in qrtr_tx_wait()
     286  if (!flow)  in qrtr_tx_wait()
     332  if (flow) {  in qrtr_tx_flow_failed()
     [all …]
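
The qrtr_tx_wait() lines (273-279) show the lookup-or-create idiom for the per-destination flow-control object: on a miss, allocate with kzalloc(), try to publish the object in the node's flow table, and on failure free the fresh object and fall back to NULL. A user-space analogue under a mutex, with a toy fixed-size table and a fallible insert standing in for the kernel's tree insert; note that under this single lock the failure branch is unreachable and is kept only to mirror the excerpt's shape.

```c
#include <pthread.h>
#include <stdlib.h>

#define MAX_FLOWS 64

struct tx_flow {
	int pending;  /* stand-in for the real flow-control state */
};

static struct tx_flow *flows[MAX_FLOWS];
static pthread_mutex_t flows_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical fallible insert, mirroring a tree insert that can fail. */
static int flow_insert(unsigned int key, struct tx_flow *flow)
{
	if (flows[key])
		return -1;   /* slot already taken */
	flows[key] = flow;
	return 0;
}

static struct tx_flow *flow_lookup_or_create(unsigned int key)
{
	struct tx_flow *flow;

	if (key >= MAX_FLOWS)
		return NULL;

	pthread_mutex_lock(&flows_lock);
	flow = flows[key];
	if (!flow) {
		flow = calloc(1, sizeof(*flow));      /* kzalloc() analogue */
		if (flow && flow_insert(key, flow)) { /* as in the excerpt: */
			free(flow);                   /*   kfree(flow);     */
			flow = NULL;                  /*   flow = NULL;     */
		}
	}
	pthread_mutex_unlock(&flows_lock);
	return flow;
}

int main(void)
{
	return flow_lookup_or_create(5) ? 0 : 1;
}
```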
|
| /net/smc/ |
| A D | smc_llc.c |
     211  flow->qentry = NULL;  in smc_llc_flow_qentry_clr()
     219  if (flow->qentry) {  in smc_llc_flow_qentry_del()
     220  qentry = flow->qentry;  in smc_llc_flow_qentry_del()
     221  flow->qentry = NULL;  in smc_llc_flow_qentry_del()
     229  flow->qentry = qentry;  in smc_llc_flow_qentry_set()
     260  if (flow->type) {  in smc_llc_flow_start()
     323  memset(flow, 0, sizeof(*flow));  in smc_llc_flow_stop()
     344  (flow->qentry ||  in smc_llc_wait()
     348  if (!flow->qentry ||  in smc_llc_wait()
     366  flow->type, lgr->role,  in smc_llc_wait()
     [all …]
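
The smc_llc_flow_qentry_clr()/_del()/_set() trio is a small ownership protocol for the single queue entry a flow may hold: _clr() detaches the entry and transfers ownership to the caller, _del() detaches and frees it, and _set() installs a new one, so exactly one owner is responsible at any time. A minimal sketch of the same handoff, with stand-in types:

```c
#include <stdlib.h>

struct llc_qentry {
	int msg_type;  /* placeholder payload */
};

struct llc_flow {
	struct llc_qentry *qentry;  /* at most one pending entry */
};

/* Detach and hand the entry to the caller (may return NULL). */
static struct llc_qentry *flow_qentry_clr(struct llc_flow *flow)
{
	struct llc_qentry *qentry = flow->qentry;

	flow->qentry = NULL;
	return qentry;
}

/* Detach and dispose of the entry, if any. */
static void flow_qentry_del(struct llc_flow *flow)
{
	free(flow_qentry_clr(flow));  /* free(NULL) is a no-op */
}

/* Install a new entry; any previous one must have been handed off. */
static void flow_qentry_set(struct llc_flow *flow, struct llc_qentry *qentry)
{
	flow->qentry = qentry;
}

int main(void)
{
	struct llc_flow flow = { 0 };

	flow_qentry_set(&flow, calloc(1, sizeof(struct llc_qentry)));
	flow_qentry_del(&flow);  /* flow no longer owns anything */
	return 0;
}
```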
|
| A D | smc_llc.h |
     102  void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow);
     110  struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow);
     111  void smc_llc_flow_qentry_del(struct smc_llc_flow *flow);
|
| /net/core/ |
| A D | flow_dissector.c |
    1720  switch (flow->control.addr_type) {  in flow_keys_hash_length()
    1731  return sizeof(*flow) - diff;  in flow_keys_hash_length()
    1736  switch (flow->control.addr_type) {  in flow_get_u32_src()
    1738  return flow->addrs.v4addrs.src;  in flow_get_u32_src()
    1741  &flow->addrs.v6addrs.src);  in flow_get_u32_src()
    1743  return flow->addrs.tipckey.key;  in flow_get_u32_src()
    1752  switch (flow->control.addr_type) {  in flow_get_u32_dst()
    1754  return flow->addrs.v4addrs.dst;  in flow_get_u32_dst()
    1757  &flow->addrs.v6addrs.dst);  in flow_get_u32_dst()
    1848  const struct flow_keys *flow)  in make_flow_keys_digest() argument
     [all …]
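
flow_get_u32_src() (and its _dst twin) collapse whatever address family the dissector extracted into a single u32 suitable for hashing, switching on control.addr_type: IPv4 is returned as-is, IPv6 is folded down, and TIPC contributes its key. A sketch of that dispatch with a tagged union; the plain XOR fold stands in for the kernel's ipv6_addr_hash().

```c
#include <stdint.h>

enum addr_type { ADDR_IPV4, ADDR_IPV6, ADDR_TIPC };

struct flow_keys {
	enum addr_type addr_type;
	union {
		uint32_t v4_src;
		uint32_t v6_src[4];
		uint32_t tipc_key;
	} addrs;
};

static uint32_t flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->addr_type) {
	case ADDR_IPV4:
		return flow->addrs.v4_src;
	case ADDR_IPV6:
		/* Fold 128 bits to 32; the kernel uses ipv6_addr_hash(),
		 * a plain XOR keeps this sketch self-contained. */
		return flow->addrs.v6_src[0] ^ flow->addrs.v6_src[1] ^
		       flow->addrs.v6_src[2] ^ flow->addrs.v6_src[3];
	case ADDR_TIPC:
		return flow->addrs.tipc_key;
	}
	return 0;
}

int main(void)
{
	struct flow_keys k = { .addr_type = ADDR_IPV4,
			       .addrs.v4_src = 0x0a000001 };

	return flow_get_u32_src(&k) == 0x0a000001 ? 0 : 1;
}
```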
|
| /net/caif/ |
| A D | chnl_net.c |
     147  static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,  in chnl_flowctrl_cb() argument
     152  flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :  in chnl_flowctrl_cb()
     153  flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :  in chnl_flowctrl_cb()
     154  flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :  in chnl_flowctrl_cb()
     155  flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :  in chnl_flowctrl_cb()
     156  flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :  in chnl_flowctrl_cb()
     157  flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?  in chnl_flowctrl_cb()
     162  switch (flow) {  in chnl_flowctrl_cb()
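
chnl_flowctrl_cb() maps each caif_ctrlcmd value to a debug string with a long ternary chain (lines 152-157) before acting on the command in a switch (line 162). The same table can be expressed as a single switch-based helper, sketched below with stand-in enum values mirroring the names in the excerpt:

```c
#include <stdio.h>

/* Stand-ins for the caif_ctrlcmd values named in the excerpt. */
enum ctrlcmd {
	CTRL_FLOW_ON_IND,
	CTRL_FLOW_OFF_IND,
	CTRL_INIT_RSP,
	CTRL_DEINIT_RSP,
	CTRL_INIT_FAIL_RSP,
	CTRL_REMOTE_SHUTDOWN_IND,
};

static const char *ctrlcmd_name(enum ctrlcmd flow)
{
	switch (flow) {
	case CTRL_FLOW_ON_IND:         return "ON";
	case CTRL_FLOW_OFF_IND:        return "OFF";
	case CTRL_INIT_RSP:            return "INIT";
	case CTRL_DEINIT_RSP:          return "CLOSE/DEINIT";
	case CTRL_INIT_FAIL_RSP:       return "OPEN_FAIL";
	case CTRL_REMOTE_SHUTDOWN_IND: return "REMOTE_SHUTDOWN";
	}
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", ctrlcmd_name(CTRL_FLOW_OFF_IND));  /* prints "OFF" */
	return 0;
}
```

A switch also lets the compiler warn when a new command value is added but not mapped, which the ternary chain cannot do.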
|