Lines Matching refs:stats
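These hits read like an identifier cross-reference for the symbol stats, apparently over the Open vSwitch flow table code (net/openvswitch/flow_table.c in the Linux tree, judging by the function names). The leading number on each line is the source line number in that file. The groups below are walked through in turn, each with a short sketch of the pattern it traces.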
75 struct sw_flow_stats *stats; in ovs_flow_alloc() local
82 flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids]; in ovs_flow_alloc()
85 stats = kmem_cache_alloc_node(flow_stats_cache, in ovs_flow_alloc()
88 if (!stats) in ovs_flow_alloc()
91 spin_lock_init(&stats->lock); in ovs_flow_alloc()
93 RCU_INIT_POINTER(flow->stats[0], stats); in ovs_flow_alloc()
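The first group (lines 75-93) is ovs_flow_alloc(): the flow carries an array of nr_cpu_ids per-CPU stats pointers, the cpu_used_mask is placed directly after that array so one allocation covers both, and a single sw_flow_stats node is allocated up front for CPU 0 and published with RCU_INIT_POINTER(). A minimal sketch of that step, with the error path and allocation flags filled in from the upstream function (an assumption, not shown in the hits above):

	struct sw_flow_stats *stats;

	/* The cpumask lives right after the nr_cpu_ids pointer slots. */
	flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];

	/* Allocate the default stats node, preferring NUMA node 0. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	/* No concurrent readers exist yet, so the cheap initializer is
	 * enough; no memory barrier is needed. */
	RCU_INIT_POINTER(flow->stats[0], stats);
	cpumask_set_cpu(0, flow->cpu_used_mask);

Other CPUs only get a stats node later, when they actually write stats for the flow, which keeps cold flows cheap on machines with many CPUs.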
120 if (flow->stats[cpu]) in flow_free()
122 (struct sw_flow_stats __force *)flow->stats[cpu]); in flow_free()
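The flow_free() hits (lines 120-122) are the matching teardown: only CPUs set in cpu_used_mask ever received a stats node, so the loop walks that mask rather than every possible CPU, and the __force cast strips the __rcu annotation, which is safe because no RCU readers can still hold the flow at this point. A sketch of that loop, assuming that precondition:

	int cpu;

	/* Visit only the CPUs that ever allocated a stats node. */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);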
203 struct mask_array_stats *stats; in tbl_mask_array_reset_counters() local
207 stats = per_cpu_ptr(ma->masks_usage_stats, cpu); in tbl_mask_array_reset_counters()
209 start = u64_stats_fetch_begin(&stats->syncp); in tbl_mask_array_reset_counters()
210 counter = stats->usage_cntrs[i]; in tbl_mask_array_reset_counters()
211 } while (u64_stats_fetch_retry(&stats->syncp, start)); in tbl_mask_array_reset_counters()
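Lines 203-211 are the reader side of the u64_stats seqcount protocol, in tbl_mask_array_reset_counters(). The per-CPU usage counters are plain u64s updated without atomics, so a cross-CPU reader brackets the read with u64_stats_fetch_begin()/u64_stats_fetch_retry() and retries if a writer raced with it; and since remote counters cannot safely be zeroed, the reset instead snapshots the current totals into masks_usage_zero_cntr and subtracts them later. A sketch of the snapshot loop; the field names come from the hits above, while the loop bounds are filled in from the upstream function (an assumption):

	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				/* Retry if a writer updated mid-read. */
				start = u64_stats_fetch_begin(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry(&stats->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}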
735 struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats); in flow_lookup() local
745 u64_stats_update_begin(&stats->syncp); in flow_lookup()
746 stats->usage_cntrs[*index]++; in flow_lookup()
747 u64_stats_update_end(&stats->syncp); in flow_lookup()
766 u64_stats_update_begin(&stats->syncp); in flow_lookup()
767 stats->usage_cntrs[*index]++; in flow_lookup()
768 u64_stats_update_end(&stats->syncp); in flow_lookup()
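Lines 735-768 are the writer side, on the packet fast path. flow_lookup() takes this_cpu_ptr() so each CPU only ever bumps its own counter, and wraps the increment in u64_stats_update_begin()/u64_stats_update_end(); on 64-bit kernels these effectively compile away, while on 32-bit they take the seqcount the readers above spin on. The increment appears twice in the listing because there are two hit sites: the cached-index fast path and the full scan over the mask array. A sketch of one hit site, with the lookup call filled in from the upstream function (an assumption):

	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);

	flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
	if (flow) {
		/* Per-CPU counter, so no lock: just mark the update
		 * window for 32-bit readers. */
		u64_stats_update_begin(&stats->syncp);
		stats->usage_cntrs[*index]++;
		u64_stats_update_end(&stats->syncp);
		return flow;
	}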
1131 struct mask_array_stats *stats; in ovs_flow_masks_rebalance() local
1135 stats = per_cpu_ptr(ma->masks_usage_stats, cpu); in ovs_flow_masks_rebalance()
1137 start = u64_stats_fetch_begin(&stats->syncp); in ovs_flow_masks_rebalance()
1138 counter = stats->usage_cntrs[i]; in ovs_flow_masks_rebalance()
1139 } while (u64_stats_fetch_retry(&stats->syncp, start)); in ovs_flow_masks_rebalance()
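The last group (lines 1131-1139) is the same fetch/retry reader, this time in ovs_flow_masks_rebalance(), which periodically sums each mask's counter across all CPUs and subtracts the zero-point snapshot taken at reset to decide a new, hit-ordered mask layout. A sketch of the per-mask aggregation; the masks_and_count array and the final subtraction are filled in from the upstream function (an assumption):

	for_each_possible_cpu(cpu) {
		struct mask_array_stats *stats;
		unsigned int start;
		u64 counter;

		stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			counter = stats->usage_cntrs[i];
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		masks_and_count[i].counter += counter;
	}

	/* Make the total relative to the last counter reset. */
	masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];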