
Searched refs:counter (Results 1 – 25 of 137) sorted by relevance


/tools/testing/selftests/net/netfilter/
nft_meta.sh
30 counter iifcount {}
31 counter iifnamecount {}
32 counter iifgroupcount {}
33 counter iiftypecount {}
36 counter imarkcounter {}
37 counter icpu0counter {}
41 counter oifcount {}
42 counter oifnamecount {}
44 counter oiftypecount {}
68 meta oif lo counter name "oifcount" counter
[all …]
conntrack_icmp_related.sh
116 counter unknown { }
117 counter related { }
123 counter name "unknown" drop
131 counter unknown { }
132 counter related { }
133 counter redir4 { }
134 counter redir6 { }
144 counter name "unknown" drop
151 counter unknown { }
152 counter new { }
[all …]
conntrack_vrf.sh
92 iif { veth0, tvrf } counter meta nftrace set 1
93 iif veth0 counter ct zone set 1 counter return
94 iif tvrf counter ct zone set 2 counter return
95 ip protocol icmp counter
96 notrack counter
102 oif veth0 counter ct zone set 1 counter return
103 oif tvrf counter ct zone set 2 counter return
104 notrack counter
146 oif tvrf ct state untracked counter
151 oif tvrf ct state untracked counter
[all …]
nft_nat.sh
64 local counter=$2
69 ip netns exec "$ns" nft list counter inet filter "$counter" 1>&2
1068 counter ns0in {}
1069 counter ns1in {}
1070 counter ns2in {}
1072 counter ns0out {}
1073 counter ns1out {}
1074 counter ns2out {}
1076 counter ns0in6 {}
1077 counter ns1in6 {}
[all …]
nft_fib.sh
93 fib daddr type unicast counter
95 fib daddr type local counter
96 fib daddr . oif type local counter
349 counter
355 counter
361 counter
367 counter
373 counter
379 counter
385 counter
[all …]
/tools/testing/selftests/kvm/riscv/
sbi_pmu_test.c
278 unsigned long counter; in test_pmu_event() local
283 counter_value_pre = read_counter(counter, ctrinfo_arr[counter]); in test_pmu_event()
288 stop_counter(counter, 0); in test_pmu_event()
290 counter_value_post = read_counter(counter, ctrinfo_arr[counter]); in test_pmu_event()
301 stop_counter(counter, 0); in test_pmu_event()
302 counter_value_pre = read_counter(counter, ctrinfo_arr[counter]); in test_pmu_event()
306 counter_value_post = read_counter(counter, ctrinfo_arr[counter]); in test_pmu_event()
316 counter_value_post = read_counter(counter, ctrinfo_arr[counter]); in test_pmu_event()
326 unsigned long counter; in test_pmu_event_snapshot() local
332 counter_value_pre = read_counter(counter, ctrinfo_arr[counter]); in test_pmu_event_snapshot()
[all …]
/tools/testing/selftests/drivers/net/mlxsw/
qos_lib.sh
21 local counter=$1; shift # Counter to use for measurement
35 local t0=$(ethtool_stats_get $host_in $counter)
36 local u0=$(ethtool_stats_get $sw_in $counter)
38 local t1=$(ethtool_stats_get $host_in $counter)
39 local u1=$(ethtool_stats_get $sw_in $counter)
/tools/testing/selftests/bpf/progs/
cgroup_storage.c
15 __u64 *counter; in bpf_prog() local
17 counter = bpf_get_local_storage(&cgroup_storage, 0); in bpf_prog()
18 __sync_fetch_and_add(counter, 1); in bpf_prog()
21 return (*counter & 1); in bpf_prog()
bpf_iter_netlink.c
45 s->sk_rmem_alloc.counter, in dump_netlink()
46 s->sk_wmem_alloc.refs.counter - 1, in dump_netlink()
47 nlk->cb_running, s->sk_refcnt.refs.counter); in dump_netlink()
60 BPF_SEQ_PRINTF(seq, "%-8u %-8lu\n", s->sk_drops.counter, ino); in dump_netlink()
arena_spin_lock.c
29 int counter; variable
42 if (counter != limit) in prog()
43 counter++; in prog()
bpf_iter_udp4.c
55 rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; in dump_udp4()
62 inet->sk.sk_wmem_alloc.refs.counter - 1, in dump_udp4()
66 inet->sk.sk_refcnt.refs.counter, udp_sk, in dump_udp4()
67 inet->sk.sk_drops.counter); in dump_udp4()
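The cgroup_storage.c hits above come from a small BPF program that fetches the per-cgroup storage slot and bumps a counter atomically on every packet. Below is a minimal sketch of that pattern written against the usual libbpf conventions (BTF-style map definition, bpf_get_local_storage(), __sync_fetch_and_add()); it illustrates the pattern rather than reproducing the selftest verbatim, and the program/section names are placeholders.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* One __u64 packet counter per attached cgroup. */
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} cgroup_storage SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	__u64 *counter;

	/* Pointer to this cgroup's storage slot; valid for the attached cgroup. */
	counter = bpf_get_local_storage(&cgroup_storage, 0);
	__sync_fetch_and_add(counter, 1);

	/* Pass odd-numbered packets, drop even ones, as in the selftest hit. */
	return *counter & 1;
}

char _license[] SEC("license") = "GPL";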
/tools/arch/x86/include/asm/
atomic.h
29 return READ_ONCE((v)->counter); in atomic_read()
41 v->counter = i; in atomic_set()
53 : "+m" (v->counter)); in atomic_inc()
66 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); in atomic_dec_and_test()
71 return cmpxchg(&v->counter, old, new); in atomic_cmpxchg()
/tools/perf/util/
bpf_counter.c
40 struct bpf_counter *counter; in bpf_counter_alloc() local
42 counter = zalloc(sizeof(*counter)); in bpf_counter_alloc()
43 if (counter) in bpf_counter_alloc()
45 return counter; in bpf_counter_alloc()
54 list_del_init(&counter->list); in bpf_program_profiler__destroy()
56 free(counter); in bpf_program_profiler__destroy()
105 struct bpf_counter *counter; in bpf_program_profiler_load_one() local
117 if (!counter) { in bpf_program_profiler_load_one()
156 counter->skel = skel; in bpf_program_profiler_load_one()
164 free(counter); in bpf_program_profiler_load_one()
[all …]
sideband_evlist.c
99 struct evsel *counter; in evlist__start_sb_thread() local
110 evlist__for_each_entry(evlist, counter) in evlist__start_sb_thread()
111 evsel__set_sample_id(counter, can_sample_identifier); in evlist__start_sb_thread()
116 evlist__for_each_entry(evlist, counter) { in evlist__start_sb_thread()
117 if (evsel__open(counter, evlist->core.user_requested_cpus, in evlist__start_sb_thread()
125 evlist__for_each_entry(evlist, counter) { in evlist__start_sb_thread()
126 if (evsel__enable(counter)) in evlist__start_sb_thread()
stat-display.c
944 if (verbose == 0 && counter->skippable && !counter->supported) in should_skip_zero_counter()
1001 os->evsel = counter; in print_counter_aggrdata()
1095 struct evsel *counter; in print_aggr() local
1167 struct evsel *counter; in print_no_aggr_metric() local
1184 os->evsel = counter; in print_no_aggr_metric()
1245 struct evsel *counter; in print_metric_headers() local
1272 config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) in print_metric_headers()
1275 os.evsel = counter; in print_metric_headers()
1568 struct evsel *counter; in print_cgroup_counter() local
1590 struct evsel *counter; in evlist__print_counters() local
[all …]
stat.c
315 if (!counter->per_pkg) in check_per_pkg()
326 counter->per_pkg_mask = mask; in check_per_pkg()
462 struct evsel *counter) in process_counter_maps() argument
465 int ncpus = evsel__nr_cpus(counter); in process_counter_maps()
480 struct evsel *counter) in perf_stat_process_counter() argument
482 struct perf_stat_evsel *ps = counter->stats; in perf_stat_process_counter()
486 if (counter->per_pkg) in perf_stat_process_counter()
487 evsel__zero_per_pkg(counter); in perf_stat_process_counter()
653 struct evsel *counter; in perf_event__process_stat_event() local
661 if (!counter) { in perf_event__process_stat_event()
[all …]
/tools/include/asm-generic/
atomic-gcc.h
26 return READ_ONCE((v)->counter); in atomic_read()
38 v->counter = i; in atomic_set()
49 __sync_add_and_fetch(&v->counter, 1); in atomic_inc()
62 return __sync_sub_and_fetch(&v->counter, 1) == 0; in atomic_dec_and_test()
70 return cmpxchg(&(v)->counter, oldval, newval); in atomic_cmpxchg()
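Both the x86 atomic.h and the generic atomic-gcc.h hits above operate on the same plain int `counter` field inside atomic_t. The following is a small, self-contained sketch of that layout and of the accessors the hits show, modeled on the __sync-builtin flavour for portability; the real tools headers use READ_ONCE() and, on x86, inline asm, so treat this as an approximation rather than the headers themselves.

#include <stdio.h>

typedef struct { int counter; } atomic_t;

static inline int atomic_read(const atomic_t *v)
{
	return *(const volatile int *)&v->counter;   /* stand-in for READ_ONCE() */
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}

static inline void atomic_inc(atomic_t *v)
{
	__sync_add_and_fetch(&v->counter, 1);
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	return __sync_sub_and_fetch(&v->counter, 1) == 0;
}

int main(void)
{
	atomic_t refs;

	atomic_set(&refs, 2);
	atomic_inc(&refs);                           /* counter is now 3 */
	printf("refs = %d\n", atomic_read(&refs));
	while (!atomic_dec_and_test(&refs))          /* counts down to 0 */
		;
	printf("last reference dropped\n");
	return 0;
}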
/tools/perf/
builtin-stat.c
369 if (!counter->err) in read_affinity_counters()
370 counter->err = read_counter_cpu(counter, evlist_cpu_itr.cpu_map_idx); in read_affinity_counters()
409 if (counter->err) in process_counters()
411 if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) in process_counters()
413 counter->err = 0; in process_counters()
635 if ((evsel__leader(counter) != counter) || in stat_handle_error()
671 if ((evsel__leader(counter) != counter) || in stat_handle_error()
740 if (counter->reset_group || counter->errored) in __run_perf_stat()
756 evsel__leader(counter) != counter && in __run_perf_stat()
790 if (!counter->reset_group && !counter->errored) in __run_perf_stat()
[all …]
design.txt
19 There's one file descriptor per virtual counter used.
35 When creating a new counter fd, 'perf_event_attr' is:
158 A read() on a counter returns the current value of the counter and possible
241 counter to be specific to a task:
254 cpu == -1: the counter counts on all CPUs
258 A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts
263 A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts
270 counter group has one counter which is the group "leader". The leader
411 so many counter overflow events.
420 An individual counter can be enabled with
[all …]
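The design.txt lines quoted above describe how a counter file descriptor is scoped by the pid and cpu arguments to perf_event_open() and how a read() returns its current value. Here is a minimal hedged sketch of that usage: the syscall, the perf_event_attr fields and the ioctls are the real perf API, while the workload and error handling are illustrative only.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* There is no glibc wrapper, so invoke the syscall directly. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/*
	 * pid == 0, cpu == -1: a per-task counter for the calling task that
	 * follows it across CPUs; per the quoted rules, pid > 0 / cpu == -1
	 * does the same for another task, and pid == -1 / cpu == x gives a
	 * per-CPU counter on CPU x instead.
	 */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload to be measured goes here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	/* A read() on a counter returns its current value. */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}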
/tools/testing/selftests/sync/
sync_stress_parallelism.c
37 int counter; member
61 ASSERT(test_data_two_threads.counter == i * 2 + thread_id, in test_stress_two_threads_shared_timeline_thread()
63 test_data_two_threads.counter++; in test_stress_two_threads_shared_timeline_thread()
85 test_data_two_threads.counter = 0; in test_stress_two_threads_shared_timeline()
104 ASSERT(test_data_two_threads.counter == in test_stress_two_threads_shared_timeline()
/tools/perf/util/bpf_skel/
bpf_prog_profiler.bpf.c
60 if (before && before->counter) { in fexit_update_maps()
63 diff.counter = after->counter - before->counter; in fexit_update_maps()
69 accum->counter += diff.counter; in fexit_update_maps()
/tools/testing/selftests/turbostat/
added_perf_counters.py
56 for counter in PERF_COUNTERS_CANDIDATES:
57 if has_perf_counter_access(counter.get_perf_event_name()):
58 present_perf_counters.append(counter)
93 for counter in present_perf_counters:
94 if counter.subsys == 'cstate_core':
96 elif counter.subsys == 'cstate_pkg':
102 column_name = counter.event
104 cparams = counter.get_turbostat_perf_id(
/tools/bpf/bpftool/skeleton/
profiler.bpf.c
8 __u64 counter; member
83 if (before && before->counter) { in fexit_update_maps()
86 diff.counter = after->counter - before->counter; in fexit_update_maps()
92 accum->counter += diff.counter; in fexit_update_maps()
/tools/counter/
Makefile
29 $(OUTPUT)include/linux/counter.h: ../../include/uapi/linux/counter.h
31 ln -sf $(CURDIR)/../../include/uapi/linux/counter.h $@
33 prepare: $(OUTPUT)include/linux/counter.h
49 rm -rf $(OUTPUT)include/linux/counter.h
/tools/testing/selftests/bpf/
bpf_atomic.h
126 #define atomic_read(p) READ_ONCE((p)->counter)
129 smp_cond_load_relaxed_label(&(p)->counter, cond_expr, label)
132 smp_cond_load_acquire_label(&(p)->counter, cond_expr, label)
135 try_cmpxchg_relaxed(&(p)->counter, pold, new)
138 try_cmpxchg_acquire(&(p)->counter, pold, new)

