/linux/drivers/infiniband/core/uverbs_std_types_counters.c
    48    ret = counters->device->ops.destroy_counters(counters);   in uverbs_free_counters()
    51    kfree(counters);   in uverbs_free_counters()
    61    struct ib_counters *counters;   in UVERBS_HANDLER()  (local)
    73    if (!counters)   in UVERBS_HANDLER()
    76    counters->device = ib_dev;   in UVERBS_HANDLER()
    77    counters->uobject = uobj;   in UVERBS_HANDLER()
    78    uobj->object = counters;   in UVERBS_HANDLER()
    79    atomic_set(&counters->usecnt, 0);   in UVERBS_HANDLER()
    83    kfree(counters);   in UVERBS_HANDLER()
    93    struct ib_counters *counters =   in UVERBS_HANDLER()  (local)
    [all …]
/linux/lib/percpu_counter.c
    98    count = this_cpu_read(*fbc->counters);   in percpu_counter_add_batch()
    133   this_cpu_add(*fbc->counters, amount);   in percpu_counter_add_batch()
    154   __this_cpu_sub(*fbc->counters, count);   in percpu_counter_sync()
    194   s32 __percpu *counters;   in __percpu_counter_init_many()  (local)
    197   counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));   in __percpu_counter_init_many()
    200   if (!counters) {   in __percpu_counter_init_many()
    201   fbc[0].counters = NULL;   in __percpu_counter_init_many()
    212   fbc[i].counters = (void __percpu *)counters + i * counter_size;   in __percpu_counter_init_many()
    235   if (!fbc[0].counters)   in percpu_counter_destroy_many()
    248   free_percpu(fbc[0].counters);   in percpu_counter_destroy_many()
    [all …]
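The percpu_counter.c hits above are the library internals; for context, a minimal sketch of how a caller typically uses the public API from <linux/percpu_counter.h> (the nr_widgets counter and the init/exit hooks are hypothetical, only the API calls themselves are real):

#include <linux/percpu_counter.h>

/* Hypothetical caller: one approximate counter shared by all CPUs. */
static struct percpu_counter nr_widgets;

static int widgets_init(void)
{
	/* Allocates the per-CPU s32 slots that fbc->counters points at. */
	return percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
}

static void widget_created(void)
{
	/* Cheap: touches only this CPU's slot until the batch threshold. */
	percpu_counter_inc(&nr_widgets);
}

static s64 widgets_count(void)
{
	/* Folds every CPU's slot into the central count. */
	return percpu_counter_sum(&nr_widgets);
}

static void widgets_exit(void)
{
	percpu_counter_destroy(&nr_widgets);
}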
/linux/net/netfilter/xt_connbytes.c
    30    const struct nf_conn_counter *counters;   in connbytes_mt()  (local)
    40    counters = acct->counter;   in connbytes_mt()
    48    what = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);   in connbytes_mt()
    52    what += atomic64_read(&counters[IP_CT_DIR_REPLY].packets);   in connbytes_mt()
    59    what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);   in connbytes_mt()
    62    what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);   in connbytes_mt()
    65    what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);   in connbytes_mt()
    66    what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);   in connbytes_mt()
    77    bytes = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);   in connbytes_mt()
    82    atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);   in connbytes_mt()
    [all …]
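For orientation, a simplified reconstruction of the selection logic the connbytes_mt() hits above belong to: the conntrack accounting extension keeps one atomic64 packet/byte pair per direction, and the match reads one direction or sums both. This is an abridged sketch following the visible lines, not the exact upstream control flow, and the XT_CONNBYTES_DIR_* names are from the uapi xt_connbytes header as best recalled:

/* Abridged sketch of the per-direction selection, not the upstream body. */
static u64 connbytes_bytes(const struct nf_conn_counter *counters,
			   unsigned int direction)
{
	u64 what;

	switch (direction) {
	case XT_CONNBYTES_DIR_ORIGINAL:
		what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
		break;
	case XT_CONNBYTES_DIR_REPLY:
		what = atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
		break;
	default: /* XT_CONNBYTES_DIR_BOTH: sum the two directions */
		what = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].bytes);
		what += atomic64_read(&counters[IP_CT_DIR_REPLY].bytes);
		break;
	}
	return what;
}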
/linux/tools/testing/selftests/net/tcp_ao/lib/proc.c
    18    struct netstat_counter *counters;   (member)
    52    ret->counters = NULL;   in lookup_get()
    81    type->counters = reallocarray(type->counters,   in netstat_read_type()
    84    if (!type->counters)   in netstat_read_type()
    133   type->counters = reallocarray(type->counters, i + 1,   in snmp6_read()
    135   if (!type->counters)   in snmp6_read()
    137   nc = &type->counters[i];   in snmp6_read()
    197   free(ns->counters);   in netstat_free()
    214   nsb->counters[i].name, a, nsb->counters[i].val);   in __netstat_print_diff()
    233   if (strcmp(nsb->counters[i].name, nsa->counters[j].name)) {   in netstat_print_diff()
    [all …]
/linux/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
    1873   memset(counters, 0, sizeof(*counters));   in aq_mss_get_egress_sc_counters()
    1927   memset(counters, 0, sizeof(*counters));   in aq_mss_get_egress_sa_counters()
    1956   counters->untagged_pkts[0] =   in get_egress_common_counters()
    1958   counters->untagged_pkts[1] =   in get_egress_common_counters()
    1989   memset(counters, 0, sizeof(*counters));   in aq_mss_get_egress_common_counters()
    2166   memset(counters, 0, sizeof(*counters));   in aq_mss_get_ingress_sa_counters()
    2210   counters->untagged_pkts[0] =   in get_ingress_common_counters()
    2212   counters->untagged_pkts[1] =   in get_ingress_common_counters()
    2270   counters->too_long_pkts[0] =   in get_ingress_common_counters()
    2272   counters->too_long_pkts[1] =   in get_ingress_common_counters()
    [all …]
/linux/samples/cgroup/memcg_event_listener.c
    45    struct memcg_counters counters;   (member)
    54    printf("\tlow: %ld\n", counters->low);   in print_memcg_counters()
    56    printf("\tmax: %ld\n", counters->max);   in print_memcg_counters()
    57    printf("\toom: %ld\n", counters->oom);   in print_memcg_counters()
    109   struct memcg_counters *counters = &events->counters;   in read_memcg_events()  (local)
    118   .old = &counters->low,   in read_memcg_events()
    123   .old = &counters->high,   in read_memcg_events()
    128   .old = &counters->max,   in read_memcg_events()
    133   .old = &counters->oom,   in read_memcg_events()
    138   .old = &counters->oom_kill,   in read_memcg_events()
    [all …]
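The sample above watches the cgroup v2 memory.events counters (low, high, max, oom, oom_kill). As a standalone illustration of where those numbers come from, a small sketch that parses one "name value" line out of a memory.events file; the path and helper name are made up for the example, only the file format is the real kernel interface:

#include <stdio.h>
#include <string.h>

/*
 * Illustrative helper: return the value of one "name value" line in a
 * cgroup v2 memory.events file, or -1 if the counter is not found.
 */
static long read_memcg_event(const char *events_path, const char *name)
{
	char line[256];
	long val = -1;
	FILE *f = fopen(events_path, "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		char key[64];
		long v;

		if (sscanf(line, "%63s %ld", key, &v) == 2 &&
		    strcmp(key, name) == 0) {
			val = v;
			break;
		}
	}
	fclose(f);
	return val;
}

int main(void)
{
	/* Hypothetical cgroup path; the real sample takes it as an argument. */
	long oom = read_memcg_event("/sys/fs/cgroup/test/memory.events", "oom");

	printf("oom events so far: %ld\n", oom);
	return 0;
}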
/linux/Documentation/translations/zh_CN/core-api/local_ops.rst
    93    static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
    105   local_inc(&get_cpu_var(counters));
    106   put_cpu_var(counters);
    110   local_inc(this_cpu_ptr(&counters));
    123   sum += local_read(&per_cpu(counters, cpu));
    143   static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
    152   local_inc(this_cpu_ptr(&counters));
    157   * local_inc(&get_cpu_var(counters));
    158   * put_cpu_var(counters);
    166   /* Increment the counters */
    [all …]
/linux/fs/bcachefs/sb-counters.c
    32    struct bch_sb_field_counters *ctrs = field_to_type(f, counters);   in bch2_sb_counters_to_text()
    43    struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);   in bch2_sb_counters_to_cpu()
    53    percpu_u64_set(&c->counters[i], val);   in bch2_sb_counters_to_cpu()
    61    struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);   in bch2_sb_counters_from_cpu()
    67    ret = bch2_sb_field_resize(&c->disk_sb, counters,   in bch2_sb_counters_from_cpu()
    78    ctrs->d[i] = cpu_to_le64(percpu_u64_get(&c->counters[i]));   in bch2_sb_counters_from_cpu()
    84    free_percpu(c->counters);   in bch2_fs_counters_exit()
    89    c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));   in bch2_fs_counters_init()
    90    if (!c->counters)   in bch2_fs_counters_init()
/linux/kernel/gcov/gcc_base.c
    46    void __gcov_merge_add(gcov_type *counters, unsigned int n_counters)   in __gcov_merge_add()  (argument)
    52    void __gcov_merge_single(gcov_type *counters, unsigned int n_counters)   in __gcov_merge_single()  (argument)
    58    void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters)   in __gcov_merge_delta()  (argument)
    64    void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters)   in __gcov_merge_ior()  (argument)
    70    void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)   in __gcov_merge_time_profile()  (argument)
    76    void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)   in __gcov_merge_icall_topn()  (argument)
/linux/kernel/gcov/clang.c
    75    u64 *counters;   (member)
    128   void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)   in llvm_gcda_emit_arcs()  (argument)
    134   info->counters = counters;   in llvm_gcda_emit_arcs()
    230   memset(fn->counters, 0,   in gcov_info_reset()
    231   sizeof(fn->counters[0]) * fn->num_counters);   in gcov_info_reset()
    282   dfn_ptr->counters[i] += sfn_ptr->counters[i];   in gcov_info_add()
    297   cv_size = fn->num_counters * sizeof(fn->counters[0]);   in gcov_fn_info_dup()
    298   fn_dup->counters = kvmalloc(cv_size, GFP_KERNEL);   in gcov_fn_info_dup()
    299   if (!fn_dup->counters) {   in gcov_fn_info_dup()
    304   memcpy(fn_dup->counters, fn->counters, cv_size);   in gcov_fn_info_dup()
    [all …]
/linux/drivers/perf/amlogic/meson_ddr_pmu_core.c
    147   memset(&pmu->counters, 0, sizeof(pmu->counters));   in meson_ddr_perf_event_start()
    359   sum_cnter = &pmu->counters;   in dmc_irq_handler()
    376   counters.all_req,   in dmc_irq_handler()
    377   counters.all_cnt,   in dmc_irq_handler()
    378   counters.channel_cnt[0],   in dmc_irq_handler()
    379   counters.channel_cnt[1],   in dmc_irq_handler()
    380   counters.channel_cnt[2],   in dmc_irq_handler()
    381   counters.channel_cnt[3],   in dmc_irq_handler()
    383   pmu->counters.all_req,   in dmc_irq_handler()
    384   pmu->counters.all_cnt,   in dmc_irq_handler()
    [all …]
/linux/Documentation/arch/arm64/amu.rst
    22    counters intended for system management use. The AMU extension provides a
    27    of four fixed and architecturally defined 64-bit event counters.
    37    When in WFI or WFE these counters do not increment.
    41    implement additional architected event counters.
    44    64-bit event counters.
    46    On cold reset all counters reset to 0.
    59    counters, only the presence of the extension.
    66    - Enable the counters. If not enabled these will read as 0.
    74    commonly, the counters will read as 0, indicating that they are not
    91    Auxiliary platform specific counters can be accessed using
    [all …]
/linux/drivers/gpu/drm/v3d/v3d_perfmon.c
    200   const struct v3d_perf_counter_desc *counters = NULL;   in v3d_perfmon_init()  (local)
    204   counters = v3d_v71_performance_counters;   in v3d_perfmon_init()
    207   counters = v3d_v42_performance_counters;   in v3d_perfmon_init()
    212   v3d->perfmon_info.counters = counters;   in v3d_perfmon_init()
    243   u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0);   in v3d_perfmon_start()
    246   channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,   in v3d_perfmon_start()
    249   channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,   in v3d_perfmon_start()
    252   channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,   in v3d_perfmon_start()
    348   if (req->counters[i] >= v3d->perfmon_info.max_counters)   in v3d_perfmon_create_ioctl()
    358   perfmon->counters[i] = req->counters[i];   in v3d_perfmon_create_ioctl()
    [all …]
/linux/net/ipv4/netfilter/arp_tables.c
    662    if (counters == NULL)   in alloc_counters()
    667    return counters;   in alloc_counters()
    682    if (IS_ERR(counters))   in copy_entries_to_user()
    699    &counters[num],   in copy_entries_to_user()
    713    vfree(counters);   in copy_entries_to_user()
    895    if (!counters) {   in __do_replace()
    939    vfree(counters);   in __do_replace()
    946    vfree(counters);   in __do_replace()
    1141   memcpy(&de->counters, &e->counters, sizeof(e->counters));   in compat_copy_entry_from_user()
    1319   copy_to_user(&ce->counters, &counters[i],   in compat_copy_entry_to_user()
    [all …]
/linux/net/ipv4/netfilter/ip_tables.c
    801    if (counters == NULL)   in alloc_counters()
    806    return counters;   in alloc_counters()
    822    if (IS_ERR(counters))   in copy_entries_to_user()
    841    &counters[num],   in copy_entries_to_user()
    866    vfree(counters);   in copy_entries_to_user()
    1047   if (!counters) {   in __do_replace()
    1090   vfree(counters);   in __do_replace()
    1097   vfree(counters);   in __do_replace()
    1232   copy_to_user(&ce->counters, &counters[i],   in compat_copy_entry_to_user()
    1368   memcpy(&de->counters, &e->counters, sizeof(e->counters));   in compat_copy_entry_from_user()
    [all …]
/linux/include/linux/alloc_tag.h
    30    struct alloc_tag_counters __percpu *counters;   (member)
    81    .counters = &_shared_alloc_tag };
    90    .counters = &_alloc_tag_cntr };
    110   counter = per_cpu_ptr(tag->counters, cpu);   in alloc_tag_read()
    159   this_cpu_inc(tag->counters->calls);   in alloc_tag_ref_set()
    166   this_cpu_add(tag->counters->bytes, bytes);   in alloc_tag_add()
    184   this_cpu_sub(tag->counters->bytes, bytes);   in alloc_tag_sub()
    185   this_cpu_dec(tag->counters->calls);   in alloc_tag_sub()
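The alloc_tag.h hits show the usual kernel split for such statistics: writers touch only the local CPU's slot through this_cpu_*() and a reader folds every slot with per_cpu_ptr(), as alloc_tag_read() does on line 110. A minimal sketch of that pattern with a hypothetical my_counters struct (not the actual alloc_tag code):

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU statistics, mirroring bytes/calls in alloc_tag. */
struct my_counters {
	u64 bytes;
	u64 calls;
};

static DEFINE_PER_CPU(struct my_counters, my_stats);

static inline void my_account(size_t bytes)
{
	/* Lockless fast path: only this CPU's slot is touched. */
	this_cpu_add(my_stats.bytes, bytes);
	this_cpu_inc(my_stats.calls);
}

static struct my_counters my_read(void)
{
	struct my_counters sum = {};
	int cpu;

	/* Slow path: fold every CPU's slot, as alloc_tag_read() does. */
	for_each_possible_cpu(cpu) {
		struct my_counters *c = per_cpu_ptr(&my_stats, cpu);

		sum.bytes += c->bytes;
		sum.calls += c->calls;
	}
	return sum;
}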
/linux/net/ipv6/netfilter/ip6_tables.c
    817    if (counters == NULL)   in alloc_counters()
    822    return counters;   in alloc_counters()
    838    if (IS_ERR(counters))   in copy_entries_to_user()
    857    &counters[num],   in copy_entries_to_user()
    882    vfree(counters);   in copy_entries_to_user()
    1064   if (!counters) {   in __do_replace()
    1107   vfree(counters);   in __do_replace()
    1114   vfree(counters);   in __do_replace()
    1248   copy_to_user(&ce->counters, &counters[i],   in compat_copy_entry_to_user()
    1384   memcpy(&de->counters, &e->counters, sizeof(e->counters));   in compat_copy_entry_from_user()
    [all …]
/linux/tools/perf/util/values.c
    33    values->counters = 0;   in perf_read_values_init()
    60    for (i = 0; i < values->counters; i++)   in perf_read_values_destroy()
    167   for (i = 0; i < values->counters; i++)   in perf_read_values__findnew_counter()
    177   i = values->counters++;   in perf_read_values__findnew_counter()
    215   for (j = 0; j < values->counters; j++)   in perf_read_values__display_pretty()
    226   for (j = 0; j < values->counters; j++) {   in perf_read_values__display_pretty()
    234   for (j = 0; j < values->counters; j++)   in perf_read_values__display_pretty()
    241   for (j = 0; j < values->counters; j++)   in perf_read_values__display_pretty()
    269   for (j = 0; j < values->counters; j++) {   in perf_read_values__display_raw()
    278   for (j = 0; j < values->counters; j++) {   in perf_read_values__display_raw()
    [all …]
/linux/Documentation/core-api/local_ops.rst
    37    and for various performance monitoring counters.
    107   local_inc(&get_cpu_var(counters));
    108   put_cpu_var(counters);
    113   local_inc(this_cpu_ptr(&counters));
    117   Reading the counters
    126   sum += local_read(&per_cpu(counters, cpu));
    158   local_inc(this_cpu_ptr(&counters));
    163   * local_inc(&get_cpu_var(counters));
    164   * put_cpu_var(counters);
    172   /* Increment the counters */
    [all …]
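Pulling the scattered local_ops.rst lines back together, the documented pattern is roughly the following condensed sketch (paraphrased from that document's example, not a verbatim copy):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/local.h>

/* One local_t counter per CPU, as in the local_ops.rst example. */
static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);

static void test_each(void *dummy)
{
	/* Increment the current CPU's counter; no lock, no LOCK prefix. */
	local_inc(this_cpu_ptr(&counters));
	/*
	 * An older but still correct form:
	 *   local_inc(&get_cpu_var(counters));
	 *   put_cpu_var(counters);
	 */
}

static long read_all(void)
{
	long sum = 0;
	int cpu;

	/* Remote CPUs' counters may be read, but only approximately fresh. */
	for_each_online_cpu(cpu)
		sum += local_read(&per_cpu(counters, cpu));
	return sum;
}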
/linux/drivers/accessibility/speakup/keyhelp.c
    51    u_char *kp, counters[MAXFUNCS], ch, ch1;   in build_key_data()  (local)
    56    memset(counters, 0, sizeof(counters));   in build_key_data()
    66    counters[*kp]++;   in build_key_data()
    70    if (counters[i] == 0)   in build_key_data()
    73    offset += (counters[i] + 1);   in build_key_data()
    90    counters[ch1]--;   in build_key_data()
    94    p_key = key_data + offset + counters[ch1];   in build_key_data()
/linux/drivers/net/wireless/silabs/wfx/debug.c
    65    struct wfx_hif_mib_extended_count_table counters[3];   in wfx_counters_show()  (local)
    67    for (i = 0; i < ARRAY_SIZE(counters); i++) {   in wfx_counters_show()
    68    ret = wfx_hif_get_counters_table(wdev, i, counters + i);   in wfx_counters_show()
    79    le32_to_cpu(counters[2].count_##name), \   in wfx_counters_show()
    80    le32_to_cpu(counters[0].count_##name), \   in wfx_counters_show()
    81    le32_to_cpu(counters[1].count_##name))   in wfx_counters_show()
    117   for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++)   in wfx_counters_show()
    119   le32_to_cpu(counters[2].reserved[i]),   in wfx_counters_show()
    120   le32_to_cpu(counters[0].reserved[i]),   in wfx_counters_show()
    121   le32_to_cpu(counters[1].reserved[i]));   in wfx_counters_show()
/linux/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
    15    if ! perf stat -a --bpf-counters --for-each-cgroup / true > /dev/null 2>&1; then
    18    perf --no-pager stat -a --bpf-counters --for-each-cgroup / true || true
    51    …check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -…
    63    …check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} …
/linux/drivers/scsi/elx/efct/efct_xport.c
    118   counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;   in efct_xport_link_stats_cb()
    130   counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;   in efct_xport_host_stats_cb()
    132   counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;   in efct_xport_host_stats_cb()
    134   counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;   in efct_xport_host_stats_cb()
    136   counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;   in efct_xport_host_stats_cb()
    143   struct efct_hw_link_stat_counts *counters,   in efct_xport_async_link_stats_cb()  (argument)
    157   counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;   in efct_xport_async_link_stats_cb()
    162   struct efct_hw_host_stat_counts *counters,   in efct_xport_async_host_stats_cb()  (argument)
    168   counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;   in efct_xport_async_host_stats_cb()
    170   counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;   in efct_xport_async_host_stats_cb()
    [all …]
/linux/tools/perf/design.txt
    5     Performance counters are special hardware registers available on most modern
    15    provides "virtual" 64-bit counters, regardless of the width of the
    16    underlying hardware counters.
    18    Performance counters are accessed via special file descriptors.
    32    Multiple counters can be kept open at a time, and the counters
    131   * does not support performance counters. These counters measure various
    152   Counters come in two flavours: counting counters and sampling
    222   counters.
    249   pid < 0: all tasks are counted (per cpu counters)
    301   * Bits needed to read the hw counters in user-space.
    [all …]
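design.txt above says counters are presented as virtual 64-bit values behind special file descriptors; a minimal user-space sketch of that model using the standard perf_event_open(2) syscall interface (the API is real, the program itself is only an illustration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	/* One counting counter for this task, on any CPU. */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	printf("measuring this printf\n");

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	/* The kernel always returns a 64-bit value, whatever the HW width. */
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;
	printf("instructions: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}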
/linux/Documentation/admin-guide/perf/thunderx2-pmu.rst
    13    The DMC and L3C support up to 4 counters, while the CCPI2 supports up to 8
    14    counters. Counters are independently programmable to different events and
    15    can be started and stopped individually. None of the counters support an
    16    overflow interrupt. DMC and L3C counters are 32-bit and read every 2 seconds.
    17    The CCPI2 counters are 64-bit and assumed not to overflow in normal operation.
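The note that the DMC and L3C counters are 32-bit and read every 2 seconds implies the driver has to fold each sample into a wider software total before the hardware value can wrap. A generic illustration of that wraparound-safe accumulation (not code from the thunderx2 driver):

#include <stdint.h>

/* Generic wraparound-safe accumulation for a free-running 32-bit counter. */
struct counter_state {
	uint32_t prev_raw;   /* last raw hardware value sampled */
	uint64_t total;      /* accumulated 64-bit count */
};

static void counter_sample(struct counter_state *s, uint32_t raw)
{
	/*
	 * Unsigned subtraction yields the correct delta even if the counter
	 * wrapped once between samples, which is why polling faster than the
	 * wrap period (here, every 2 seconds) is sufficient.
	 */
	uint32_t delta = raw - s->prev_raw;

	s->total += delta;
	s->prev_raw = raw;
}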