/tools/testing/selftests/bpf/
  uprobe_multi.c
    27: #define NAME(name, idx) PASTE(name, idx)    argument
    30: #define CALL(name, idx) NAME(name, idx)();    argument
    32: #define F(body, name, idx) body(name, idx)    argument
    35: F(body, PASTE(name, idx), 0) F(body, PASTE(name, idx), 1) F(body, PASTE(name, idx), 2) \
    36: F(body, PASTE(name, idx), 3) F(body, PASTE(name, idx), 4) F(body, PASTE(name, idx), 5) \
    37: F(body, PASTE(name, idx), 6) F(body, PASTE(name, idx), 7) F(body, PASTE(name, idx), 8) \
    41: F10(body, PASTE(name, idx), 0) F10(body, PASTE(name, idx), 1) F10(body, PASTE(name, idx), 2) \
    42: F10(body, PASTE(name, idx), 3) F10(body, PASTE(name, idx), 4) F10(body, PASTE(name, idx), 5) \
    43: F10(body, PASTE(name, idx), 6) F10(body, PASTE(name, idx), 7) F10(body, PASTE(name, idx), 8) \
    47: F100(body, PASTE(name, idx), 0) F100(body, PASTE(name, idx), 1) F100(body, PASTE(name, idx), 2) \
    [all …]
/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/
  Core.pm
    40: if (!$value && !$idx) {
    45: if ($idx && ($value & $idx) == $idx) {
    49: $string .= "$trace_flags{$idx}";
    51: $value &= ~$idx;
    70: if (!$value && !$idx) {
    74: if ($idx && ($value & $idx) == $idx) {
    80: $value &= ~$idx;
    110: print " value $idx: $flag_fields{$event}{$field}{'values'}{$idx}\n";
    122: if (!$value && !$idx) {
    126: if ($value == $idx) {
    [all …]
/tools/testing/selftests/kvm/lib/
  sparsebit.c
    287: root->idx = subtree->idx;    in node_copy_subtree()
    317: if (idx >= nodep->idx &&    in node_find()
    358: if (idx < parentp->idx) {    in node_add()
    518: if (nodep1->idx == idx)    in node_split()
    785: if (idx >= nodep->idx &&    in sparsebit_is_set()
    797: assert(idx >= nodep->idx && idx - nodep->idx < MASK_BITS);    in sparsebit_is_set()
    820: assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1);    in bit_set()
    855: assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1);    in bit_clear()
    1267: for (idx = lowest_possible - nodep1->idx; idx < MASK_BITS; idx++)    in sparsebit_next_clear()
    1311: idx != 0 && idx + num - 1 >= idx;    in sparsebit_next_set_num()
    [all …]
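sparsebit is the KVM selftests' sparse bitmap over 64-bit indices; the hits above cover its internal node bookkeeping plus the public sparsebit_is_set() and sparsebit_next_clear() queries. A minimal usage sketch, assuming the library's sparsebit_alloc(), sparsebit_set() and sparsebit_free() helpers from sparsebit.h (those three do not appear in the hits above):

```c
/* Hypothetical sketch: sparsebit_alloc()/sparsebit_set()/sparsebit_free() are
 * assumed from the selftest library's sparsebit.h; only sparsebit_is_set()
 * and sparsebit_next_clear() appear in the hits above. */
#include <assert.h>
#include "sparsebit.h"

static void sparsebit_demo(void)
{
	struct sparsebit *s = sparsebit_alloc();

	sparsebit_set(s, 0);                      /* mark bit 0 */
	sparsebit_set(s, 1);                      /* mark bit 1 */
	assert(sparsebit_is_set(s, 1));
	assert(sparsebit_next_clear(s, 0) == 2);  /* first clear bit after index 0 */

	sparsebit_free(&s);
}
```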
/tools/lib/
  find_bit.c
    29: unsigned long idx, val, sz = (size); \
    31: for (idx = 0; idx * BITS_PER_LONG < sz; idx++) { \
    34: sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz); \
    57: idx = __start / BITS_PER_LONG; \
    60: if ((idx + 1) * BITS_PER_LONG >= sz) \
    62: idx++; \
    65: sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
    76: return FIND_FIRST_BIT(addr[idx], /* nop */, size);    in _find_first_bit()
    88: return FIND_FIRST_BIT(addr1[idx] & addr2[idx], /* nop */, size);    in _find_first_and_bit()
    98: return FIND_FIRST_BIT(~addr[idx], /* nop */, size);    in _find_first_zero_bit()
    [all …]
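The FIND_FIRST_BIT() helper excerpted here walks the bitmap one unsigned long at a time and converts the first non-zero word into a bit index with __ffs(), clamping to size. Below is a simplified stand-alone sketch of the same word-at-a-time idea in plain C; it is not the kernel macro, and __builtin_ctzl (GCC/Clang) stands in for __ffs():

```c
/* Simplified illustration: return the index of the first set bit in a bitmap
 * of 'size' bits stored as an array of unsigned long, or 'size' if none. */
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long first_set_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long idx;

	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
		if (addr[idx]) {
			unsigned long bit = idx * BITS_PER_LONG + __builtin_ctzl(addr[idx]);

			return bit < size ? bit : size;	/* clamp, like the min() above */
		}
	}
	return size;	/* no set bit found */
}
```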
/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/
  Core.py
    35: for idx in sorted(flag_fields[event_name][field_name]['values']):
    36: if not value and not idx:
    39: if idx and (value & idx) == idx:
    44: value &= ~idx
    53: if not value and not idx:
    56: if (value == idx):
    73: for idx in trace_flags:
    74: if not value and not idx:
    78: if idx and (value & idx) == idx:
    81: string += trace_flags[idx]
    [all …]
/tools/lib/perf/include/perf/
  cpumap.h
    52: LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
    90: #define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \    argument
    91: for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
    92: (idx) < perf_cpu_map__nr(cpus); \
    93: (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
    95: #define perf_cpu_map__for_each_cpu_skip_any(_cpu, idx, cpus) \    argument
    96: for ((idx) = 0, (_cpu) = perf_cpu_map__cpu(cpus, idx); \
    97: (idx) < perf_cpu_map__nr(cpus); \
    98: (idx)++, (_cpu) = perf_cpu_map__cpu(cpus, idx)) \
    101: #define perf_cpu_map__for_each_idx(idx, cpus) \    argument
    [all …]
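The perf_cpu_map__for_each_cpu() iterator above pairs a map position (idx) with the CPU stored at that position. A minimal caller sketch; perf_cpu_map__new() and perf_cpu_map__put() are assumed from the rest of libperf and are not part of the hits above:

```c
/* Hypothetical caller of the iterator macro shown above. */
#include <stdio.h>
#include <perf/cpumap.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");	/* CPUs 0,1,2,3 */
	struct perf_cpu cpu;
	int idx;

	if (!cpus)
		return 1;

	/* idx is the position in the map, cpu the CPU number stored there. */
	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("map slot %d holds cpu %d\n", idx, cpu.cpu);

	perf_cpu_map__put(cpus);
	return 0;
}
```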
/tools/testing/selftests/bpf/progs/
  cgroup_preorder.c
    8: unsigned int idx;    variable
    14: if (idx < 4)    in child()
    15: result[idx++] = 1;    in child()
    22: if (idx < 4)    in child_2()
    23: result[idx++] = 2;    in child_2()
    30: if (idx < 4)    in parent()
    31: result[idx++] = 3;    in parent()
    38: if (idx < 4)    in parent_2()
    39: result[idx++] = 4;    in parent_2()
  sock_iter_batch.c
    38: int idx;    in iter_tcp_soreuse() local
    53: idx = 0;    in iter_tcp_soreuse()
    55: idx = 1;    in iter_tcp_soreuse()
    57: idx = 0;    in iter_tcp_soreuse()
    66: bucket[idx] = hash & hinfo->lhash2_mask;    in iter_tcp_soreuse()
    67: bpf_seq_write(ctx->meta->seq, &idx, sizeof(idx));    in iter_tcp_soreuse()
    102: int idx;    in iter_udp_soreuse() local
    116: idx = 0;    in iter_udp_soreuse()
    118: idx = 1;    in iter_udp_soreuse()
    120: idx = 0;    in iter_udp_soreuse()
    [all …]
  test_bpf_ma.c
    69: new = bpf_obj_new_impl(data_btf_ids[idx], NULL);    in batch_alloc()
    106: unsigned int idx)    in batch_percpu_alloc() argument
    120: new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);    in batch_percpu_alloc()
    134: unsigned int idx)    in batch_percpu_free() argument
    154: #define CALL_BATCH_ALLOC(size, batch, idx) \    argument
    155: batch_alloc((struct bpf_map *)(&array_##size), batch, idx)
    157: #define CALL_BATCH_ALLOC_FREE(size, batch, idx) \    argument
    159: batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
    160: batch_free((struct bpf_map *)(&array_##size), batch, idx); \
    163: #define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \    argument
    [all …]
  uprobe_multi_session_single.c
    14: static int uprobe_multi_check(void *ctx, int idx)    in uprobe_multi_check() argument
    19: uprobe_session_result[idx]++;    in uprobe_multi_check()
    22: if (idx == 0 || idx == 2)    in uprobe_multi_check()
  uprobe_multi_pid_filter.c
    11: static void update_pid(int idx)    in update_pid() argument
    15: if (pid == pids[idx])    in update_pid()
    16: test[idx][0]++;    in update_pid()
    18: test[idx][1]++;    in update_pid()
/tools/testing/selftests/kvm/x86/
  hyperv_features.c
    25: uint32_t idx;    member
    47: GUEST_ASSERT(msr->idx);    in guest_msr()
    257: msr->idx = HV_X64_MSR_RESET;    in guest_test_msrs_access()
    263: msr->idx = HV_X64_MSR_RESET;    in guest_test_msrs_access()
    268: msr->idx = HV_X64_MSR_RESET;    in guest_test_msrs_access()
    299: msr->idx = HV_X64_MSR_EOM;    in guest_test_msrs_access()
    308: msr->idx = HV_X64_MSR_EOM;    in guest_test_msrs_access()
    314: msr->idx = HV_X64_MSR_EOM;    in guest_test_msrs_access()
    319: msr->idx = HV_X64_MSR_EOM;    in guest_test_msrs_access()
    358: msr->idx = HV_X64_MSR_EOI;    in guest_test_msrs_access()
    [all …]
/tools/testing/radix-tree/
  main.c
    17: long idx;    in __gang_check() local
    22: for (idx = -down; idx < up; idx++)    in __gang_check()
    23: item_insert(&tree, middle + idx);    in __gang_check()
    26: for (idx = -down; idx < up; idx++)    in __gang_check()
    133: if (idx[i] < start || idx[i] > end) {    in check_copied_tags()
    136: end, idx[i], item_tag_get(tree, idx[i],    in check_copied_tags()
    146: idx[i], item_tag_get(tree, idx[i], fromtag),    in check_copied_tags()
    159: unsigned long idx[ITEMS];    in copy_tag_check() local
    203: idx[i] = rand();    in copy_tag_check()
    206: item_insert(&tree, idx[i]);    in copy_tag_check()
    [all …]
/tools/perf/util/
  syscalltbl.c
    102: int syscalltbl__id_at_idx(int e_machine, int idx)    in syscalltbl__id_at_idx() argument
    109: assert(idx >= 0 && idx < table->sorted_names_len);    in syscalltbl__id_at_idx()
    110: return table->sorted_names[idx];    in syscalltbl__id_at_idx()
    113: int syscalltbl__strglobmatch_next(int e_machine, const char *syscall_glob, int *idx)    in syscalltbl__strglobmatch_next() argument
    117: for (int i = *idx + 1; table && i < table->sorted_names_len; ++i) {    in syscalltbl__strglobmatch_next()
    121: *idx = i;    in syscalltbl__strglobmatch_next()
    129: int syscalltbl__strglobmatch_first(int e_machine, const char *syscall_glob, int *idx)    in syscalltbl__strglobmatch_first() argument
    131: *idx = -1;    in syscalltbl__strglobmatch_first()
    132: return syscalltbl__strglobmatch_next(e_machine, syscall_glob, idx);    in syscalltbl__strglobmatch_first()
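syscalltbl__strglobmatch_first() resets *idx to -1 and falls through to syscalltbl__strglobmatch_next(), which scans the name-sorted table from *idx + 1 and records the position of the next match, so a caller can enumerate every syscall whose name matches a glob. A hedged caller sketch; the header path and the return convention (a syscall id on match, a negative value when nothing further matches) are assumptions, not shown in the hits above:

```c
/* Hypothetical enumeration of every syscall matching a glob for one e_machine. */
#include "util/syscalltbl.h"	/* assumed header for the declarations above */

static void list_matching_syscalls(int e_machine, const char *glob)
{
	int idx, id;

	/* Assumption: non-negative return is a syscall id, negative means no more. */
	for (id = syscalltbl__strglobmatch_first(e_machine, glob, &idx);
	     id >= 0;
	     id = syscalltbl__strglobmatch_next(e_machine, glob, &idx)) {
		/* use id here; idx tracks the search position between calls */
	}
}
```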
  mmap.c
    66: int idx __maybe_unused)    in auxtrace_mmap_params__set_idx()
    81: if (map->aio.data[idx] == MAP_FAILED) {    in perf_mmap__aio_alloc()
    82: map->aio.data[idx] = NULL;    in perf_mmap__aio_alloc()
    91: if (map->aio.data[idx]) {    in perf_mmap__aio_free()
    92: munmap(map->aio.data[idx], mmap__mmap_len(map));    in perf_mmap__aio_free()
    93: map->aio.data[idx] = NULL;    in perf_mmap__aio_free()
    106: data = map->aio.data[idx];    in perf_mmap__aio_bind()
    129: if (map->aio.data[idx] == NULL)    in perf_mmap__aio_alloc()
    137: zfree(&(map->aio.data[idx]));    in perf_mmap__aio_free()
    245: int idx, nr_cpus;    in build_node_mask() local
    [all …]
/tools/lib/perf/
  evsel.c
    24: int idx)    in perf_evsel__init() argument
    29: evsel->idx = idx;    in perf_evsel__init()
    70: int idx, thread;    in perf_evsel__alloc_fd() local
    72: for (idx = 0; idx < ncpus; idx++) {    in perf_evsel__alloc_fd()
    209: for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)    in perf_evsel__close_fd() local
    238: int idx, thread;    in perf_evsel__munmap() local
    243: for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {    in perf_evsel__munmap()
    260: int ret, idx, thread;    in perf_evsel__mmap() local
    272: for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {    in perf_evsel__mmap()
    341: int idx = 1;    in perf_evsel__read_group() local
    [all …]
/tools/sched_ext/
  scx_simple.c
    45: __u32 idx;    in read_stats() local
    49: for (idx = 0; idx < 2; idx++) {    in read_stats()
    53: &idx, cnts[idx]);    in read_stats()
    57: stats[idx] += cnts[idx][cpu];    in read_stats()
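read_stats() here walks a per-CPU BPF array: a single bpf_map_lookup_elem() on such a map returns one value per possible CPU, which user space then sums. A minimal sketch of that pattern with hypothetical map and variable names; libbpf_num_possible_cpus() and bpf_map_lookup_elem() are standard libbpf calls:

```c
/* Sum one counter from a BPF_MAP_TYPE_PERCPU_ARRAY; map_fd/idx are placeholders. */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int sum_percpu_counter(int map_fd, __u32 idx, __u64 *sum)
{
	int nr_cpus = libbpf_num_possible_cpus();
	int cpu, err;

	if (nr_cpus < 0)
		return nr_cpus;

	__u64 cnts[nr_cpus];	/* the lookup fills one slot per possible CPU */

	err = bpf_map_lookup_elem(map_fd, &idx, cnts);
	if (err)
		return err;

	*sum = 0;
	for (cpu = 0; cpu < nr_cpus; cpu++)
		*sum += cnts[cpu];
	return 0;
}
```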
  scx_flatcg.c
    59: int idx;    in read_cpu_util() local
    75: for (idx = 0; (tok = strtok_r(line, " \n", &cur)); idx++) {    in read_cpu_util()
    79: if (idx == 0) {    in read_cpu_util()
    86: idx, tok);    in read_cpu_util()
    90: if (idx == 4)    in read_cpu_util()
    105: __u32 idx;    in fcg_read_stats() local
    109: for (idx = 0; idx < FCG_NR_STATS; idx++) {    in fcg_read_stats()
    113: &idx, cnts[idx]);    in fcg_read_stats()
    117: stats[idx] += cnts[idx][cpu];    in fcg_read_stats()
/tools/perf/arch/x86/util/
  iostat.c
    66: int idx;    member
    100: int idx;    in iio_root_ports_list_free() local
    103: for (idx = 0; idx < list->nr_entries; idx++)    in iio_root_ports_list_free()
    113: int idx;    in iio_root_port_find_by_notation() local
    117: for (idx = 0; idx < list->nr_entries; idx++) {    in iio_root_port_find_by_notation()
    118: rp = list->rps[idx];    in iio_root_port_find_by_notation()
    139: tmp_buf[rp->idx] = rp;    in iio_root_ports_list_insert()
    302: int idx;    in iostat_event_group() local
    316: for (idx = 0; idx < list->nr_entries; idx++) {    in iostat_event_group()
    318: list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx,    in iostat_event_group()
    [all …]
  evlist.c
    82: return lhs->core.idx - rhs->core.idx;    in arch_evlist__cmp()
    88: int idx = 0;    in arch_evlist__add_required_events() local
    100: idx++;    in arch_evlist__add_required_events()
    106: return topdown_insert_slots_event(list, idx + 1, metric_event);    in arch_evlist__add_required_events()
/tools/testing/selftests/kvm/s390/
  cpumodel_subfuncs_test.c
    272: int idx;    in main() local
    282: for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {    in main()
    283: if (test_facility(testlist[idx].facility_bit)) {    in main()
    284: u8 *array = malloc(testlist[idx].array_size);    in main()
    286: testlist[idx].test((u8 (*)[testlist[idx].array_size])array);    in main()
    288: TEST_ASSERT_EQ(memcmp(testlist[idx].subfunc_array,    in main()
    289: array, testlist[idx].array_size), 0);    in main()
    291: ksft_test_result_pass("%s\n", testlist[idx].subfunc_name);    in main()
    295: testlist[idx].subfunc_name);    in main()
/tools/testing/selftests/kvm/lib/s390/
  processor.c
    52: int ri, idx;    in virt_arch_pg_map() local
    75: idx = (gva >> (64 - 11 * ri)) & 0x7ffu;    in virt_arch_pg_map()
    76: if (entry[idx] & REGION_ENTRY_INVALID)    in virt_arch_pg_map()
    77: entry[idx] = virt_alloc_region(vm, ri);    in virt_arch_pg_map()
    83: if (!(entry[idx] & PAGE_INVALID))    in virt_arch_pg_map()
    86: entry[idx] = gpa;    in virt_arch_pg_map()
    91: int ri, idx;    in addr_arch_gva2gpa() local
    99: idx = (gva >> (64 - 11 * ri)) & 0x7ffu;    in addr_arch_gva2gpa()
    100: TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),    in addr_arch_gva2gpa()
    108: TEST_ASSERT(!(entry[idx] & PAGE_INVALID),    in addr_arch_gva2gpa()
    [all …]
/tools/testing/selftests/bpf/prog_tests/
  xdp_cpumap_attach.c
    23: __u32 idx = 0;    in test_xdp_with_cpumap_helpers() local
    47: err = bpf_map_update_elem(map_fd, &idx, &val, 0);    in test_xdp_with_cpumap_helpers()
    50: err = bpf_map_lookup_elem(map_fd, &idx, &val);    in test_xdp_with_cpumap_helpers()
    81: err = bpf_map_update_elem(map_fd, &idx, &val, 0);    in test_xdp_with_cpumap_helpers()
    87: idx = 1;    in test_xdp_with_cpumap_helpers()
    90: err = bpf_map_update_elem(map_fd, &idx, &val, 0);    in test_xdp_with_cpumap_helpers()
    108: __u32 idx = 0;    in test_xdp_with_cpumap_frags_helpers() local
    121: err = bpf_map_update_elem(map_fd, &idx, &val, 0);    in test_xdp_with_cpumap_frags_helpers()
    124: err = bpf_map_lookup_elem(map_fd, &idx, &val);    in test_xdp_with_cpumap_frags_helpers()
    132: idx = 1;    in test_xdp_with_cpumap_frags_helpers()
    [all …]
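This test programs cpumap slots with bpf_map_update_elem(), keying by idx (the target CPU). A hedged sketch of one such update; struct bpf_cpumap_val comes from the UAPI <linux/bpf.h>, while map_fd, prog_fd and the chosen queue size are placeholders rather than values taken from the test above:

```c
/* Bind a queue size and an optional XDP program to CPU 0 in a
 * BPF_MAP_TYPE_CPUMAP. map_fd and prog_fd are placeholders. */
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int set_cpumap_slot(int map_fd, int prog_fd)
{
	struct bpf_cpumap_val val = {
		.qsize = 192,		/* arbitrary per-CPU ring size */
		.bpf_prog.fd = prog_fd,	/* program run on the remote CPU */
	};
	__u32 idx = 0;			/* CPU number used as the map key */

	return bpf_map_update_elem(map_fd, &idx, &val, 0);
}
```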
/tools/perf/tests/
  openat-syscall-all-cpus.c
    25: int err = TEST_FAIL, fd, idx;    in test__openat_syscall_event_on_all_cpus() local
    64: perf_cpu_map__for_each_cpu(cpu, idx, cpus) {    in test__openat_syscall_event_on_all_cpus()
    65: unsigned int ncalls = nr_openat_calls + idx;    in test__openat_syscall_event_on_all_cpus()
    95: perf_cpu_map__for_each_cpu(cpu, idx, cpus) {    in test__openat_syscall_event_on_all_cpus()
    101: if (evsel__read_on_cpu(evsel, idx, 0) < 0) {    in test__openat_syscall_event_on_all_cpus()
    107: expected = nr_openat_calls + idx;    in test__openat_syscall_event_on_all_cpus()
    108: if (perf_counts(evsel->counts, idx, 0)->val != expected) {    in test__openat_syscall_event_on_all_cpus()
    110: expected, cpu.cpu, perf_counts(evsel->counts, idx, 0)->val);    in test__openat_syscall_event_on_all_cpus()
/tools/mm/
  slabinfo-gnuplot.sh
    219: local idx=0
    225: files[$idx]=$p
    226: idx=$idx+1
    229: t_files[$idx]=$p
    230: idx=$idx+1
    233: files[$idx]=$p
    234: idx=$idx+1