/tools/testing/radix-tree/regression1.c
     171  int nr_threads;  in regression1_test() (local)
     177  nr_threads = 2;  in regression1_test()
     178  pthread_barrier_init(&worker_barrier, NULL, nr_threads);  in regression1_test()
     180  threads = malloc(nr_threads * sizeof(*threads));  in regression1_test()
     182  for (i = 0; i < nr_threads; i++) {  in regression1_test()
     190  for (i = 0; i < nr_threads; i++) {  in regression1_test()
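The regression1.c references show the usual worker-launch pattern: size a malloc'd pthread_t array by nr_threads and use a pthread barrier so all workers start together. A minimal standalone sketch of that pattern (not the test's actual code; the thread count and worker body are placeholders):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_barrier_t worker_barrier;

static void *worker_fn(void *arg)
{
	long id = (long)arg;

	/* Block until every worker has been created. */
	pthread_barrier_wait(&worker_barrier);
	printf("worker %ld running\n", id);
	return NULL;
}

int main(void)
{
	int nr_threads = 2;		/* placeholder thread count */
	pthread_t *threads;
	long i;

	pthread_barrier_init(&worker_barrier, NULL, nr_threads);
	threads = malloc(nr_threads * sizeof(*threads));
	if (!threads)
		return 1;

	for (i = 0; i < nr_threads; i++)
		pthread_create(&threads[i], NULL, worker_fn, (void *)i);

	for (i = 0; i < nr_threads; i++)
		pthread_join(threads[i], NULL);

	pthread_barrier_destroy(&worker_barrier);
	free(threads);
	return 0;
}
```

Build with `gcc -pthread`. The barrier maximizes the chance that the workers actually run concurrently, which is typically the point of such regression tests.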
|
/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
       8  nr_threads=`expr $(cat /proc/cpuinfo | grep "processor"| wc -l) - 1`
       9  summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-full-update)
|
/tools/lib/perf/threadmap.c
      45  struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array)  in perf_thread_map__new_array() (argument)
      47  struct perf_thread_map *threads = thread_map__alloc(nr_threads);  in perf_thread_map__new_array()
      53  for (i = 0; i < nr_threads; i++)  in perf_thread_map__new_array()
      56  threads->nr = nr_threads;  in perf_thread_map__new_array()
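perf_thread_map__new_array() is part of libperf's public API (its declaration appears in threadmap.h and libperf.txt further down in this listing). A hedged usage sketch for a program linked against libperf with -lperf; the PIDs below are placeholders:

```c
#include <sys/types.h>
#include <stdio.h>
#include <perf/threadmap.h>

int main(void)
{
	pid_t pids[] = { 1234, 5678 };	/* placeholder PIDs */
	struct perf_thread_map *threads;

	/* Build a thread map holding nr_threads = 2 entries. */
	threads = perf_thread_map__new_array(2, pids);
	if (!threads)
		return 1;

	printf("map holds %d threads, first pid %d\n",
	       perf_thread_map__nr(threads),
	       (int)perf_thread_map__pid(threads, 0));

	/* Thread maps are refcounted; drop the reference taken at creation. */
	perf_thread_map__put(threads);
	return 0;
}
```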
|
/tools/lib/perf/evlist.c
     385  int nr_threads = perf_thread_map__nr(evlist->threads);  in perf_evlist__alloc_pollfd() (local)
     393  nfds += nr_cpus * nr_threads;  in perf_evlist__alloc_pollfd()
     609  int nr_threads = perf_thread_map__nr(evlist->threads);  in mmap_per_thread() (local)
     615  __func__, nr_cpus, nr_threads);  in mmap_per_thread()
     618  for (thread = 0; thread < nr_threads; thread++, idx++) {  in mmap_per_thread()
     651  int nr_threads = perf_thread_map__nr(evlist->threads);  in mmap_per_cpu() (local)
     656  pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);  in mmap_per_cpu()
     662  for (thread = 0; thread < nr_threads; thread++) {  in mmap_per_cpu()
|
/tools/testing/selftests/cgroup/test_kmem.c
     102  int nr_threads = 2 * get_nprocs();  in alloc_kmem_smp() (local)
     107  tinfo = calloc(nr_threads, sizeof(pthread_t));  in alloc_kmem_smp()
     111  for (i = 0; i < nr_threads; i++) {  in alloc_kmem_smp()
     119  for (i = 0; i < nr_threads; i++) {  in alloc_kmem_smp()
     247  int nr_threads = 1000;  in spawn_1000_threads() (local)
     253  tinfo = calloc(nr_threads, sizeof(pthread_t));  in spawn_1000_threads()
     257  for (i = 0; i < nr_threads; i++) {  in spawn_1000_threads()
|
/tools/perf/bench/sched-pipe.c
     223  int nr_threads = 2;  in bench_sched_pipe() (local)
     244  for (t = 0; t < nr_threads; t++) {  in bench_sched_pipe()
     259  for (t = 0; t < nr_threads; t++) {  in bench_sched_pipe()
     266  for (t = 0; t < nr_threads; t++) {  in bench_sched_pipe()
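bench_sched_pipe() fixes nr_threads at 2: the benchmark bounces a token between a pair of tasks over pipes and counts round trips. A stripped-down sketch of that idea using two threads and two pipes (no timing or options, unlike the real benchmark):

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define LOOPS 100000

static int pipe_a[2], pipe_b[2];	/* a: main -> peer, b: peer -> main */

static void *peer(void *arg)
{
	char c;

	(void)arg;
	for (int i = 0; i < LOOPS; i++) {
		if (read(pipe_a[0], &c, 1) != 1)	/* wait for the token */
			break;
		if (write(pipe_b[1], &c, 1) != 1)	/* bounce it back */
			break;
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	char c = 'x';

	if (pipe(pipe_a) || pipe(pipe_b))
		return 1;

	pthread_create(&tid, NULL, peer, NULL);
	for (int i = 0; i < LOOPS; i++) {
		if (write(pipe_a[1], &c, 1) != 1)	/* send the token */
			break;
		if (read(pipe_b[0], &c, 1) != 1)	/* wait for the reply */
			break;
	}
	pthread_join(tid, NULL);
	printf("%d pipe round trips completed\n", LOOPS);
	return 0;
}
```

Each round trip forces a pair of context switches, which is what this style of benchmark is meant to exercise.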
|
/tools/perf/bench/numa.c
      83  int nr_threads;  (member)
     988  for (t = 0; t < g->p.nr_threads; t++) {  in count_process_nodes()
     993  task_nr = process_nr*g->p.nr_threads + t;  in count_process_nodes()
    1027  for (t = 0; t < g->p.nr_threads; t++) {  in count_node_processes()
    1032  task_nr = p*g->p.nr_threads + t;  in count_node_processes()
    1420  task_nr = process_nr*g->p.nr_threads;  in worker_process()
    1434  for (t = 0; t < g->p.nr_threads; t++) {  in worker_process()
    1450  for (t = 0; t < g->p.nr_threads; t++) {  in worker_process()
    1559  BUG_ON(g->p.nr_threads <= 0);  in init()
    1775  for (t = 0; t < g->p.nr_threads; t++) {  in __bench_numa()
    [all …]
|
/tools/testing/selftests/net/tcp_ao/lib/setup.c
     175  static unsigned int nr_threads = 1;  (variable)
     189  if (stage_threads[q] == nr_threads) {  in synchronize_threads()
     194  while (stage_threads[q] < nr_threads)  in synchronize_threads()
     269  nr_threads++;  in __test_init()
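The setup.c snippets suggest a stage checkpoint: nr_threads grows as participants register in __test_init(), and synchronize_threads() waits until the per-stage counter reaches nr_threads. A loose sketch of that idea using C11 atomics and a fixed participant count, not the selftest's actual implementation:

```c
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_STAGES 2

static unsigned int nr_threads = 2;	/* placeholder participant count */
static atomic_uint stage_threads[NR_STAGES];

/*
 * Checkpoint: each participant bumps the counter for stage q,
 * then spins until all nr_threads participants have reached it.
 */
static void synchronize_threads(int q)
{
	atomic_fetch_add(&stage_threads[q], 1);
	while (atomic_load(&stage_threads[q]) < nr_threads)
		sched_yield();
}

static void *participant(void *arg)
{
	long id = (long)arg;

	synchronize_threads(0);
	printf("participant %ld past stage 0\n", id);
	synchronize_threads(1);
	return NULL;
}

int main(void)
{
	pthread_t tids[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&tids[i], NULL, participant, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(tids[i], NULL);
	return 0;
}
```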
|
/tools/lib/perf/include/perf/threadmap.h
      11  LIBPERF_API struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);
|
/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c
     374  u32 nr_threads;  in main() (local)
     398  nr_threads = test_data.nr_cpus + test_data.nr_devices;  in main()
     399  if (nr_threads > get_nprocs())  in main()
     401  nr_threads, get_nprocs());  in main()
|
/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
     176  const int nr_threads = 32;  in test_nodeadlock() (local)
     177  pthread_t tids[nr_threads];  in test_nodeadlock()
     208  for (i = 0; i < nr_threads; i++) {  in test_nodeadlock()
     222  waitall(tids, nr_threads);  in test_nodeadlock()
|
/tools/perf/builtin-record.c
     184  int nr_threads;  (member)
    1267  for (t = 0; t < rec->nr_threads; t++)  in record__alloc_thread_data()
    2292  int t, tt, err, ret = 0, nr_threads = rec->nr_threads;  in record__start_threads() (local)
    2312  for (t = 1; t < nr_threads; t++) {  in record__start_threads()
    3698  for (t = 0; t < nr_threads; t++)  in record__free_thread_masks()
    3714  for (t = 0; t < nr_threads; t++) {  in record__alloc_thread_masks()
    3738  rec->nr_threads = nr_cpus;  in record__init_thread_cpu_masks()
    3854  rec->nr_threads = t;  in record__init_thread_masks_spec()
    3856  if (!rec->nr_threads)  in record__init_thread_masks_spec()
    4018  rec->nr_threads = 1;  in record__init_thread_default_masks()
    [all …]
|
/tools/tracing/latency/latency-collector.c
      77  static unsigned int nr_threads = DEFAULT_NR_PRINTER_THREADS;  (variable)
    1642  if (nr_threads > MAX_THREADS) {  in start_printthread()
    1645  nr_threads, MAX_THREADS);  in start_printthread()
    1646  nr_threads = MAX_THREADS;  in start_printthread()
    1648  for (i = 0; i < nr_threads; i++) {  in start_printthread()
    1931  nr_threads = value;  in scan_arguments()
    2040  policy_name(sched_policy), sched_pri, nr_threads);  in show_params()
|
/tools/lib/perf/Documentation/libperf.txt
      64  struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);
|
/tools/perf/util/evsel.c
    1967  static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)  in evsel__remove_fd() (argument)
    1970  for (int thread = thread_idx; thread < nr_threads - 1; thread++)  in evsel__remove_fd()
    1976  int nr_threads, int thread_idx)  in update_fds() (argument)
    1980  if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)  in update_fds()
    1986  evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);  in update_fds()
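evsel__remove_fd() closes the gap left by one thread's file-descriptor slot by shifting the remaining entries left, and update_fds() bounds-checks the indices before applying it across evsels. A generic sketch of that remove-by-shift step on a plain int array (perf's real code walks its per-evsel fd table rather than a flat array):

```c
#include <stdio.h>

/* Remove the entry at thread_idx by shifting later entries left. */
static void remove_fd(int *fds, int nr_threads, int thread_idx)
{
	for (int thread = thread_idx; thread < nr_threads - 1; thread++)
		fds[thread] = fds[thread + 1];
	fds[nr_threads - 1] = -1;	/* mark the now-unused tail slot */
}

int main(void)
{
	int fds[] = { 10, 11, 12, 13 };

	remove_fd(fds, 4, 1);		/* drop the fd for thread index 1 */
	for (int i = 0; i < 4; i++)
		printf("%d ", fds[i]);	/* prints: 10 12 13 -1 */
	printf("\n");
	return 0;
}
```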
|