/tools/testing/selftests/rcutorture/bin/

kvm-transform.sh
    45: seconds="$4"
    46: if test -n "$seconds" && echo $seconds | grep -q '[^0-9]'
    48: echo "Invalid duration, should be numeric in seconds: '$seconds'"
    63: if (seconds == "")
    66: print "# seconds=" seconds;
   104: if ("" seconds != "" && $i ~ /\.shutdown_secs=[0-9]*$/)
   105: sub(/[0-9]*$/, seconds, arg);
   139: -v seconds="$seconds" -f $T/bootarg.awk
|
kvm-test-1-run-qemu.sh
   101: elif test $kruntime -ge $seconds || test -f "$resdir/../STOP.1"
   108: if test $kruntime -lt $seconds
   110: echo Completed in $kruntime vs. $seconds >> $resdir/Warnings 2>&1
   161: …if test "$newline" != "$oldline" && test "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE)) && …
   164: if test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
   171: echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds `date`" >> $resdir/Warnings 2>&1
|
functions.sh
   140: print s " seconds"
|
kvm-test-1-run.sh
   135: seconds=$3
   165: boot_args="`per_version_boot_params "$boot_args" $resdir/.config $seconds`"
   204: echo "# seconds=$seconds" >> $resdir/qemu-cmd
|
/tools/testing/selftests/dma/

dma_map_benchmark.c
    29: int threads = 1, seconds = 20, node = -1; in main() local
    43: seconds = atoi(optarg); in main()
    71: if (seconds <= 0 || seconds > DMA_MAP_MAX_SECONDS) { in main()
   107: map.seconds = seconds; in main()
   121: threads, seconds, node, dir[directions], granule); in main()
|
/tools/testing/selftests/firmware/

settings
     2: # 2 seconds). There are 3 test configs, each done with and without firmware
     4: # normal execution should be 2 * 3 * 2 * 2 * 5 = 120 seconds for those alone.
     5: # Additionally, fw_fallback may take 5 seconds for internal timeouts in each
     6: # of the 3 configs, so at least another 15 seconds are needed. Add another
     7: # 10 seconds for each testing config: 120 + 15 + 30
|
/tools/testing/vsock/

timeout.c
    44: void timeout_begin(unsigned int seconds) in timeout_begin() argument
    46: alarm(seconds); in timeout_begin()
|
timeout.h
    11: void timeout_begin(unsigned int seconds);
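The vsock timeout.c excerpt above arms a per-test timeout with alarm(). A minimal sketch of that pattern, assuming a SIGALRM handler that aborts the run; the handler name, message, and timeout_end() helper here are illustrative rather than copied from the vsock sources:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative handler: kill the test process when the alarm fires. */
static void sigalrm_handler(int signum)
{
	static const char msg[] = "test timed out\n";

	(void)signum;
	/* write() is async-signal-safe, unlike fprintf(). */
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
	_exit(1);
}

static void timeout_begin(unsigned int seconds)
{
	signal(SIGALRM, sigalrm_handler);
	alarm(seconds);		/* SIGALRM after 'seconds' of wall time */
}

static void timeout_end(void)
{
	alarm(0);		/* cancel any pending alarm */
}

int main(void)
{
	timeout_begin(2);
	sleep(5);		/* stands in for a blocking accept()/read() */
	timeout_end();
	puts("finished before the timeout");
	return 0;
}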
|
/tools/laptop/freefall/

freefall.c
    80: static void protect(int seconds) in protect() argument
    82: const char *str = (seconds == 0) ? "Unparked" : "Parked"; in protect()
    84: write_int(unload_heads_path, seconds*1000); in protect()
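The freefall.c excerpt parks the disk heads by writing a value to a sysfs attribute, which is why it multiplies seconds by 1000: the attribute takes milliseconds. A rough sketch of that write, assuming the common /sys/block/sda/device/unload_heads path; the real tool builds the path from its disk argument and uses its own write_int() helper:

#include <stdio.h>

/* Assumed sysfs attribute; freefall.c derives this from the device name. */
static const char *unload_heads_path = "/sys/block/sda/device/unload_heads";

/* Park the heads for 'seconds' seconds (0 unparks immediately). */
static int protect(int seconds)
{
	FILE *f = fopen(unload_heads_path, "w");

	if (!f) {
		perror(unload_heads_path);
		return -1;
	}
	/* The attribute is in milliseconds, hence the *1000 scaling. */
	fprintf(f, "%d", seconds * 1000);
	return fclose(f);
}

int main(void)
{
	return protect(2) ? 1 : 0;	/* park for 2 seconds as a demo */
}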
|
/tools/testing/selftests/timers/

inconsistency-check.c
    83: int consistency_test(int clock_type, unsigned long seconds) in consistency_test() argument
    98: while (seconds == -1 || now - then < seconds) { in consistency_test()
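The loop condition above keeps sampling a clock until 'seconds' of wall time have passed (or forever when seconds is -1) and flags any read that goes backwards. A simplified sketch of that check for CLOCK_MONOTONIC; the kselftest version iterates over several clock IDs and uses its own comparison helpers:

#include <stdio.h>
#include <time.h>

/* Return 1 if 'a' is later than 'b'. */
static int ts_after(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return a->tv_sec > b->tv_sec;
	return a->tv_nsec > b->tv_nsec;
}

static int consistency_test(clockid_t clock, long seconds)
{
	struct timespec prev, cur;
	time_t then, now;

	clock_gettime(clock, &prev);
	then = now = time(NULL);
	/* seconds == -1 means run until interrupted. */
	while (seconds == -1 || now - then < seconds) {
		clock_gettime(clock, &cur);
		if (ts_after(&prev, &cur)) {
			fprintf(stderr, "clock went backwards!\n");
			return -1;
		}
		prev = cur;
		now = time(NULL);
	}
	return 0;
}

int main(void)
{
	return consistency_test(CLOCK_MONOTONIC, 2) ? 1 : 0;
}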
|
/tools/include/nolibc/

unistd.h
    66: unsigned int sleep(unsigned int seconds) in sleep() argument
    68: struct timeval my_timeval = { seconds, 0 }; in sleep()
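The nolibc excerpt shows sleep() being built from a struct timeval. One common way to burn that interval without a dedicated syscall wrapper is select() with no file descriptors; whether nolibc actually hands the timeval to select() is not visible in the excerpt, so treat this as a sketch of the general technique:

#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>

/* Sleep built from a timeval, in the spirit of the nolibc snippet. */
static unsigned int my_sleep(unsigned int seconds)
{
	struct timeval tv = { seconds, 0 };

	/* select() with no fds simply waits until the timeout expires. */
	if (select(0, NULL, NULL, NULL, &tv) < 0)
		return seconds;	/* interrupted or failed: report the request back */
	return 0;
}

int main(void)
{
	printf("remaining: %u\n", my_sleep(1));
	return 0;
}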
|
/tools/testing/selftests/resctrl/

settings
     1: # If running time is longer than 120 seconds when new tests are added in
|
/tools/perf/Documentation/

examples.txt
    42: 0.613972165 seconds time elapsed
    63: 0.643954516 seconds time elapsed ( +- 2.363% )
   158: seconds:
   171: 10.000591410 seconds time elapsed
   187: 1.058135029 seconds time elapsed ( +- 3.089% )
|
cpu-and-latency-overheads.txt
     7: Each second of wall-clock time we have number-of-cores seconds of CPU time.
    18: consider a program that executes function 'foo' for 9 seconds with 1 thread,
    20: 128 seconds of CPU time). The CPU overhead is: 'foo' - 6.6%, 'bar' - 93.4%.
|
perf-kwork.txt
   108: have the format seconds.microseconds. If start is not given (i.e., time
   138: have the format seconds.microseconds. If start is not given (i.e., time
   177: have the format seconds.microseconds. If start is not given (i.e., time
   207: have the format seconds.microseconds. If start is not given (i.e., time
|
perf-iostat.txt
    84: 197.081983474 seconds time elapsed
|
perf-test.txt
    65: seconds: leafloop, noploop, sqrtloop, thloop
|
/tools/testing/selftests/seccomp/

seccomp_benchmark.c
    56: int seconds = 15; in calibrate() local
    58: ksft_print_msg("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds); in calibrate()
    76: return samples * seconds; in calibrate()
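The calibrate() excerpt sizes the benchmark so that it runs for roughly 'seconds' of syscalls: estimate a per-second rate, then scale it by the target duration. A sketch of that shape; the probe size, syscall choice, and timing details here are illustrative and not taken from the benchmark itself:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Estimate how many getpid() calls fit into 'seconds' of runtime. */
static unsigned long long calibrate(int seconds)
{
	const unsigned long long probe = 1000000ULL;	/* short measurement burst */
	struct timespec start, end;
	unsigned long long i;
	double elapsed, per_second;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < probe; i++)
		getpid();	/* cheap syscall to measure */
	clock_gettime(CLOCK_MONOTONIC, &end);

	elapsed = (end.tv_sec - start.tv_sec) +
		  (end.tv_nsec - start.tv_nsec) / 1e9;
	per_second = probe / elapsed;
	return (unsigned long long)(per_second * seconds);
}

int main(void)
{
	printf("samples for 15s: %llu\n", calibrate(15));
	return 0;
}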
|
/tools/testing/selftests/net/packetdrill/

tcp_user_timeout_user_timeout.pkt
    18: // We set TCP_USER_TIMEOUT to 3 seconds because really it is not worth
|
/tools/testing/selftests/kvm/

memslot_perf_test.c
   894: int seconds; member
   915: targs->seconds); in help()
  1004: targs->seconds = atoi_non_negative("Test length", optarg); in parse_args()
  1054: if (!test_execute(targs->nslots, &maxslots, targs->seconds, data, in test_loop()
  1104: .seconds = 5, in main()
  1125: data->name, targs.runs, targs.seconds); in main()
|
/tools/testing/selftests/ptp/

testptp.c
   193: int seconds = 0; in main() local
   278: seconds = atoi(optarg); in main()
   429: ts.tv_sec = seconds; in main()
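testptp.c parses a whole-second value from the command line and assigns it to ts.tv_sec before setting a PTP clock. A stripped-down sketch of that flow, assuming the usual fd-to-clockid convention for /dev/ptp* character devices; the FD_TO_CLOCKID macro below is the conventional definition and should be checked against the real tool rather than taken as given:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

/* Assumed dynamic-clock mapping for /dev/ptp* devices. */
#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/ptp0";
	int seconds = argc > 2 ? atoi(argv[2]) : 0;
	struct timespec ts = { 0 };
	int fd;

	fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror(dev);
		return 1;
	}
	ts.tv_sec = seconds;	/* whole seconds, as in the excerpt */
	if (clock_settime(FD_TO_CLOCKID(fd), &ts)) {
		perror("clock_settime");
		return 1;
	}
	close(fd);
	return 0;
}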
|
/tools/testing/selftests/net/

so_txtime.sh
    67: local readonly START="$(date +%s%N --date="+ 0.1 seconds")"
|
/tools/testing/selftests/media_tests/

regression_test.txt
    24: seconds. The idea is when device file goes away, media devnode and cdev
|
/tools/lib/perf/Documentation/

libperf-sampling.txt
    42: - sleeps for 3 seconds
   160: We will sleep for 3 seconds while the ring buffers get data from all CPUs, then we disable the even…
|
/tools/power/pm-graph/config/

example.cfg
    29: # Use rtcwake to autoresume after X seconds, or off to disable (default: 15)
    78: # Run N tests D seconds apart, generates separate outputs with a summary (default: false)
|