/linux-6.3-rc2/fs/ |
A D | utimes.c | 27 if (times) { in vfs_utimes() 29 !nsec_valid(times[1].tv_nsec)) in vfs_utimes() 32 times[1].tv_nsec == UTIME_NOW) in vfs_utimes() 33 times = NULL; in vfs_utimes() 41 if (times) { in vfs_utimes() 45 newattrs.ia_atime = times[0]; in vfs_utimes() 52 newattrs.ia_mtime = times[1]; in vfs_utimes() 181 if (copy_from_user(&times, utimes, sizeof(times))) in do_futimesat() 189 if (times[0].tv_usec >= 1000000 || times[0].tv_usec < 0 || in do_futimesat() 190 times[1].tv_usec >= 1000000 || times[1].tv_usec < 0) in do_futimesat() [all …]
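The utimes.c hits show vfs_utimes() rejecting out-of-range nanosecond values and dropping the times array entirely when both entries are UTIME_NOW, and do_futimesat() applying the equivalent microsecond bounds check. A minimal userspace sketch of the same two-element convention via utimensat(2); the file name is made up for illustration::

  #include <fcntl.h>      /* AT_FDCWD, UTIME_NOW, UTIME_OMIT */
  #include <stdio.h>
  #include <sys/stat.h>

  int main(void)
  {
      /* times[0] is atime, times[1] is mtime -- the layout vfs_utimes() checks */
      struct timespec times[2] = {
          { .tv_nsec = UTIME_OMIT },   /* leave atime untouched */
          { .tv_nsec = UTIME_NOW },    /* set mtime to "now" */
      };

      if (utimensat(AT_FDCWD, "example.txt", times, 0))   /* hypothetical file */
          perror("utimensat");
      return 0;
  }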
|
/linux-6.3-rc2/Documentation/scheduler/ |
A D | sched-stats.rst | 43 1) # of times sched_yield() was called 49 3) # of times schedule() was called 50 4) # of times schedule() left the processor idle 54 5) # of times try_to_wake_up() was called 78 1) # of times in this domain load_balance() was called when the 80 2) # of times in this domain load_balance() checked but found 88 6) # of times in this domain pull_task() was called even though 90 7) # of times in this domain load_balance() was called but did 102 13) # of times in this domain pull_task() was called when busy 128 25) # of times active_load_balance() was called [all …]
|
/linux-6.3-rc2/drivers/gpu/drm/i915/gt/ |
A D | selftest_engine_heartbeat.c | 209 u32 times[5]; in __live_heartbeat_fast() local 223 for (i = 0; i < ARRAY_SIZE(times); i++) { in __live_heartbeat_fast() 252 times[i] = ktime_us_delta(t1, t0); in __live_heartbeat_fast() 255 sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL); in __live_heartbeat_fast() 259 times[ARRAY_SIZE(times) / 2], in __live_heartbeat_fast() 260 times[0], in __live_heartbeat_fast() 261 times[ARRAY_SIZE(times) - 1]); in __live_heartbeat_fast() 270 if (times[ARRAY_SIZE(times) / 2] > error_threshold) { in __live_heartbeat_fast() 273 times[ARRAY_SIZE(times) / 2], in __live_heartbeat_fast()
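__live_heartbeat_fast() above collects ARRAY_SIZE(times) heartbeat latencies, sorts them with a u32 comparator and then reports the median, minimum and maximum. A standalone sketch of that measure-sort-summarize pattern using qsort(3); the sample values are invented::

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  static int cmp_u32(const void *a, const void *b)
  {
      uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;

      return (x > y) - (x < y);   /* avoids the overflow of returning x - y */
  }

  int main(void)
  {
      uint32_t times[5] = { 410, 395, 2100, 402, 399 };   /* made-up latencies in us */
      size_t n = sizeof(times) / sizeof(times[0]);

      qsort(times, n, sizeof(times[0]), cmp_u32);
      printf("median %" PRIu32 " us, min %" PRIu32 " us, max %" PRIu32 " us\n",
             times[n / 2], times[0], times[n - 1]);
      return 0;
  }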
|
/linux-6.3-rc2/lib/ |
A D | fault-inject.c | 22 int times; in setup_fault_attr() local 27 &interval, &probability, &space, &times) < 4) { in setup_fault_attr() 35 atomic_set(&attr->times, times); in setup_fault_attr() 50 atomic_read(&attr->times)); in fail_dump() 131 if (atomic_read(&attr->times) == 0) in should_fail_ex() 155 if (atomic_read(&attr->times) != -1) in should_fail_ex() 156 atomic_dec_not_zero(&attr->times); in should_fail_ex() 223 debugfs_create_atomic_t("times", mode, dir, &attr->times); in fault_create_debugfs_attr()
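setup_fault_attr() above parses a single boot/module parameter of the form <interval>,<probability>,<space>,<times> and stores the last field in an atomic counter that should_fail_ex() later decrements. A userspace approximation of just the parsing step; the string literal is only an example::

  #include <stdio.h>

  int main(void)
  {
      const char *str = "1,100,0,-1";   /* interval,probability,space,times */
      unsigned long interval, probability, space;
      int times;

      if (sscanf(str, "%lu,%lu,%lu,%d",
                 &interval, &probability, &space, &times) < 4) {
          fprintf(stderr, "couldn't parse fault attributes\n");
          return 1;
      }
      printf("interval=%lu probability=%lu space=%lu times=%d\n",
             interval, probability, space, times);
      return 0;
  }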
|
/linux-6.3-rc2/fs/hostfs/ |
A D | hostfs_user.c | 197 struct timeval times[2]; in set_attr() local 244 times[0].tv_sec = st.atime.tv_sec; in set_attr() 245 times[0].tv_usec = st.atime.tv_nsec / 1000; in set_attr() 246 times[1].tv_sec = st.mtime.tv_sec; in set_attr() 247 times[1].tv_usec = st.mtime.tv_nsec / 1000; in set_attr() 250 times[0].tv_sec = attrs->ia_atime.tv_sec; in set_attr() 251 times[0].tv_usec = attrs->ia_atime.tv_nsec / 1000; in set_attr() 254 times[1].tv_sec = attrs->ia_mtime.tv_sec; in set_attr() 255 times[1].tv_usec = attrs->ia_mtime.tv_nsec / 1000; in set_attr() 259 if (futimes(fd, times) != 0) in set_attr() [all …]
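set_attr() above converts the VFS timespec stamps (nanoseconds) into the timeval pair (microseconds) that futimes() expects, taking either the file's current stamps or the ones passed in attrs. A hedged sketch of just that conversion, assuming the caller already holds an open descriptor::

  #include <stdio.h>
  #include <sys/time.h>
  #include <time.h>

  /* Fill a futimes()-style timeval pair from timespec atime/mtime. */
  static void fill_times(struct timeval times[2],
                         const struct timespec *atime,
                         const struct timespec *mtime)
  {
      times[0].tv_sec  = atime->tv_sec;
      times[0].tv_usec = atime->tv_nsec / 1000;   /* ns -> us, as in set_attr() */
      times[1].tv_sec  = mtime->tv_sec;
      times[1].tv_usec = mtime->tv_nsec / 1000;
  }

  int main(void)
  {
      struct timespec now;
      struct timeval times[2];

      clock_gettime(CLOCK_REALTIME, &now);
      fill_times(times, &now, &now);
      /* futimes(fd, times) would then apply both stamps to an open fd */
      printf("atime %ld.%06ld  mtime %ld.%06ld\n",
             (long)times[0].tv_sec, (long)times[0].tv_usec,
             (long)times[1].tv_sec, (long)times[1].tv_usec);
      return 0;
  }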
|
/linux-6.3-rc2/Documentation/powerpc/ |
A D | vcpudispatch_stats.rst | 17 By default, the DTLB log for each vcpu is processed 50 times a second so 31 2. number of times this vcpu was dispatched on the same processor as last 33 3. number of times this vcpu was dispatched on a different processor core 35 4. number of times this vcpu was dispatched on a different chip 36 5. number of times this vcpu was dispatched on a different socket/drawer 42 6. number of times this vcpu was dispatched in its home node (chip) 43 7. number of times this vcpu was dispatched in a different node 44 8. number of times this vcpu was dispatched in a node further away (numa
|
/linux-6.3-rc2/tools/power/cpupower/bench/ |
A D | README-BENCH | 34 You can specify load (100% CPU load) and sleep (0% CPU load) times in us which 42 repeated 20 times. 48 Will increase load and sleep time by 25ms 5 times. 50 25ms load/sleep time repeated 20 times (cycles). 51 50ms load/sleep time repeated 20 times (cycles). 53 100ms load/sleep time repeated 20 times (cycles). 79 In round 2, if the ondemand sampling times exactly match the load/sleep 96 You can easily test all kind of load/sleep times and check whether your
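README-BENCH above describes a benchmark that alternates a busy phase (100% load) with a sleep phase (0% load), repeats the pair for a number of cycles, and then grows both durations by a fixed step. A rough single-round sketch; the step, cycle count and lack of calibration are placeholders::

  #include <time.h>

  /* Burn CPU for roughly 'us' microseconds by polling a monotonic clock. */
  static void burn(long us)
  {
      struct timespec start, now;

      clock_gettime(CLOCK_MONOTONIC, &start);
      do {
          clock_gettime(CLOCK_MONOTONIC, &now);
      } while ((now.tv_sec - start.tv_sec) * 1000000L +
               (now.tv_nsec - start.tv_nsec) / 1000L < us);
  }

  int main(void)
  {
      long load_us = 25000, sleep_us = 25000;   /* 25 ms, as in the example above */
      int cycles = 20;                          /* repetitions per round */
      struct timespec sleep_ts = { .tv_sec = 0, .tv_nsec = sleep_us * 1000L };

      for (int i = 0; i < cycles; i++) {
          burn(load_us);                  /* 100% load phase */
          nanosleep(&sleep_ts, NULL);     /* 0% load phase */
      }
      return 0;
  }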
|
/linux-6.3-rc2/kernel/sched/ |
A D | psi.c | 259 memcpy(times, groupc->times, sizeof(groupc->times)); in get_recent_times() 279 times[s] += now - state_start; in get_recent_times() 284 times[s] = delta; in get_recent_times() 351 u32 times[NR_PSI_STATES]; in collect_percpu_times() local 363 deltas[s] += (u64)times[s] * nonidle; in collect_percpu_times() 734 groupc->times[PSI_IO_SOME] += delta; in record_times() 736 groupc->times[PSI_IO_FULL] += delta; in record_times() 740 groupc->times[PSI_MEM_SOME] += delta; in record_times() 746 groupc->times[PSI_CPU_SOME] += delta; in record_times() 752 groupc->times[PSI_NONIDLE] += delta; in record_times() [all …]
|
A D | cputime.c | 312 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) in thread_group_cputime() argument 337 times->utime = sig->utime; in thread_group_cputime() 338 times->stime = sig->stime; in thread_group_cputime() 339 times->sum_exec_runtime = sig->sum_sched_runtime; in thread_group_cputime() 343 times->utime += utime; in thread_group_cputime() 344 times->stime += stime; in thread_group_cputime() 345 times->sum_exec_runtime += read_sum_exec_runtime(t); in thread_group_cputime()
|
/linux-6.3-rc2/arch/mips/include/asm/ |
A D | unroll.h | 16 #define unroll(times, fn, ...) do { \ argument 28 BUILD_BUG_ON(!__builtin_constant_p(times)); \ 30 switch (times) { \
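unroll() above turns a compile-time constant repeat count into straight-line calls by entering a switch at the matching case and falling through, with BUILD_BUG_ON() rejecting non-constant counts. A small userspace mirror of the idea; the helper, the count and the maximum of 4 are invented, and the compile-time check is left out::

  #include <stdio.h>

  /* unroll(3, fn, x) expands to fn(x, 2); fn(x, 1); fn(x, 0); with no runtime loop. */
  #define unroll(times, fn, ...) do {                     \
          switch (times) {                                \
          case 4: fn(__VA_ARGS__, 3); /* fallthrough */   \
          case 3: fn(__VA_ARGS__, 2); /* fallthrough */   \
          case 2: fn(__VA_ARGS__, 1); /* fallthrough */   \
          case 1: fn(__VA_ARGS__, 0);                     \
          }                                               \
  } while (0)

  static void print_slot(const char *tag, int i)
  {
      printf("%s[%d]\n", tag, i);
  }

  int main(void)
  {
      unroll(3, print_slot, "slot");   /* prints slot[2], slot[1], slot[0] */
      return 0;
  }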
|
/linux-6.3-rc2/tools/testing/selftests/bpf/benchs/ |
A D | bench_bpf_hashmap_lookup.c | 213 static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time) in compute_events() argument 222 if (!times[i]) in compute_events() 224 *mean_time += times[i]; in compute_events() 225 *events_mean += events_from_time(times[i]); in compute_events() 236 double events_i = *events_mean - events_from_time(times[i]); in compute_events()
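compute_events() above folds per-round timings into a mean event rate and a standard deviation, skipping rounds whose time is zero. A self-contained sketch of the same two-pass mean/stddev computation; events_from_time() here is a stand-in with a made-up conversion factor::

  #include <math.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in for events_from_time(): pretend rate = 1e9 / time. */
  static double events_from_time(uint64_t t)
  {
      return 1e9 / (double)t;
  }

  int main(void)
  {
      uint64_t times[] = { 120, 0, 135, 118, 127 };   /* made-up round times, 0 = empty */
      size_t n = sizeof(times) / sizeof(times[0]), rounds = 0;
      double mean = 0.0, var = 0.0;

      for (size_t i = 0; i < n; i++) {
          if (!times[i])
              continue;                 /* compute_events() also skips empty rounds */
          mean += events_from_time(times[i]);
          rounds++;
      }
      mean /= rounds;

      for (size_t i = 0; i < n; i++) {
          if (!times[i])
              continue;
          double d = mean - events_from_time(times[i]);
          var += d * d;
      }
      printf("mean %.3f  stddev %.3f\n", mean, sqrt(var / (rounds - 1)));
      return 0;
  }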
|
/linux-6.3-rc2/Documentation/ABI/testing/ |
A D | sysfs-devices-platform-ipmi | 127 idles (RO) Number of times the interface was 140 hosed_count (RO) Number of times the hardware didn't 143 long_timeouts (RO) Number of times the driver 147 flag_fetches (RO) Number of times the driver 156 short_timeouts (RO) Number of times the driver 207 hosed (RO) Number of times the hardware didn't 228 flag_fetches (RO) Number of times a flag fetch was 234 receive_retries (RO) Number of times the receive of a 237 send_errors (RO) Number of times the send of a
|
A D | sysfs-class-wakeup | 18 This file contains the number of times the wakeup source was 32 This file contains the number of times the wakeup source might 39 This file contains the number of times the wakeup source's
|
A D | sysfs-firmware-acpi | 122 times it has triggered:: 169 sci The number of times the ACPI SCI 172 sci_not The number of times the ACPI SCI 224 # press the power button for 3 times; 230 # press the power button for 3 times; 241 # press the power button for 3 times; 245 # press the power button for 3 times;
|
A D | sysfs-kernel-slab | 59 The alloc_from_partial file shows how many times a cpu slab has 71 The alloc_refill file shows how many times the per-cpu freelist 82 The alloc_slab file shows how many times a new slab had to 124 The file cpuslab_flush shows how many times a cache's cpu slabs 147 The deactivate_empty file shows how many times an empty cpu slab 157 The deactivate_full file shows how many times a full cpu slab 178 The deactivate_to_head file shows how many times a partial cpu 189 The deactivate_to_tail file shows how many times a partial cpu 209 The free_add_partial file shows how many times an object has 264 The free_slab file shows how many times an empty slab has been [all …]
|
/linux-6.3-rc2/Documentation/admin-guide/cgroup-v1/ |
A D | cpuacct.rst | 35 CPU time obtained by the cgroup into user and system times. Currently 44 system times. This has two side effects: 46 - It is theoretically possible to see wrong values for user and system times. 49 - It is possible to see slightly outdated values for user and system times
|
/linux-6.3-rc2/tools/perf/scripts/python/ |
A D | stat-cpi.py | 6 times = [] variable 14 if (time not in times): 15 times.append(time)
|
/linux-6.3-rc2/Documentation/fault-injection/ |
A D | nvme-fault-injection.rst | 22 echo 1 > /sys/kernel/debug/nvme0n1/fault_inject/times 33 name fault_inject, interval 1, probability 100, space 0, times 1 77 echo 1 > /sys/kernel/debug/nvme0n1/fault_inject/times 91 name fault_inject, interval 1, probability 100, space 0, times 1 129 echo 1 > /sys/kernel/debug/nvme0/fault_inject/times 141 name fault_inject, interval 1, probability 100, space 1, times 1
|
A D | fault-injection.rst | 83 - /sys/kernel/debug/fail*/times: 85 specifies how many times failures may happen at most. A value of -1 214 mmc_core.fail_request=<interval>,<probability>,<space>,<times> 228 like probability, interval, times, etc. But per-capability settings 350 echo -1 > /sys/kernel/debug/$FAILTYPE/times 404 echo -1 > /sys/kernel/debug/$FAILTYPE/times 435 echo -1 > /sys/kernel/debug/$FAILTYPE/times 470 Same as above except to specify 100 times failures at most instead of one time 473 # ./tools/testing/fault-injection/failcmd.sh --times=100 \ 480 ./tools/testing/fault-injection/failcmd.sh --times=100 \
|
/linux-6.3-rc2/Documentation/driver-api/media/drivers/ccs/ |
A D | mk-ccs-regs | 304 my $times = $h->{$arg}->{elsize} != 1 ? 310 $reg_formula .= " + (($arg) < $lim ? ($arg)$times : $offset + (($arg) - $lim)$times)"; 312 $reg_formula .= " + ($arg)$times"; 315 $lim_formula .= (defined $lim_formula ? " + " : "") . "($arg)$times";
|
/linux-6.3-rc2/include/linux/ |
A D | fault-inject.h | 19 atomic_t times; member 40 .times = ATOMIC_INIT(1), \
|
/linux-6.3-rc2/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/ |
A D | counters.rst | 161 - The number of times the SQ was recovered. 380 - The number of times the RQ was recovered. 393 - The number of times the NAPI poll function completed and armed the 407 - The number of times the EQ was recovered. 416 - The number of times an outstanding UMR request is delayed due to 633 - Number of times the TLS async resync request was started. 637 - Number of times the TLS async resync request properly ended with 647 - Number of times the TLS resync response call to the driver was 652 - Number of times the TLS resync response call to the driver was 662 - Number of times when CQE TLS offload was problematic. [all …]
|
/linux-6.3-rc2/drivers/staging/media/av7110/ |
A D | video-slowmotion.rst | 47 - The number of times to repeat each frame. 53 of times. This call can only be used if VIDEO_SOURCE_MEMORY is
|
/linux-6.3-rc2/Documentation/power/ |
A D | drivers-testing.rst | 13 several times, preferably several times in a row, and separately for hibernation 50 Each of the above tests should be repeated several times and the STD tests
|
/linux-6.3-rc2/drivers/crypto/qat/qat_common/ |
A D | qat_hal.c | 126 int times = MAX_RETRY_TIMES; in qat_hal_wait_cycles() local 146 if (times < 0) { in qat_hal_wait_cycles() 383 int times = MAX_RETRY_TIMES; in qat_hal_check_ae_alive() local 394 if (times < 0) { in qat_hal_check_ae_alive() 448 int times = 30; in qat_hal_init_esram() local 465 if (times < 0) { in qat_hal_init_esram() 480 unsigned int times = 100; in qat_hal_clr_reset() local 488 if (!(times--)) in qat_hal_clr_reset() 626 int times = MAX_RETRY_TIMES; in qat_hal_clear_gpr() local 658 } while (ret && times--); in qat_hal_clear_gpr() [all …]
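The qat_hal.c hits all follow the same bounded-retry shape: initialize times to a retry budget, poll, and give up once the counter is exhausted. A generic sketch of that loop; the poll callback, the budget and the fake failure pattern are placeholders::

  #include <stdbool.h>
  #include <stdio.h>

  #define MAX_RETRY_TIMES 100   /* placeholder budget */

  /* Retry op() until it succeeds or the budget runs out. */
  static int retry_bounded(bool (*op)(void))
  {
      int times = MAX_RETRY_TIMES;

      do {
          if (op())
              return 0;
      } while (times-- > 0);

      return -1;   /* mirrors the "if (times < 0) bail out" checks above */
  }

  static bool flaky_op(void)
  {
      static int calls;

      return ++calls == 3;   /* pretend the third attempt succeeds */
  }

  int main(void)
  {
      printf("retry_bounded: %d\n", retry_bounded(flaky_op));
      return 0;
  }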
|