/kernel/

range.c
     67  int i, j;    in subtract_range() local
     72  for (j = 0; j < az; j++) {    in subtract_range()
     73  if (!range[j].end)    in subtract_range()
     76  if (start <= range[j].start && end >= range[j].end) {    in subtract_range()
     78  range[j].end = 0;    in subtract_range()
     82  if (start <= range[j].start && end < range[j].end &&    in subtract_range()
     89  if (start > range[j].start && end >= range[j].end &&    in subtract_range()
     95  if (start > range[j].start && end < range[j].end) {    in subtract_range()
    133  for (j = k; j > i; j--) {    in clean_sort_range()
    135  k = j;    in clean_sort_range()
    [all …]

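The subtract_range() hits above cover the four ways a subtracted interval can overlap an existing range: swallow it whole, clip its front, clip its back, or split it in two. A minimal user-space sketch of that case analysis follows, using a simplified struct range and a hypothetical subtract_one() helper; the overlap guards on the two clipping cases are inferred, since the hits at lines 82 and 89 are truncated at the "&&".

    struct range { unsigned long start, end; };    /* end == 0 marks an unused slot */

    /* Remove [start, end) from *r; a split parks the tail in *spare. */
    static void subtract_one(struct range *r, struct range *spare,
                             unsigned long start, unsigned long end)
    {
        if (!r->end)
            return;                          /* empty slot, nothing to do */
        if (start <= r->start && end >= r->end) {
            r->start = r->end = 0;           /* interval swallows the range */
        } else if (start <= r->start && end < r->end && end > r->start) {
            r->start = end;                  /* clip the front */
        } else if (start > r->start && end >= r->end && start < r->end) {
            r->end = start;                  /* clip the back */
        } else if (start > r->start && end < r->end) {
            spare->start = end;              /* split: tail becomes a new range */
            spare->end = r->end;
            r->end = start;                  /* head keeps the original slot */
        }
    }

The real subtract_range() walks an array of such ranges and grabs a free slot for the split case; the sketch only shows the per-range decision.
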
kallsyms_selftest.c
    259  int i, j, ret;    in test_kallsyms_basic_function() local
    388  for (j = 0; j < stat->save_cnt; j++) {    in test_kallsyms_basic_function()
    389  if (stat->addrs[j] == addr)    in test_kallsyms_basic_function()
    393  if (j == stat->save_cnt) {    in test_kallsyms_basic_function()

audit_tree.c
    296  int i, j;    in replace_chunk() local
    302  for (i = j = 0; j < old->count; i++, j++) {    in replace_chunk()
    303  if (!old->owners[j].owner) {    in replace_chunk()
    307  owner = old->owners[j].owner;    in replace_chunk()
    309  new->owners[i].index = old->owners[j].index - j + i;    in replace_chunk()
    313  list_replace_init(&old->owners[j].list, &new->owners[i].list);    in replace_chunk()

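replace_chunk()'s loop is a compaction copy: j scans the old owners array, i tracks the next destination slot, and empty source entries are skipped by stepping i back so the new array stays dense while each index field is rebased from j to i. A generic two-cursor sketch of that idiom, with hypothetical struct and function names rather than the audit code:

    struct slot { void *owner; int index; };

    /* Copy the non-empty entries of src[] into dst[], packed from the front. */
    static void compact_copy(struct slot *dst, const struct slot *src, int count)
    {
        int i, j;

        for (i = j = 0; j < count; i++, j++) {
            if (!src[j].owner) {
                i--;                               /* hole: reuse this destination slot */
                continue;
            }
            dst[i] = src[j];
            dst[i].index = src[j].index - j + i;   /* rebase to the new position */
        }
    }
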
/kernel/rcu/

tree_stall.h
    229  unsigned long j;    in rcu_stall_kick_kthreads() local
    453  *jp = j;    in rcu_is_gp_kthread_starving()
    461  unsigned long j;    in rcu_is_rcuc_kthread_starving() local
    474  *jp = j;    in rcu_is_rcuc_kthread_starving()
    526  unsigned long j;    in print_cpu_stall_info() local
    686  j = jiffies;    in print_other_cpu_stall()
    689  rcu_state.name, j - gpa, j, gpa,    in print_other_cpu_stall()
    799  j = jiffies;    in check_cpu_stall()
    948  j = jiffies;    in show_rcu_gp_kthreads()
   1023  j = jiffies;    in rcu_check_gp_start_stall()
    [all …]

rcu_segcblist.c
    471  int i, j;    in rcu_segcblist_advance() local
    493  for (j = RCU_WAIT_TAIL; j < i; j++)    in rcu_segcblist_advance()
    494  WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]);    in rcu_segcblist_advance()
    502  for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {    in rcu_segcblist_advance()
    503  if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])    in rcu_segcblist_advance()
    505  WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]);    in rcu_segcblist_advance()
    506  rcu_segcblist_move_seglen(rsclp, i, j);    in rcu_segcblist_advance()
    507  rsclp->gp_seq[j] = rsclp->gp_seq[i];    in rcu_segcblist_advance()
    528  int i, j;    in rcu_segcblist_accelerate() local
    572  for (j = i + 1; j <= RCU_NEXT_TAIL; j++)    in rcu_segcblist_accelerate()
    [all …]

update.c
    415  int j;    in __wait_rcu_gp() local
    424  for (j = 0; j < i; j++)    in __wait_rcu_gp()
    425  if (crcu_array[j] == crcu_array[i])    in __wait_rcu_gp()
    427  if (j == i) {    in __wait_rcu_gp()
    439  for (j = 0; j < i; j++)    in __wait_rcu_gp()
    440  if (crcu_array[j] == crcu_array[i])    in __wait_rcu_gp()
    442  if (j == i) {    in __wait_rcu_gp()

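Both loops in __wait_rcu_gp() use the same quadratic duplicate check: before acting on entry i, scan entries 0..i-1 for the same pointer, and treat "j == i" after the scan as "first occurrence, go ahead". The idiom as a standalone sketch, where the printf stands in for the real per-entry work:

    #include <stdio.h>

    /* Act on each distinct pointer in arr[] exactly once (first occurrence wins). */
    static void process_unique(void *const *arr, int n)
    {
        for (int i = 0; i < n; i++) {
            int j;

            for (j = 0; j < i; j++)
                if (arr[j] == arr[i])
                    break;                 /* already seen at index j < i */
            if (j == i)                    /* no earlier duplicate found */
                printf("processing entry %d: %p\n", i, arr[i]);
        }
    }
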
tree_nocb.h
    309  unsigned long j, bool lazy)    in rcu_nocb_do_flush_bypass() argument
    353  unsigned long j, bool lazy)    in rcu_nocb_flush_bypass() argument
    399  unsigned long j = jiffies;    in rcu_nocb_try_bypass() local
    467  if (j != rdp->nocb_gp_adv_time &&    in rcu_nocb_try_bypass()
    471  rdp->nocb_gp_adv_time = j;    in rcu_nocb_try_bypass()
    532  unsigned long j;    in __call_rcu_nocb_wake() local
    571  j = jiffies;    in __call_rcu_nocb_wake()
    572  if (j != rdp->nocb_gp_adv_time &&    in __call_rcu_nocb_wake()
    576  rdp->nocb_gp_adv_time = j;    in __call_rcu_nocb_wake()
    655  unsigned long j = jiffies;    in nocb_gp_wait() local
    [all …]

tree.c
    478  ulong j;    in param_set_first_fqs_jiffies() local
    482  WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);    in param_set_first_fqs_jiffies()
    490  ulong j;    in param_set_next_fqs_jiffies() local
    494  WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));    in param_set_next_fqs_jiffies()
   2066  j = (j + 2) / 3;    in rcu_gp_fqs_loop()
   2068  j = 1;    in rcu_gp_fqs_loop()
   2078  jiffies + (j ? 3 * j : 2));    in rcu_gp_fqs_loop()
   2127  j = 1;    in rcu_gp_fqs_loop()
   2129  j = rcu_state.jiffies_force_qs - j;    in rcu_gp_fqs_loop()
   4647  int j;    in rcu_init_one() local
    [all …]

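The two param_set_*_fqs_jiffies() hits show how the force-quiescent-state delays are sanitized when set as module parameters: anything above HZ is capped at HZ, and the "next" variant also bumps zero up to one jiffy. A tiny sketch of just that clamp, with HZ stubbed to a constant ("?:" is the GNU Elvis operator, which the kernel code above also uses):

    #define HZ 250UL   /* sketch assumption; the real value depends on the kernel config */

    /* Clamp a requested delay to the range [1, HZ] jiffies. */
    static unsigned long clamp_fqs_jiffies(unsigned long j)
    {
        return (j > HZ) ? HZ : (j ?: 1);
    }
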
rcuscale.c
    948  int j;    in rcu_scale_cleanup() local
   1002  j = writer_n_durations[i];    in rcu_scale_cleanup()
   1004  scale_type, SCALE_FLAG, i, j);    in rcu_scale_cleanup()
   1005  ngps += j;    in rcu_scale_cleanup()
   1023  for (j = 0; j < writer_n_durations[i]; j++) {    in rcu_scale_cleanup()
   1024  wdp = &wdpp[j];    in rcu_scale_cleanup()
   1027  i, j, *wdp);    in rcu_scale_cleanup()
   1028  if (j % 100 == 0)    in rcu_scale_cleanup()
   1087  long j;    in rcu_scale_init() local
   1193  for (j = 0; j < gp_async_max; j++) {    in rcu_scale_init()
    [all …]

rcutorture.c
   1208  j = jiffies;    in rcu_torture_boost_failed()
   2228  int j;    in rcutorture_loop_extend() local
   2237  for (j = 0; j < i; j++) {    in rcutorture_loop_extend()
   3122  int j;    in rcu_torture_fwd_cb_hist() local
   3130  for (j = 0; j <= i; j++) {    in rcu_torture_fwd_cb_hist()
   3950  int j;    in rcu_torture_cleanup() local
   4066  for (j = 0; buf2[j]; j++)    in rcu_torture_cleanup()
   4068  if (j)    in rcu_torture_cleanup()
   4233  if (j >= 0)    in srcu_lockdep_next()
   4237  return j;    in srcu_lockdep_next()
    [all …]

tasks.h
    353  unsigned long j;    in call_rcu_tasks_generic() local
    368  j = jiffies;    in call_rcu_tasks_generic()
    369  if (rtpcp->rtp_jiffies != j) {    in call_rcu_tasks_generic()
    370  rtpcp->rtp_jiffies = j;    in call_rcu_tasks_generic()
    763  unsigned long j = jiffies;    in rcu_tasks_torture_stats_print_generic() local
    767  j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies),    in rcu_tasks_torture_stats_print_generic()
    818  unsigned long j;    in rcu_tasks_wait_gp() local
    888  j = jiffies;    in rcu_tasks_wait_gp()
    890  lastinfo = j;    in rcu_tasks_wait_gp()
   1059  unsigned long j = jiffies + 1;    in rcu_tasks_postscan() local
    [all …]

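The call_rcu_tasks_generic() hits at lines 368-370 show a cheap per-jiffy rate counter: a cached jiffies stamp is compared against the current value and, when the clock has moved on, the stamp is refreshed and the count restarted. A simplified sketch of that stamp-and-reset idiom, with hypothetical names:

    struct rate_bucket {
        unsigned long stamp;   /* the jiffy this bucket is counting for */
        unsigned long count;   /* events observed during that jiffy */
    };

    /* Record one event, restarting the bucket when the clock has ticked. */
    static void rate_bucket_add(struct rate_bucket *b, unsigned long now)
    {
        if (b->stamp != now) {
            b->stamp = now;
            b->count = 0;
        }
        b->count++;
    }
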
tree_exp.h
    558  static void synchronize_rcu_expedited_stall(unsigned long jiffies_start, unsigned long j)    in synchronize_rcu_expedited_stall() argument
    590  j - jiffies_start, rcu_state.expedited_sequence, data_race(rnp_root->expmask),    in synchronize_rcu_expedited_stall()
    623  unsigned long j;    in synchronize_rcu_expedited_wait() local
    650  j = READ_ONCE(jiffies_till_first_fqs);    in synchronize_rcu_expedited_wait()
    651  if (synchronize_rcu_expedited_wait_once(j + HZ))    in synchronize_rcu_expedited_wait()
    656  unsigned long j;    in synchronize_rcu_expedited_wait() local
    665  j = jiffies;    in synchronize_rcu_expedited_wait()
    666  rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_EXP, (void *)(j - jiffies_start));    in synchronize_rcu_expedited_wait()
    668  synchronize_rcu_expedited_stall(jiffies_start, j);    in synchronize_rcu_expedited_wait()

/kernel/sched/

topology.c
   1780  int i,j;    in sched_numa_warn() local
   1791  for (j = 0; j < nr_node_ids; j++) {    in sched_numa_warn()
   1933  for (i = 0, j = 0; i < nr_levels; i++, j++) {    in sched_init_numa()
   2016  for (j = 1; j < nr_levels; i++, j++) {    in sched_init_numa()
   2090  for (j = 0; j < nr_node_ids; j++) {    in sched_domains_numa_masks_set()
   2106  for (j = 0; j < nr_node_ids; j++) {    in sched_domains_numa_masks_clear()
   2252  int j;    in __sdt_alloc() local
   2319  int j;    in __sdt_free() local
   2761  for (j = 0; j < n && !new_topology; j++) {    in partition_sched_domains_locked()
   2782  for (j = 0; j < n && !new_topology; j++) {    in partition_sched_domains_locked()
    [all …]

/kernel/time/

time.c
    377  unsigned int jiffies_to_msecs(const unsigned long j)    in jiffies_to_msecs() argument
    380  return (MSEC_PER_SEC / HZ) * j;    in jiffies_to_msecs()
    400  unsigned int jiffies_to_usecs(const unsigned long j)    in jiffies_to_usecs() argument
    409  return (USEC_PER_SEC / HZ) * j;    in jiffies_to_usecs()
    412  return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;    in jiffies_to_usecs()
    414  return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;    in jiffies_to_usecs()
    755  u64 jiffies64_to_nsecs(u64 j)    in jiffies64_to_nsecs() argument
    758  return (NSEC_PER_SEC / HZ) * j;    in jiffies64_to_nsecs()
    760  return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);    in jiffies64_to_nsecs()
    771  u64 jiffies64_to_msecs(const u64 j)    in jiffies64_to_msecs() argument
    [all …]

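jiffies_to_msecs() and its siblings pick the cheapest exact conversion the build allows: when HZ divides the target unit evenly, a single multiply is enough; otherwise a precomputed multiply-and-shift or numerator/denominator pair is used. A sketch of the simple case only, assuming MSEC_PER_SEC is a multiple of HZ (true for HZ values such as 100, 250, and 1000):

    #define HZ 250U              /* sketch assumption: 1000 % HZ == 0 */
    #define MSEC_PER_SEC 1000U

    /* Exact ticks-to-milliseconds conversion for the easy HZ values. */
    static unsigned int ticks_to_msecs(unsigned long j)
    {
        return (MSEC_PER_SEC / HZ) * j;   /* 4 ms per tick at HZ == 250 */
    }
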
timer.c
    352  unsigned long original = j;    in round_jiffies_common()
    362  j += cpu * 3;    in round_jiffies_common()
    364  rem = j % HZ;    in round_jiffies_common()
    374  j = j - rem;    in round_jiffies_common()
    376  j = j - rem + HZ;    in round_jiffies_common()
    379  j -= cpu * 3;    in round_jiffies_common()
    385  return time_is_after_jiffies(j) ? j : original;    in round_jiffies_common()
    432  unsigned long round_jiffies(unsigned long j)    in round_jiffies() argument
    453  unsigned long round_jiffies_relative(unsigned long j)    in round_jiffies_relative() argument
    474  return round_jiffies_common(j + j0, cpu, true) - j0;    in __round_jiffies_up_relative()
    [all …]

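round_jiffies_common() nudges a timeout to a whole-second boundary so that unrelated timers expire together and the CPU wakes less often: it skews the value by cpu*3 so different CPUs round to slightly different boundaries, rounds down when the remainder is small (HZ/4 in current kernels) and up otherwise, then undoes the skew; the real helper also returns the original value rather than round into the past. A simplified sketch without that past-check:

    #define HZ 250UL   /* sketch assumption */

    /* Round an absolute tick value to a per-CPU-skewed whole-second boundary. */
    static unsigned long round_ticks(unsigned long j, int cpu)
    {
        unsigned long rem;

        j += cpu * 3;              /* skew the boundary so CPUs don't align */
        rem = j % HZ;
        if (rem < HZ / 4)          /* just past a boundary: round down */
            j = j - rem;
        else                       /* otherwise round up to the next second */
            j = j - rem + HZ;
        return j - cpu * 3;        /* undo the skew */
    }
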
/kernel/irq/

affinity.c
     80  for (int j = 0; j < nr_masks; j++)    in irq_create_affinity_masks() local
     81  cpumask_copy(&masks[curvec + j].mask, &result[j]);    in irq_create_affinity_masks()

proc.c
    455  int i = *(loff_t *) v, j;    in show_interrupts() local
    467  for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)    in show_interrupts()
    468  j *= 10;    in show_interrupts()
    471  for_each_online_cpu(j)    in show_interrupts()
    472  seq_printf(p, "CPU%-8d", j);    in show_interrupts()
    485  for_each_online_cpu(j) {    in show_interrupts()
    486  unsigned int cnt = desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0;    in show_interrupts()

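The loop at lines 467-468 sizes the IRQ-number column of /proc/interrupts: starting from three digits, it keeps widening while 10^prec still fits under nr_irqs, so prec ends up as the number of decimal digits the largest IRQ number needs, clamped to at most ten. The same width computation as a standalone helper:

    /* Decimal digits needed to print values up to max, clamped to the range [3, 10]. */
    static int field_width(unsigned int max)
    {
        int prec;
        unsigned long j;

        for (prec = 3, j = 1000; prec < 10 && j <= max; ++prec)
            j *= 10;
        return prec;
    }
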
/kernel/module/

strict_rwx.c
    138  int i, j;    in module_mark_ro_after_init() local
    143  for (j = 0; j < ARRAY_SIZE(ro_after_init); j++) {    in module_mark_ro_after_init()
    145  ro_after_init[j]) == 0) {    in module_mark_ro_after_init()

/kernel/bpf/

log.c
    182  int i, j;    in bpf_vlog_reverse_kbuf() local
    184  for (i = 0, j = len - 1; i < j; i++, j--)    in bpf_vlog_reverse_kbuf()
    185  swap(buf[i], buf[j]);    in bpf_vlog_reverse_kbuf()
    782  int j;    in print_verifier_state() local
    787  for (j = 0; j < BPF_REG_SIZE; j++) {    in print_verifier_state()
    788  slot_type = state->stack[i].slot_type[j];    in print_verifier_state()
    791  types_buf[j] = slot_type_char[slot_type];    in print_verifier_state()
    801  for (j = 0; j < BPF_REG_SIZE; j++)    in print_verifier_state()
    802  if (state->stack[i].slot_type[j] == STACK_SPILL)    in print_verifier_state()
    804  types_buf[j] = '\0';    in print_verifier_state()

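bpf_vlog_reverse_kbuf() (lines 182-185) is a textbook two-pointer in-place reversal: i walks forward, j walks backward, and bytes are swapped until the cursors cross. The same routine written out with an explicit temporary instead of the kernel's swap() macro:

    /* Reverse the first len bytes of buf in place. */
    static void reverse_buf(char *buf, int len)
    {
        for (int i = 0, j = len - 1; i < j; i++, j--) {
            char tmp = buf[i];

            buf[i] = buf[j];
            buf[j] = tmp;
        }
    }

Reversing the two parts and then the whole is the classic way to rotate a wrapped circular buffer back into linear order, which is the kind of job this building block supports in the verifier log code.
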
percpu_freelist.c
     82  unsigned int cpu, cpu_idx, i, j, n, m;    in pcpu_freelist_populate() local
     90  j = n + (cpu_idx < m ? 1 : 0);    in pcpu_freelist_populate()
     91  for (i = 0; i < j; i++) {    in pcpu_freelist_populate()

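pcpu_freelist_populate() spreads its elements across the CPUs by giving everyone a base share n = total / num_cpus and handing the m = total % num_cpus leftovers to the first m CPUs, which is what line 90's j = n + (cpu_idx < m ? 1 : 0) computes. A standalone sketch of that split:

    #include <stdio.h>

    /* Print how many of 'total' items each of 'nbuckets' buckets receives. */
    static void split_evenly(unsigned int total, unsigned int nbuckets)
    {
        unsigned int n = total / nbuckets;   /* base share */
        unsigned int m = total % nbuckets;   /* leftovers go to the first m buckets */

        for (unsigned int idx = 0; idx < nbuckets; idx++)
            printf("bucket %u gets %u items\n", idx, n + (idx < m ? 1 : 0));
    }
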
/kernel/cgroup/

misc.c
    154  struct misc_cg *i, *j;    in misc_cg_try_charge() local
    181  for (j = cg; j != i; j = parent_misc(j))    in misc_cg_try_charge()
    182  misc_cg_cancel_charge(type, j, amount);    in misc_cg_try_charge()

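misc_cg_try_charge() charges every level from the target cgroup up to the root and, if some ancestor rejects the charge, walks the same path again from the bottom and cancels exactly the levels charged so far; that is what the for (j = cg; j != i; ...) loop above does. A generic sketch of the charge-then-unwind pattern with hypothetical node and limit fields:

    struct node {
        struct node *parent;   /* NULL at the root */
        long usage, max;
    };

    /* Charge 'amount' on n and all of its ancestors, or roll back and fail. */
    static int try_charge_hierarchy(struct node *n, long amount)
    {
        struct node *i, *j;

        for (i = n; i; i = i->parent) {
            if (i->usage + amount > i->max)
                goto revert;                 /* this ancestor is over its limit */
            i->usage += amount;
        }
        return 0;

    revert:
        for (j = n; j != i; j = j->parent)   /* undo only what was already charged */
            j->usage -= amount;
        return -1;
    }
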
/kernel/locking/

locktorture.c
    252  unsigned long j;    in torture_spin_lock_write_delay() local
    258  j = jiffies;    in torture_spin_lock_write_delay()
    260  pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);    in torture_spin_lock_write_delay()
    899  unsigned long j;    in lock_torture_writer() local
    933  j = jiffies;    in lock_torture_writer()
    942  WARN_ONCE(time_after(j1, j + acq_writer_lim),    in lock_torture_writer()
    944  __func__, j1 - j);    in lock_torture_writer()
   1219  int i, j;    in lock_torture_init() local
   1406  for (i = 0, j = 0; i < cxt.nrealwriters_stress ||    in lock_torture_init()
   1407  j < cxt.nrealreaders_stress; i++, j++) {    in lock_torture_init()
    [all …]

/kernel/trace/

pid_list.c
    458  int i, j;    in trace_pid_list_free() local
    484  for (j = 0; j < UPPER2_SIZE; j++) {    in trace_pid_list_free()
    485  lower = upper->data[j];    in trace_pid_list_free()

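trace_pid_list_free() tears a two-level table down from the bottom up: for each populated upper-level chunk, the inner loop over UPPER2_SIZE entries frees every lower-level chunk it references before the upper chunk itself is released. A generic sketch of freeing such a two-level structure; the sizes and type names below are made up for the sketch:

    #include <stdlib.h>

    #define TOP_SIZE   256   /* sketch values, not the pid_list constants */
    #define CHUNK_SIZE 256

    struct lower { unsigned long bits[CHUNK_SIZE]; };
    struct upper { struct lower *data[CHUNK_SIZE]; };
    struct table { struct upper *tops[TOP_SIZE]; };

    /* Free every lower chunk, then its upper chunk, then the table itself. */
    static void table_free(struct table *t)
    {
        if (!t)
            return;
        for (int i = 0; i < TOP_SIZE; i++) {
            struct upper *u = t->tops[i];

            if (!u)
                continue;
            for (int j = 0; j < CHUNK_SIZE; j++)
                free(u->data[j]);        /* free(NULL) is harmless */
            free(u);
        }
        free(t);
    }
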
trace_probe.c
   1727  int i, j, n, used, ret, args_idx = -1;    in traceprobe_expand_meta_args() local
   1760  for (i = 0, j = 0; i < argc; i++) {    in traceprobe_expand_meta_args()
   1769  new_argv[j++] = buf + used;    in traceprobe_expand_meta_args()
   1788  new_argv[j++] = buf + used;    in traceprobe_expand_meta_args()
   1791  new_argv[j++] = argv[i];    in traceprobe_expand_meta_args()
   1900  int i, j;    in __set_print_fmt() local
   1930  for (j = 1; j < parg->count; j++)    in __set_print_fmt()
   1948  for (j = 0; j < parg->count; j++)    in __set_print_fmt()
   1950  fmt, parg->name, j);    in __set_print_fmt()
   2257  int i, j;    in trace_probe_print_args() local
    [all …]

/kernel/bpf/preload/iterators/

README
      2  If you change "iterators.bpf.c" do "make -j" in this directory to
      4  machine, do "make -j big" in this directory to rebuild