/kernel/locking/
  ww_mutex.h
      11  struct mutex_waiter *w;                              [in __ww_waiter_first(), local]
      14  if (list_entry_is_head(w, &lock->wait_list, list))   [in __ww_waiter_first()]
      17  return w;                                            [in __ww_waiter_first()]
      23  w = list_next_entry(w, list);                        [in __ww_waiter_next()]
      27  return w;                                            [in __ww_waiter_next()]
      33  w = list_prev_entry(w, list);                        [in __ww_waiter_prev()]
      37  return w;                                            [in __ww_waiter_prev()]
      43  struct mutex_waiter *w;                              [in __ww_waiter_last(), local]
      49  return w;                                            [in __ww_waiter_last()]
     105  struct rb_node *n = rb_next(&w->tree.entry);         [in __ww_waiter_next()]
      [all …]
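
The `__ww_waiter_*` helpers above iterate a mutex's `wait_list` and use `list_entry_is_head()` as the termination test. Below is a minimal standalone sketch of that iteration pattern, assuming only `<linux/list.h>`; the struct and function names are illustrative, not the kernel's.

```c
#include <linux/list.h>

struct mutex_waiter_ex {		/* hypothetical stand-in type */
	struct list_head list;
};

static struct mutex_waiter_ex *
waiter_first(struct list_head *wait_list)
{
	struct mutex_waiter_ex *w;

	/* on an empty list, this "entry" is the head itself */
	w = list_first_entry(wait_list, struct mutex_waiter_ex, list);
	if (list_entry_is_head(w, wait_list, list))
		return NULL;
	return w;
}

static struct mutex_waiter_ex *
waiter_next(struct list_head *wait_list, struct mutex_waiter_ex *w)
{
	w = list_next_entry(w, list);	/* step to the following waiter */
	if (list_entry_is_head(w, wait_list, list))
		return NULL;		/* wrapped around to the head */
	return w;
}
```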
|
  rtmutex_common.h
     132  struct rt_mutex_waiter *w = NULL;                            [in rt_mutex_top_waiter(), local]
     137  w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);  [in rt_mutex_top_waiter()]
     138  BUG_ON(w->lock != lock);                                     [in rt_mutex_top_waiter()]
     140  return w;                                                    [in rt_mutex_top_waiter()]
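
`rt_mutex_top_waiter()` reads the highest-priority waiter as the leftmost node of a cached rbtree. A hedged sketch of that lookup, with illustrative type names:

```c
#include <linux/rbtree.h>

struct waiter_ex {			/* hypothetical waiter type */
	struct rb_node entry;
};

static struct waiter_ex *top_waiter(struct rb_root_cached *tree)
{
	struct rb_node *leftmost = rb_first_cached(tree);

	if (!leftmost)
		return NULL;		/* no waiters queued */
	return rb_entry(leftmost, struct waiter_ex, entry);
}
```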
|
  rtmutex.c
     558  struct rt_mutex_waiter *w)                              [in rt_mutex_wake_q_add(), argument]
     560  rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);  [in rt_mutex_wake_q_add()]
    1660  struct rt_mutex_waiter *w)                              [in rt_mutex_handle_deadlock(), argument]
    1669  if (build_ww_mutex() && w->ww_ctx)                      [in rt_mutex_handle_deadlock()]
|
/kernel/
  audit_watch.c
     238  audit_log_untrustedstring(ab, w->path);                        [in audit_watch_log_rule_change()]
     324  struct audit_watch *w, *nextw;                                 [in audit_remove_parent_watches(), local]
     329  list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {  [in audit_remove_parent_watches()]
     330  list_for_each_entry_safe(r, nextr, &w->rules, rlist) {         [in audit_remove_parent_watches()]
     332  audit_watch_log_rule_change(r, w, "remove_rule");              [in audit_remove_parent_watches()]
     340  audit_remove_watch(w);                                         [in audit_remove_parent_watches()]
     372  struct audit_watch *w, *watch = krule->watch;                  [in audit_add_to_parent(), local]
     377  list_for_each_entry(w, &parent->watches, wlist) {              [in audit_add_to_parent()]
     378  if (strcmp(watch->path, w->path))                              [in audit_add_to_parent()]
     386  audit_get_watch(w);                                            [in audit_add_to_parent()]
     [all …]
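
`audit_remove_parent_watches()` relies on `list_for_each_entry_safe()`, which keeps a lookahead cursor (`nextw`) so the current entry can be unlinked and freed mid-walk. A minimal sketch of that removal pattern, with hypothetical types:

```c
#include <linux/list.h>
#include <linux/slab.h>

struct watch_ex {			/* hypothetical stand-in type */
	struct list_head wlist;
};

static void remove_all_watches(struct list_head *watches)
{
	struct watch_ex *w, *nextw;

	list_for_each_entry_safe(w, nextw, watches, wlist) {
		list_del(&w->wlist);	/* safe: 'nextw' was saved first */
		kfree(w);
	}
}
```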
|
  audit.h
     308  #define audit_put_watch(w) do { } while (0)   [w: macro argument]
     309  #define audit_get_watch(w) do { } while (0)   [w: macro argument]
     313  #define audit_watch_path(w) ""                [w: macro argument]
     314  #define audit_watch_compare(w, i, d) 0        [w: macro argument]
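
The `do { } while (0)` stubs above are the standard way to compile an API out when its config option is disabled while keeping call sites syntactically valid statements. A hedged illustration with a hypothetical feature:

```c
/* Hypothetical feature, shown only to illustrate the stub pattern. */
struct thing;

#ifdef CONFIG_MYFEATURE
void myfeature_put(struct thing *t);
const char *myfeature_path(struct thing *t);
#else
# define myfeature_put(t) do { } while (0)	/* absorbs the trailing ';' */
# define myfeature_path(t) ""			/* harmless constant result */
#endif
```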
|
  scftorture.c
     259  unsigned long w;                                                   [in scf_sel_dump(), local]
     263  w = (scfsp->scfs_weight - oldw) * 100000 / scf_sel_totweight;      [in scf_sel_dump()]
     264  pr_info("%s: %3lu.%03lu %s(%s)\n", __func__, w / 1000, w % 1000,   [in scf_sel_dump()]
     275  unsigned long w = torture_random(trsp) % (scf_sel_totweight + 1);  [in scf_sel_rand(), local]
     278  if (scf_sel_array[i].scfs_weight >= w)                             [in scf_sel_rand()]
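
`scf_sel_rand()` above implements weighted random selection over cumulative weights: draw a value in [0, total] and return the first table entry whose running weight reaches it. A standalone, runnable C sketch of the same idea (plain `rand()` stands in for the kernel's `torture_random()`):

```c
#include <stdio.h>
#include <stdlib.h>

struct sel {
	unsigned long cum_weight;	/* cumulative, not per-entry */
	const char *name;
};

/* weights 30/50/20 stored as running totals, as scf_sel_array is */
static const struct sel table[] = {
	{  30, "single" },
	{  80, "many"   },
	{ 100, "all"    },
};

static const char *sel_rand(unsigned long total)
{
	unsigned long w = (unsigned long)rand() % (total + 1);

	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].cum_weight >= w)
			return table[i].name;
	return NULL;	/* unreachable when 'total' matches the table */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%s\n", sel_rand(100));
	return 0;
}
```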
|
  watch_queue.c
     465  struct watch *w;                                        [in add_one_watch(), local]
     467  hlist_for_each_entry(w, &wlist->watchers, list_node) {  [in add_one_watch()]
     468  struct watch_queue *wq = rcu_access_pointer(w->queue);  [in add_one_watch()]
     469  if (wqueue == wq && watch->id == w->id)                 [in add_one_watch()]
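
`add_one_watch()` walks an RCU-protected hlist to reject duplicates; `rcu_access_pointer()` suffices because the queue pointer is only compared, never dereferenced. A sketch with illustrative types:

```c
#include <linux/rculist.h>
#include <linux/types.h>

struct watch_ex {			/* hypothetical stand-in type */
	struct hlist_node list_node;
	u64 id;
	void __rcu *queue;
};

static bool watch_is_duplicate(struct hlist_head *watchers,
			       void *wqueue, u64 id)
{
	struct watch_ex *w;

	hlist_for_each_entry(w, watchers, list_node) {
		/* compare only: no rcu_dereference() needed */
		if (rcu_access_pointer(w->queue) == wqueue && w->id == id)
			return true;
	}
	return false;
}
```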
|
  padata.c
     400  static void __init padata_mt_helper(struct work_struct *w)              [in padata_mt_helper(), argument]
     402  struct padata_work *pw = container_of(w, struct padata_work, pw_work);  [in padata_mt_helper()]
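
`padata_mt_helper()` shows the standard `container_of()` idiom: a workqueue handler receives only the embedded `work_struct` and recovers the enclosing object from it. A sketch with hypothetical type names:

```c
#include <linux/workqueue.h>

struct job {				/* hypothetical enclosing object */
	int payload;
	struct work_struct work;
};

static void job_fn(struct work_struct *w)
{
	struct job *j = container_of(w, struct job, work);

	(void)j->payload;	/* the outer object is now reachable */
}
```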
|
  exit.c
     315  int rcuwait_wake_up(struct rcuwait *w)  [in rcuwait_wake_up(), argument]
     335  task = rcu_dereference(w->task);        [in rcuwait_wake_up()]
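
`rcuwait_wake_up()` pairs with a waiter that published its `task_struct` via RCU: the waker dereferences the pointer inside an RCU read-side section and wakes the task if it is still there. A simplified sketch (the real function also issues a memory barrier to order against the wait condition):

```c
#include <linux/rcupdate.h>
#include <linux/rcuwait.h>
#include <linux/sched.h>

static int wake_sketch(struct rcuwait *w)
{
	struct task_struct *task;
	int ret = 0;

	rcu_read_lock();
	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);	/* waiter may already be gone */
	rcu_read_unlock();
	return ret;
}
```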
|
/kernel/futex/
  syscalls.c
     216  futexv[i].w.flags = flags;                                                      [in futex_parse_waitv()]
     217  futexv[i].w.val = aux.val;                                                      [in futex_parse_waitv()]
     218  futexv[i].w.uaddr = aux.uaddr;                                                  [in futex_parse_waitv()]
     434  cmpval = futexes[0].w.val;                                                      [in SYSCALL_DEFINE4()]
     436  return futex_requeue(u64_to_user_ptr(futexes[0].w.uaddr), futexes[0].w.flags,  [in SYSCALL_DEFINE4()]
     437  u64_to_user_ptr(futexes[1].w.uaddr), futexes[1].w.flags,                       [in SYSCALL_DEFINE4()]
|
  waitwake.c
     430  if (!(vs[i].w.flags & FLAGS_SHARED) && retry)                    [in futex_wait_multiple_setup()]
     433  ret = get_futex_key(u64_to_user_ptr(vs[i].w.uaddr),              [in futex_wait_multiple_setup()]
     434  vs[i].w.flags,                                                   [in futex_wait_multiple_setup()]
     444  u32 __user *uaddr = (u32 __user *)(unsigned long)vs[i].w.uaddr;  [in futex_wait_multiple_setup()]
     446  u32 val = vs[i].w.val;                                           [in futex_wait_multiple_setup()]
|
  futex.h
     476  struct futex_waitv w;   [member]
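
Taken together, the three futex files above show `struct futex_waitv` flowing from userspace into the kernel-side vector's `w` member. A hedged sketch of the copy-in loop behind `futex_parse_waitv()`, with validation trimmed to the reserved field and a stand-in vector type:

```c
#include <linux/errno.h>
#include <linux/futex.h>
#include <linux/uaccess.h>

struct futex_vector_ex {		/* stand-in for struct futex_vector */
	struct futex_waitv w;
};

static int parse_waitv_sketch(struct futex_vector_ex *futexv,
			      struct futex_waitv __user *uwaitv,
			      unsigned int nr)
{
	struct futex_waitv aux;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (copy_from_user(&aux, &uwaitv[i], sizeof(aux)))
			return -EFAULT;
		if (aux.__reserved)
			return -EINVAL;	/* reserved bits must be zero */
		futexv[i].w = aux;	/* flags/val/uaddr kept as given */
	}
	return 0;
}
```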
|
/kernel/bpf/
  helpers.c
    1190  struct bpf_async_cb *cb = &w->cb;                [in bpf_wq_work()]
    1226  cancel_work_sync(&w->work);                      [in bpf_wq_delete_work()]
    1228  kfree_rcu(w, cb.rcu);                            [in bpf_wq_delete_work()]
    1251  struct bpf_work *w;                              [in __bpf_async_init(), local]
    1295  w = (struct bpf_work *)cb;                       [in __bpf_async_init()]
    1297  INIT_WORK(&w->work, bpf_wq_work);                [in __bpf_async_init()]
    1298  INIT_WORK(&w->delete_work, bpf_wq_delete_work);  [in __bpf_async_init()]
    3044  struct bpf_work *w;                              [in bpf_wq_start(), local]
    3050  w = READ_ONCE(async->work);                      [in bpf_wq_start()]
    3051  if (!w || !READ_ONCE(w->cb.prog))                [in bpf_wq_start()]
    [all …]
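
The `bpf_wq` hits above pair `INIT_WORK()` at creation with `cancel_work_sync()` followed by `kfree_rcu()` at deletion, so a running handler is waited out and RCU readers drain before the memory goes away. An illustrative sketch with hypothetical types:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct async_obj {			/* hypothetical stand-in type */
	struct work_struct work;
	struct rcu_head rcu;
};

static void async_fn(struct work_struct *w)
{
	struct async_obj *o = container_of(w, struct async_obj, work);

	(void)o;	/* do the deferred work here */
}

static struct async_obj *async_create(void)
{
	struct async_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		INIT_WORK(&o->work, async_fn);
	return o;
}

static void async_destroy(struct async_obj *o)
{
	cancel_work_sync(&o->work);	/* wait out a running handler */
	kfree_rcu(o, rcu);		/* free after RCU readers drain */
}
```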
|
  verifier.c
   17380  if (w < 0 || w >= env->prog->len) {  [in push_insn()]
   17389  mark_jmp_point(env, w);              [in push_insn()]
   17392  if (insn_state[w] == 0) {            [in push_insn()]
   17404  verbose_linfo(env, w, "%d: ", w);    [in push_insn()]
   17422  int w;                               [in visit_func_call_insn(), local]
   24506  w = dfs[dfs_sz - 1];                 [in compute_scc()]
   24507  if (pre[w] == 0) {                   [in compute_scc()]
   24517  low[w] = min(low[w], low[succ[j]]);  [in compute_scc()]
   24527  if (low[w] < pre[w]) {               [in compute_scc()]
   24537  if (succ[j] == w) {                  [in compute_scc()]
   [all …]
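
`push_insn()` bounds-checks every branch target before the verifier's DFS marks or explores it; `compute_scc()` then runs a Tarjan-style pass over the same graph using pre/low arrays. A heavily simplified, standalone sketch of just the push step, with illustrative names:

```c
/* insn_state: 0 = undiscovered, 1 = discovered (simplified encoding) */
static int push_insn_sketch(int w, int prog_len, unsigned char *insn_state,
			    int *stack, int *stack_sz)
{
	if (w < 0 || w >= prog_len)
		return -1;		/* branch target out of bounds */

	if (insn_state[w] == 0) {	/* first visit: push for later */
		insn_state[w] = 1;
		stack[(*stack_sz)++] = w;
	}
	return 0;
}
```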
|
/kernel/sched/
  psi.c
    1274  int w;                                   [in psi_show(), local]
    1278  for (w = 0; w < 3; w++)                  [in psi_show()]
    1279  avg[w] = group->avg[res * 2 + full][w];  [in psi_show()]
|
  sched.h
     148  # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)  [w: macro argument]
     149  # define scale_load_down(w) \                           [w: macro argument]
     151  unsigned long __w = (w); \
     159  # define scale_load(w) (w)                              [w: macro argument]
     160  # define scale_load_down(w) (w)                         [w: macro argument]
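
`scale_load()`/`scale_load_down()` implement fixed-point load weights: shifted up on 64-bit builds for precision, with the down conversion clamping nonzero weights to a small floor so they never truncate to zero. A runnable userspace illustration (the shift and floor mirror the kernel's values, but this is a sketch, not the macro):

```c
#include <stdio.h>

#define FIXEDPOINT_SHIFT 10	/* SCHED_FIXEDPOINT_SHIFT is also 10 */

static unsigned long scale_up(unsigned long w)
{
	return w << FIXEDPOINT_SHIFT;
}

static unsigned long scale_down(unsigned long w)
{
	/* nonzero weights are floored at 2 rather than truncated away */
	if (w) {
		unsigned long down = w >> FIXEDPOINT_SHIFT;

		w = down > 2 ? down : 2;
	}
	return w;
}

int main(void)
{
	unsigned long w = 3;

	printf("%lu -> %lu -> %lu\n", w, scale_up(w), scale_down(scale_up(w)));
	return 0;
}
```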
|
  topology.c
    2150  int w;                                                  [member]
    2162  k->w = 0;                                               [in hop_cmp()]
    2167  k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);  [in hop_cmp()]
    2168  if (k->w <= k->cpu)                                     [in hop_cmp()]
    2207  cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :  [in sched_numa_find_nth_cpu()]
|
  fair.c
     177  static inline void update_load_set(struct load_weight *lw, unsigned long w)  [in update_load_set(), argument]
     179  lw->weight = w;                                         [in update_load_set()]
     233  unsigned long w;                                        [in __update_inv_weight(), local]
     238  w = scale_load_down(lw->weight);                        [in __update_inv_weight()]
     240  if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))   [in __update_inv_weight()]
     242  else if (unlikely(!w))                                  [in __update_inv_weight()]
     245  lw->inv_weight = WMULT_CONST / w;                       [in __update_inv_weight()]
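
`__update_inv_weight()` precomputes `WMULT_CONST / w` once so later divisions by the weight become a multiply and a shift. A runnable sketch of the trick (the scheduler's real `__calc_delta()` also handles multiply overflow by shifting; omitted here):

```c
#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	(~0U)	/* ~2^32, as in fair.c */
#define WMULT_SHIFT	32

static uint32_t inv_weight(uint32_t w)
{
	if (!w)
		return WMULT_CONST;	/* avoid dividing by zero */
	if (w >= WMULT_CONST)
		return 1;		/* weight too large to scale */
	return WMULT_CONST / w;		/* the one real division */
}

static uint64_t div_by_weight(uint64_t x, uint32_t inv_w)
{
	return (x * inv_w) >> WMULT_SHIFT;	/* approximates x / w */
}

int main(void)
{
	uint32_t iw = inv_weight(1024);

	printf("1000000 / 1024 ~= %llu\n",
	       (unsigned long long)div_by_weight(1000000, iw));
	return 0;
}
```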
|
/kernel/rcu/
  update.c
     450  void finish_rcuwait(struct rcuwait *w)  [in finish_rcuwait(), argument]
     452  rcu_assign_pointer(w->task, NULL);      [in finish_rcuwait()]
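
`finish_rcuwait()` clears the task pointer the waiter published; the usual way to produce that pairing is the `rcuwait_wait_event()` helper from `<linux/rcuwait.h>`, which publishes `current`, sleeps until the condition holds, and finishes on the way out. A sketch with a hypothetical context struct:

```c
#include <linux/rcuwait.h>
#include <linux/sched.h>

struct waiter_ctx {			/* hypothetical stand-in type */
	struct rcuwait wait;		/* rcuwait_init() at setup time */
	bool done;
};

static void wait_for_done(struct waiter_ctx *ctx)
{
	/* publishes current in ctx->wait.task, sleeps until 'done' is
	 * set, then clears the pointer again (finish_rcuwait) on exit */
	rcuwait_wait_event(&ctx->wait, READ_ONCE(ctx->done),
			   TASK_UNINTERRUPTIBLE);
}
```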
|
/kernel/printk/
  nbcon.c
    1251  static inline bool rcuwait_has_sleeper(struct rcuwait *w)  [in rcuwait_has_sleeper(), argument]
    1265  return rcuwait_active(w);                                  [in rcuwait_has_sleeper()]
|
/kernel/trace/
  ring_buffer.c
    4427  unsigned long tail, write, w;                             [in __rb_reserve_next(), local]
    4432  /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;  [in __rb_reserve_next()]
    4447  if (!w) {                                                 [in __rb_reserve_next()]
    4477  if (likely(tail == w)) {                                  [in __rb_reserve_next()]
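
`__rb_reserve_next()` is lock-free against interrupts on the same CPU: it snapshots the write index at `/*A*/`, advances it atomically, and takes the fast path only if the pre-add value still equals the snapshot, meaning nothing reserved space in between. A heavily simplified sketch of that check using `local_t` (the real commit protocol does much more):

```c
#include <linux/types.h>
#include <asm/local.h>

/* returns the start of our slot; '*fast' reports whether no interrupt
 * slipped a reservation in between the snapshot and the add */
static long reserve_slot(local_t *write, long len, bool *fast)
{
	long w, tail;

	w = local_read(write);				/* A: snapshot */
	tail = local_add_return(len, write) - len;	/* claim 'len' bytes */

	*fast = (tail == w);	/* false: an NMI/irq reserved first */
	return tail;
}
```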
|
/kernel/module/
  main.c
      79  static void do_free_init(struct work_struct *w);
    2975  static void do_free_init(struct work_struct *w)  [in do_free_init(), argument]
|
/kernel/events/
  core.c
     602  static void perf_duration_warn(struct irq_work *w)  [in perf_duration_warn(), argument]
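
`perf_duration_warn()` is an irq_work handler: work queued from NMI-safe context runs later in hard-irq context, where printing is permitted. A sketch of the setup, with illustrative names:

```c
#include <linux/irq_work.h>
#include <linux/printk.h>

static void warn_fn(struct irq_work *w)
{
	pr_warn("deferred warning fired\n");	/* safe to print here */
}

static struct irq_work warn_work;

static void report_init(void)
{
	init_irq_work(&warn_work, warn_fn);
}

static void report_from_nmi(void)
{
	irq_work_queue(&warn_work);	/* callable even from NMI */
}
```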
|