/kernel/printk/

nbcon.c
    262: if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)  in nbcon_context_try_acquire_direct()
    265: if (cur->unsafe)  in nbcon_context_try_acquire_direct()
    371: new.atom = cur->atom;  in nbcon_context_try_acquire_requested()
    439: WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);  in nbcon_context_try_acquire_handover()
    453: if (cur->cpu == cpu)  in nbcon_context_try_acquire_handover()
    471: new.atom = cur->atom;  in nbcon_context_try_acquire_handover()
    476: cur->atom = new.atom;  in nbcon_context_try_acquire_handover()
    560: WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);  in nbcon_context_try_acquire_hostile()
    741: if (cur->unsafe)  in nbcon_context_can_proceed()
    745: WARN_ON_ONCE(cur->req_prio <= cur->prio);  in nbcon_context_can_proceed()
    [all …]
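The nbcon.c hits above all belong to the console-ownership handshake: a context may take the console directly only if it is strictly more urgent than both the current owner and any pending request, and never while the owner is inside an unsafe region. Below is a minimal userspace sketch of that gate; the enum values, field names and can_acquire_direct() are illustrative stand-ins, not the kernel API.

```c
/* Illustrative userspace sketch (not the kernel code): a contender may take
 * over a shared resource only if its priority is strictly higher than both
 * the current owner's priority and any already-pending request's priority,
 * and only while the owner is not in an "unsafe" region. */
#include <stdbool.h>
#include <stdio.h>

enum prio { PRIO_NONE, PRIO_NORMAL, PRIO_EMERGENCY, PRIO_PANIC };

struct owner_state {
        enum prio prio;      /* priority of the current owner */
        enum prio req_prio;  /* priority of a pending handover request */
        bool unsafe;         /* owner is in a section that must not be interrupted */
};

static bool can_acquire_direct(enum prio my_prio, const struct owner_state *cur)
{
        if (my_prio <= cur->prio || my_prio <= cur->req_prio)
                return false;   /* not strictly more urgent than owner or requester */
        if (cur->unsafe)
                return false;   /* owner must first leave its unsafe region */
        return true;
}

int main(void)
{
        struct owner_state cur = { .prio = PRIO_NORMAL, .req_prio = PRIO_NONE, .unsafe = false };

        printf("emergency acquires: %d\n", can_acquire_direct(PRIO_EMERGENCY, &cur)); /* 1 */
        cur.unsafe = true;
        printf("emergency acquires while unsafe: %d\n", can_acquire_direct(PRIO_EMERGENCY, &cur)); /* 0 */
        return 0;
}
```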
|
/kernel/

crash_reserve.c
    84: cur = tmp;  in parse_crashkernel_mem()
    89: cur++;  in parse_crashkernel_mem()
    98: cur = tmp;  in parse_crashkernel_mem()
    109: cur++;  in parse_crashkernel_mem()
    116: cur = tmp;  in parse_crashkernel_mem()
    130: while (*cur && *cur != ' ' && *cur != '@')  in parse_crashkernel_mem()
    131: cur++;  in parse_crashkernel_mem()
    133: cur++;  in parse_crashkernel_mem()
    166: *crash_base = memparse(cur+1, &cur);  in parse_crashkernel_simple()
    167: else if (*cur != ' ' && *cur != '\0') {  in parse_crashkernel_simple()
    [all …]
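parse_crashkernel_mem() and parse_crashkernel_simple() advance a char *cur cursor through the crashkernel= string, letting memparse() consume a size followed by an optional @offset. A simplified userspace analogue of that cursor-advancing parse is sketched below; memparse() is a kernel helper, so the sketch substitutes a hand-rolled parse_size() with K/M/G suffix handling, and parse_crash_arg() is a hypothetical name.

```c
/* Simplified userspace analogue of parsing "<size>[@<offset>]". */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s, char **retptr)
{
        unsigned long long val = strtoull(s, retptr, 0);

        switch (**retptr) {
        case 'G': case 'g': val <<= 10; /* fall through */
        case 'M': case 'm': val <<= 10; /* fall through */
        case 'K': case 'k': val <<= 10; (*retptr)++;
        }
        return val;
}

static int parse_crash_arg(const char *arg,
                           unsigned long long *size, unsigned long long *base)
{
        char *cur;

        *size = parse_size(arg, &cur);
        if (cur == arg)
                return -1;              /* no digits at all */

        *base = 0;
        if (*cur == '@')
                *base = parse_size(cur + 1, &cur);
        else if (*cur != ' ' && *cur != '\0')
                return -1;              /* unexpected trailing characters */
        return 0;
}

int main(void)
{
        unsigned long long size, base;

        if (!parse_crash_arg("256M@64M", &size, &base))
                printf("size=%llu base=%llu\n", size, base);
        return 0;
}
```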
|
smpboot.c
    211: struct smp_hotplug_thread *cur;  in smpboot_create_threads()  (local)
    215: list_for_each_entry(cur, &hotplug_threads, list) {  in smpboot_create_threads()
    216: ret = __smpboot_create_thread(cur, cpu);  in smpboot_create_threads()
    234: struct smp_hotplug_thread *cur;  in smpboot_unpark_threads()  (local)
    237: list_for_each_entry(cur, &hotplug_threads, list)  in smpboot_unpark_threads()
    238: smpboot_unpark_thread(cur, cpu);  in smpboot_unpark_threads()
    253: struct smp_hotplug_thread *cur;  in smpboot_park_threads()  (local)
    256: list_for_each_entry_reverse(cur, &hotplug_threads, list)  in smpboot_park_threads()
    257: smpboot_park_thread(cur, cpu);  in smpboot_park_threads()
|
seccomp.c
    1487: struct seccomp_knotif *cur;  in find_notification()  (local)
    1492: if (cur->id == id)  in find_notification()
    1493: return cur;  in find_notification()
    1559: knotif = cur;  in seccomp_notify_recv()
    1844: struct seccomp_knotif *cur;  in seccomp_notify_poll()  (local)
    1912: struct seccomp_filter *cur;  in has_duplicate_listener()  (local)
    1919: for (cur = current->seccomp.filter; cur; cur = cur->prev) {  in has_duplicate_listener()
    1920: if (cur->notif)  in has_duplicate_listener()
    2317: for (cur = seccomp_log_names; cur->name && size; cur++) {  in seccomp_names_from_actions_logged()
    2349: for (cur = seccomp_log_names; cur->name; cur++) {  in seccomp_action_logged_from_name()
    [all …]
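has_duplicate_listener() walks the task's filter stack backwards through ->prev to see whether any filter already has a notification listener attached. A stripped-down sketch of that chain walk, with simplified stand-in types (struct filter and has_listener are not the kernel's names):

```c
/* Walk the stack of stacked filters via ->prev, newest first. */
#include <stdbool.h>
#include <stddef.h>

struct filter {
        struct filter *prev;    /* filter this one was stacked on, or NULL */
        bool has_listener;      /* stands in for cur->notif */
};

bool has_duplicate_listener(const struct filter *newest)
{
        const struct filter *cur;

        for (cur = newest; cur; cur = cur->prev) {
                if (cur->has_listener)
                        return true;
        }
        return false;
}
```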
|
resource.c
    1532: struct resource *cur;  in merge_system_ram_resource()  (local)
    1541: cur = res->sibling;  in merge_system_ram_resource()
    1542: if (cur && system_ram_resources_mergeable(res, cur)) {  in merge_system_ram_resource()
    1543: res->end = cur->end;  in merge_system_ram_resource()
    1544: res->sibling = cur->sibling;  in merge_system_ram_resource()
    1545: free_resource(cur);  in merge_system_ram_resource()
    1549: cur = res->parent->child;  in merge_system_ram_resource()
    1550: while (cur && cur->sibling != res)  in merge_system_ram_resource()
    1551: cur = cur->sibling;  in merge_system_ram_resource()
    1552: if (cur && system_ram_resources_mergeable(cur, res)) {  in merge_system_ram_resource()
    [all …]
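merge_system_ram_resource() attempts two merges on a singly linked sibling list: first absorb the following sibling into res, then locate the preceding sibling and let it absorb res. A userspace sketch of the same two-step merge on plain ranges follows; struct range, mergeable() and try_merge() are simplified stand-ins for struct resource and its helpers, not the kernel code.

```c
/* Merge a range with its adjacent siblings in a singly linked list. */
#include <stdbool.h>
#include <stdlib.h>

struct range {
        unsigned long start, end;       /* inclusive range [start, end] */
        struct range *sibling;          /* next range on the same level */
};

static bool mergeable(const struct range *first, const struct range *second)
{
        /* adjacent and in order: [a, b] followed by [b + 1, c] */
        return first->end + 1 == second->start;
}

void try_merge(struct range *head, struct range *res)
{
        struct range *cur;

        /* 1. absorb the following sibling into res */
        cur = res->sibling;
        if (cur && mergeable(res, cur)) {
                res->end = cur->end;
                res->sibling = cur->sibling;
                free(cur);
        }

        /* 2. find the sibling preceding res and let it absorb res */
        cur = head;
        while (cur && cur->sibling != res)
                cur = cur->sibling;
        if (cur && mergeable(cur, res)) {
                cur->end = res->end;
                cur->sibling = res->sibling;
                free(res);
        }
}
```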
|
padata.c
    137: struct padata_work *cur, *next;  in padata_works_free()  (local)
    143: list_for_each_entry_safe(cur, next, works, pw_list) {  in padata_works_free()
    144: list_del(&cur->pw_list);  in padata_works_free()
    145: padata_work_free(cur);  in padata_works_free()
    360: struct padata_priv *cur;  in padata_do_serial()  (local)
    367: cur = list_entry(pos, struct padata_priv, list);  in padata_do_serial()
    369: if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)  in padata_do_serial()
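The ordering test on line 369 is the wraparound-safe sequence-number comparison: subtract the two unsigned counters and look at the sign of the result. A tiny standalone demonstration, assuming (as the kernel does) that the two counters are less than 2^31 apart:

```c
/* Wraparound-safe "a comes before b" test on 32-bit sequence numbers. */
#include <stdio.h>

static int seq_before(unsigned int a, unsigned int b)
{
        /* true if a is earlier than b, valid while they are < 2^31 apart */
        return (int)(a - b) < 0;
}

int main(void)
{
        printf("%d\n", seq_before(5, 10));           /* 1: plainly earlier */
        printf("%d\n", seq_before(0xfffffffeu, 3));  /* 1: earlier across the wrap */
        printf("%d\n", seq_before(10, 5));           /* 0 */
        return 0;
}
```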
|
kprobes.c
    1936: struct llist_node **cur)  in __kretprobe_find_ret_addr()  (argument)
    1939: struct llist_node *node = *cur;  in __kretprobe_find_ret_addr()
    1949: *cur = node;  in __kretprobe_find_ret_addr()
    1974: struct llist_node **cur)  in kretprobe_find_ret_addr()  (argument)
    1979: if (WARN_ON_ONCE(!cur))  in kretprobe_find_ret_addr()
    1983: ret = __kretprobe_find_ret_addr(tsk, cur);  in kretprobe_find_ret_addr()
    1986: ri = container_of(*cur, struct kretprobe_instance, llist);  in kretprobe_find_ret_addr()
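Both kretprobe_find_ret_addr() here and rethook_find_ret_addr() under /kernel/trace/ take a struct llist_node **cur so that repeated calls can resume the search where the previous call stopped. A userspace sketch of that resumable double-pointer cursor on a plain singly linked list; the types and the find_next_ret_addr() name are illustrative only.

```c
/* Resumable cursor search over a singly linked list. */
#include <stddef.h>

struct node {
        struct node *next;
        unsigned long ret_addr;
};

/* Find the next node whose ret_addr is not the trampoline marker, starting
 * after *cur; on success *cur is advanced so the next call resumes there.
 * Returns the address found, or 0 when the list is exhausted. */
unsigned long find_next_ret_addr(struct node *head, struct node **cur,
                                 unsigned long trampoline)
{
        struct node *node = *cur ? (*cur)->next : head;

        for (; node; node = node->next) {
                if (node->ret_addr != trampoline) {
                        *cur = node;
                        return node->ret_addr;
                }
        }
        return 0;
}
```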
|
/kernel/locking/

ww_mutex.h
    375: struct MUTEX_WAITER *cur;  in __ww_mutex_check_waiters()  (local)
    379: for (cur = __ww_waiter_first(lock); cur;  in __ww_mutex_check_waiters()
    380: cur = __ww_waiter_next(lock, cur)) {  in __ww_mutex_check_waiters()
    382: if (!cur->ww_ctx)  in __ww_mutex_check_waiters()
    489: for (cur = __ww_waiter_prev(lock, waiter); cur;  in __ww_mutex_check_kill()
    490: cur = __ww_waiter_prev(lock, cur)) {  in __ww_mutex_check_kill()
    492: if (!cur->ww_ctx)  in __ww_mutex_check_kill()
    535: for (cur = __ww_waiter_last(lock); cur;  in __ww_mutex_add_waiter()
    536: cur = __ww_waiter_prev(lock, cur)) {  in __ww_mutex_add_waiter()
    538: if (!cur->ww_ctx)  in __ww_mutex_add_waiter()
    [all …]
|
locktorture.c
    1004: long cur;  in __torture_print_stats()  (local)
    1014: cur = data_race(statp[i].n_lock_acquired);  in __torture_print_stats()
    1015: sum += cur;  in __torture_print_stats()
    1016: if (max < cur)  in __torture_print_stats()
    1017: max = cur;  in __torture_print_stats()
    1018: if (min > cur)  in __torture_print_stats()
    1019: min = cur;  in __torture_print_stats()
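__torture_print_stats() reduces the per-thread acquisition counters to sum, max and min in a single pass; data_race() merely annotates the unlocked read. The same reduction in plain C:

```c
/* One-pass sum/max/min reduction over a counter array. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
        long counts[] = { 42, 17, 99, 63 };
        long sum = 0, max = LONG_MIN, min = LONG_MAX, cur;

        for (size_t i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
                cur = counts[i];
                sum += cur;
                if (max < cur)
                        max = cur;
                if (min > cur)
                        min = cur;
        }
        printf("sum=%ld max=%ld min=%ld\n", sum, max, min);
        return 0;
}
```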
|
/kernel/power/

snapshot.c
    592: bm->cur.node = list_entry(bm->cur.zone->leaves.next,  in memory_bm_position_reset()
    667: cur = ext;  in create_mem_extents()
    674: kfree(cur);  in create_mem_extents()
    894: if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {  in rtree_next_node()
    895: bm->cur.node = list_entry(bm->cur.node->list.next,  in rtree_next_node()
    905: bm->cur.zone = list_entry(bm->cur.zone->list.next,  in rtree_next_node()
    907: bm->cur.node = list_entry(bm->cur.zone->leaves.next,  in rtree_next_node()
    935: pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;  in memory_bm_next_pfn()
    940: pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;  in memory_bm_next_pfn()
    2279: handle->cur++;  in snapshot_read_next()
    [all …]
|
swap.c
    97: struct swap_map_page *cur;  (member)
    402: if (handle->cur)  in release_swap_writer()
    404: handle->cur = NULL;  in release_swap_writer()
    418: if (!handle->cur) {  in get_swap_writer()
    444: if (!handle->cur)  in swap_write_page()
    455: handle->cur->next_swap = offset;  in swap_write_page()
    459: clear_page(handle->cur);  in swap_write_page()
    992: handle->cur = NULL;  in release_swap_reader()
    1007: handle->cur = NULL;  in get_swap_reader()
    1048: if (!handle->cur)  in swap_read_page()
    [all …]
|
power.h
    142: unsigned int cur;  /* number of the block of PAGE_SIZE bytes the  (member)
|
/kernel/trace/

rethook.c
    206: struct llist_node **cur)  in __rethook_find_ret_addr()  (argument)
    209: struct llist_node *node = *cur;  in __rethook_find_ret_addr()
    219: *cur = node;  in __rethook_find_ret_addr()
    245: struct llist_node **cur)  in rethook_find_ret_addr()  (argument)
    250: if (WARN_ON_ONCE(!cur))  in rethook_find_ret_addr()
    257: ret = __rethook_find_ret_addr(tsk, cur);  in rethook_find_ret_addr()
    260: rhn = container_of(*cur, struct rethook_node, llist);  in rethook_find_ret_addr()
|
/kernel/kcsan/

kcsan_test.c
    164: char *cur;  in __report_matches()  (local)
    178: cur = expect[0];  in __report_matches()
    180: cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",  in __report_matches()
    190: cur += scnprintf(cur, end - cur, "%ps / %ps",  in __report_matches()
    194: scnprintf(cur, end - cur, "%pS", r->access[0].fn);  in __report_matches()
    197: if (cur)  in __report_matches()
    198: *cur = '\0';  in __report_matches()
    202: cur = expect[1];  in __report_matches()
    205: cur += scnprintf(cur, end - cur, "race at unknown origin, with ");  in __report_matches()
    239: cur += scnprintf(cur, end - cur, "%s%s to ", access_type,  in __report_matches()
    [all …]
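__report_matches() builds the expected report text with the usual cur += scnprintf(cur, end - cur, ...) idiom: each call appends at the cursor, and the cursor can never run past the end of the buffer because scnprintf() returns the number of characters actually written rather than the would-be length. A userspace sketch of that behaviour on top of vsnprintf(); my_scnprintf() is a stand-in, not the kernel function.

```c
/* Cursor-based string building that can never overrun the buffer. */
#include <stdarg.h>
#include <stdio.h>

static size_t my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int ret;

        if (size == 0)
                return 0;
        va_start(args, fmt);
        ret = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (ret < 0)
                return 0;
        return (size_t)ret >= size ? size - 1 : (size_t)ret;
}

int main(void)
{
        char buf[64];
        char *cur = buf;
        const char *end = buf + sizeof(buf);

        cur += my_scnprintf(cur, end - cur, "BUG: KCSAN: %s in ", "data-race");
        cur += my_scnprintf(cur, end - cur, "%s / %s", "writer_fn", "reader_fn");
        puts(buf);
        return 0;
}
```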
|
report.c
    280: char *cur;  in get_stack_skipnr()  (local)
    291: cur = strnstr(buf, "kcsan_", len);  in get_stack_skipnr()
    292: if (cur) {  in get_stack_skipnr()
    293: cur += strlen("kcsan_");  in get_stack_skipnr()
    294: if (!str_has_prefix(cur, "test"))  in get_stack_skipnr()
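get_stack_skipnr() skips stack frames whose symbol starts with "kcsan_" unless the rest of the name starts with "test" (frames of the test harness itself must remain visible). A minimal userspace version of that string check, using strstr() and strncmp() in place of the kernel's strnstr() and str_has_prefix(); is_internal_frame() is an illustrative name.

```c
/* Decide whether a symbol name denotes an internal kcsan_* frame. */
#include <stdbool.h>
#include <string.h>

bool is_internal_frame(const char *frame)
{
        const char *cur = strstr(frame, "kcsan_");

        if (!cur)
                return false;                   /* not a kcsan_* symbol */
        cur += strlen("kcsan_");
        /* keep kcsan_test_* frames visible, skip everything else */
        return strncmp(cur, "test", strlen("test")) != 0;
}
```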
|
/kernel/bpf/

bpf_lru_list.c
    175: struct list_head *cur, *last, *next = inactive;  in __bpf_lru_list_rotate_inactive()  (local)
    186: cur = l->next_inactive_rotation;  in __bpf_lru_list_rotate_inactive()
    188: if (cur == inactive) {  in __bpf_lru_list_rotate_inactive()
    189: cur = cur->prev;  in __bpf_lru_list_rotate_inactive()
    193: node = list_entry(cur, struct bpf_lru_node, list);  in __bpf_lru_list_rotate_inactive()
    194: next = cur->prev;  in __bpf_lru_list_rotate_inactive()
    197: if (cur == last)  in __bpf_lru_list_rotate_inactive()
    199: cur = next;  in __bpf_lru_list_rotate_inactive()
|
bpf_iter.c
    779: int cur;  /* current value, inclusive */  (member)
    796: s->cur = s->end = 0;  in bpf_iter_num_new()
    802: s->cur = s->end = 0;  in bpf_iter_num_new()
    810: s->cur = start - 1;  in bpf_iter_num_new()
    825: if ((s64)(s->cur + 1) >= s->end) {  in bpf_iter_num_next()
    826: s->cur = s->end = 0;  in bpf_iter_num_next()
    830: s->cur++;  in bpf_iter_num_next()
    832: return &s->cur;  in bpf_iter_num_next()
    839: s->cur = s->end = 0;  in bpf_iter_num_destroy()
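bpf_iter_num_new()/_next()/_destroy() form an open-coded iterator: new() validates the range and primes cur to start - 1, next() returns a pointer to the current value or NULL once cur + 1 reaches end, and destroy() resets the state. A userspace sketch with the same shape; the struct and function names are illustrative, not the BPF kfunc interface.

```c
/* Open-coded numbers iterator: new()/next()/destroy(). */
#include <stdio.h>

struct num_iter {
        int cur;        /* current value, inclusive */
        int end;        /* final value, exclusive */
};

static int num_iter_new(struct num_iter *it, int start, int end)
{
        if (start > end) {
                it->cur = it->end = 0;
                return -1;
        }
        it->cur = start - 1;    /* first next() steps to start */
        it->end = end;
        return 0;
}

static int *num_iter_next(struct num_iter *it)
{
        if ((long long)it->cur + 1 >= it->end) {
                it->cur = it->end = 0;
                return NULL;    /* exhausted */
        }
        it->cur++;
        return &it->cur;
}

static void num_iter_destroy(struct num_iter *it)
{
        it->cur = it->end = 0;
}

int main(void)
{
        struct num_iter it;
        int *v;

        num_iter_new(&it, 0, 4);
        while ((v = num_iter_next(&it)))
                printf("%d ", *v);      /* prints: 0 1 2 3 */
        printf("\n");
        num_iter_destroy(&it);
        return 0;
}
```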
|
rqspinlock.c
    83: u64 cur;  (member)
    220: u64 prev = ts->cur;  in check_timeout()
    223: ts->cur = time;  in check_timeout()
    236: ts->cur = time;  in check_timeout()
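check_timeout() keeps the most recently read timestamp in ts->cur and compares elapsed time against an armed deadline. A much simplified userspace sketch of that shape using clock_gettime(); the structure layout and the single-deadline logic here are assumptions, not the rqspinlock implementation.

```c
/* Deadline check that caches the last timestamp it read. */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct timeout_state {
        uint64_t timeout_end;   /* absolute deadline in ns, 0 = not armed yet */
        uint64_t cur;           /* last timestamp we read, in ns */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

bool check_timeout(struct timeout_state *ts, uint64_t duration_ns)
{
        uint64_t time = now_ns();

        if (!ts->timeout_end) {                 /* first call: arm the deadline */
                ts->cur = time;
                ts->timeout_end = time + duration_ns;
                return false;
        }
        ts->cur = time;                         /* remember the latest reading */
        return time > ts->timeout_end;          /* true once the deadline passed */
}
```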
|
verifier.c
    1789: struct bpf_func_state *state = cur->frame[cur->curframe];  in explored_state()
    2063: if (cur) {  in pop_stack()
    3987: cur->jmp_history = p;  in push_jmp_history()
    8504: if (reg->id != cur->active_lock_id || ptr != cur->active_lock_ptr) {  in process_spin_lock()
    8906: fcur = cur->frame[fr];  in widen_imprecise_scalars()
    18426: map[i].cur = cur_id;  in check_ids()
    19191: fcur = cur->frame[fr];  in states_maybe_looping()
    19326: frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {  in is_state_visited()
    19382: cur_frame = cur->frame[cur->curframe];  in is_state_visited()
    19421: print_verifier_state(env, cur, cur->curframe, true);  in is_state_visited()
    [all …]
|
/kernel/time/

tick-broadcast.c
    165: struct clock_event_device *cur = tick_broadcast_device.evtdev;  in tick_install_broadcast_device()  (local)
    170: if (!tick_check_broadcast_device(cur, dev))  in tick_install_broadcast_device()
    176: clockevents_exchange_device(cur, dev);  in tick_install_broadcast_device()
    177: if (cur)  in tick_install_broadcast_device()
    178: cur->event_handler = clockevents_handle_noop;  in tick_install_broadcast_device()
|
/kernel/cgroup/

cpuset-internal.h
    293: int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
    302: static inline int cpuset1_validate_change(struct cpuset *cur,  in cpuset1_validate_change()  (argument)
|
cpuset-v1.c
    353: int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial)  in cpuset1_validate_change()  (argument)
    363: cpuset_for_each_child(c, css, cur)  in cpuset1_validate_change()
    369: par = parent_cs(cur);  in cpuset1_validate_change()
|
cpuset.c
    563: static int validate_change(struct cpuset *cur, struct cpuset *trial)  in validate_change()  (argument)
    572: ret = cpuset1_validate_change(cur, trial);  in validate_change()
    577: if (cur == &top_cpuset)  in validate_change()
    580: par = parent_cs(cur);  in validate_change()
    587: if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {  in validate_change()
    588: if (!cpumask_empty(cur->cpus_allowed) &&  in validate_change()
    591: if (!nodes_empty(cur->mems_allowed) &&  in validate_change()
    614: if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&  in validate_change()
    615: !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))  in validate_change()
    626: if (c == cur)  in validate_change()
|
/kernel/futex/

core.c
    1826: struct futex_private_hash *cur, *new;  in futex_hash_allocate()  (local)
    1828: cur = rcu_dereference_protected(mm->futex_phash,  in futex_hash_allocate()
    1834: if (cur && !cur->hash_mask) {  in futex_hash_allocate()
    1844: if (cur && !new) {  in futex_hash_allocate()
    1850: futex_ref_drop(cur);  in futex_hash_allocate()
|
/kernel/sched/

fair.c
    2247: struct task_struct *cur;  in task_numa_compare()  (local)
    2258: cur = rcu_dereference(dst_rq->curr);  in task_numa_compare()
    2259: if (cur && ((cur->flags & (PF_EXITING | PF_KTHREAD)) ||  in task_numa_compare()
    2260: !cur->mm))  in task_numa_compare()
    2261: cur = NULL;  in task_numa_compare()
    2267: if (cur == env->p) {  in task_numa_compare()
    2272: if (!cur) {  in task_numa_compare()
    2303: cur_ng = rcu_dereference(cur->numa_group);  in task_numa_compare()
    2350: cur = NULL;  in task_numa_compare()
    2387: if (!cur) {  in task_numa_compare()
    [all …]
|