/kernel/
irq_work.c
    112 irq_work_raise(work); in __irq_work_queue_local()
    119 if (!irq_work_claim(work)) in irq_work_queue()
    124 __irq_work_queue_local(work); in irq_work_queue()
    140 return irq_work_queue(work); in irq_work_queue_on()
    147 if (!irq_work_claim(work)) in irq_work_queue_on()
    169 if (!irq_work_claim(work)) in irq_work_queue_on()
    203 struct irq_work *work = arg; in irq_work_single() local
    221 work->func(work); in irq_work_single()
    237 struct irq_work *work, *tmp; in irq_work_run_list() local
    252 irq_work_single(work); in irq_work_run_list()
    [all …]
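
The hits above are the irq_work core itself: the queueing side claims the item (`irq_work_claim()`), and `irq_work_single()` later invokes `work->func(work)` from a self-IPI. For reference, a minimal hedged sketch of what a caller of this API looks like; `my_irq_cb`, `my_work`, and `raise_event` are illustrative names, not taken from this file:

```c
#include <linux/irq_work.h>
#include <linux/printk.h>

/* Runs later in hard interrupt context, raised via a self-IPI. */
static void my_irq_cb(struct irq_work *work)
{
	pr_info("irq_work ran\n");
}

static struct irq_work my_work = IRQ_WORK_INIT(my_irq_cb);

/* Callable from NMI and other contexts that cannot take sleeping locks. */
static void raise_event(void)
{
	/*
	 * irq_work_queue() claims the item first, so an already-pending
	 * entry is not queued a second time.
	 */
	irq_work_queue(&my_work);
}
```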
|
task_work.c
    72 work->next = head; in task_work_add()
    128 while (work) { in task_work_cancel_match()
    130 pprev = &work->next; in task_work_cancel_match()
    132 } else if (try_cmpxchg(pprev, &work, work->next)) in task_work_cancel_match()
    137 return work; in task_work_cancel_match()
    207 if (!work) { in task_work_run()
    215 if (!work) in task_work_run()
    226 next = work->next; in task_work_run()
    227 work->func(work); in task_work_run()
    228 work = next; in task_work_run()
    [all …]
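
`task_work_add()` and `task_work_run()` above maintain a lock-free list of `callback_head` entries per task; the callbacks run in the target task's own context, typically on its way back to user space. A hedged sketch of the caller side for a one-shot callback (`my_task_cb` and `queue_on_current` are illustrative names):

```c
#include <linux/sched.h>
#include <linux/task_work.h>

/* Runs in the context of the task it was queued on. */
static void my_task_cb(struct callback_head *head)
{
	/* ... do the deferred per-task work here ... */
}

static int queue_on_current(void)
{
	/* One-shot: must not be re-queued while still pending. */
	static struct callback_head cb;

	init_task_work(&cb, my_task_cb);
	/* TWA_RESUME: run before the next return to user space. */
	return task_work_add(current, &cb, TWA_RESUME);
}
```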
|
kthread.c
    996 work = NULL; in kthread_worker_fn()
    1006 if (work) { in kthread_worker_fn()
    1010 work->func(work); in kthread_worker_fn()
    1145 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
    1154 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
    1212 struct kthread_work *work = &dwork->work; in kthread_delayed_work_timer_fn() local
    1242 struct kthread_work *work = &dwork->work; in __kthread_queue_delayed_work() local
    1285 struct kthread_work *work = &dwork->work; in kthread_queue_delayed_work() local
    1309 container_of(work, struct kthread_flush_work, work); in kthread_flush_work_fn()
    1362 container_of(work, struct kthread_delayed_work, work); in kthread_cancel_delayed_work_timer()
    [all …]
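
`kthread_worker_fn()` above is the loop that dequeues items and calls `work->func(work)` in a dedicated kernel thread. A hedged sketch of the public API it serves; `my_worker`, `my_kwork`, and `my_kwork_fn` are illustrative:

```c
#include <linux/err.h>
#include <linux/kthread.h>

/* Runs in the dedicated worker kthread, in process context. */
static void my_kwork_fn(struct kthread_work *work)
{
	/* ... may sleep here ... */
}

static struct kthread_worker *my_worker;
static struct kthread_work my_kwork;

static int my_start(void)
{
	my_worker = kthread_create_worker(0, "my_worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);

	kthread_init_work(&my_kwork, my_kwork_fn);
	kthread_queue_work(my_worker, &my_kwork);

	kthread_flush_work(&my_kwork);		/* wait for it to finish */
	kthread_destroy_worker(my_worker);
	return 0;
}
```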
|
workqueue.c
    694 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
    793 atomic_long_set(&work->data, data | work_static(work)); in set_work_data()
    1890 if (!work) { in node_activate_pending_pwq()
    2493 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
    2550 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
    2624 struct work_struct *work = &rwork->work; in queue_rcu_work() local
    3618 container_of(work, struct wq_drain_dead_softirq_work, work); in drain_dead_softirq_workfn()
    3732 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
    4367 enable_work(work); in __cancel_work_sync()
    4596 fn(&ew->work); in execute_in_process_context()
    [all …]
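
These are workqueue internals (delayed work, RCU work, flush barriers). The client-side pattern they serve is a `work_struct` or `delayed_work` embedded in an owning object and recovered in the handler with `container_of()`. A hedged sketch with illustrative names (`struct my_dev`, `my_dev_workfn`):

```c
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work dwork;
	int pending_events;
};

static void my_dev_workfn(struct work_struct *work)
{
	/* to_delayed_work() + container_of() recover the owning object. */
	struct my_dev *dev = container_of(to_delayed_work(work),
					  struct my_dev, dwork);

	dev->pending_events = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->dwork, my_dev_workfn);
}

static void my_dev_kick(struct my_dev *dev)
{
	/* Ends up in queue_delayed_work_on()/__queue_delayed_work() above. */
	schedule_delayed_work(&dev->dwork, msecs_to_jiffies(100));
}
```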
|
stop_machine.c
    102 else if (work->done) in cpu_stop_queue_work()
    396 struct cpu_stop_work *work; in queue_stop_cpus_work() local
    410 work->fn = fn; in queue_stop_cpus_work()
    411 work->arg = arg; in queue_stop_cpus_work()
    412 work->done = done; in queue_stop_cpus_work()
    413 work->caller = _RET_IP_; in queue_stop_cpus_work()
    490 struct cpu_stop_work *work; in cpu_stopper_thread() local
    493 work = NULL; in cpu_stopper_thread()
    498 list_del_init(&work->list); in cpu_stopper_thread()
    502 if (work) { in cpu_stopper_thread()
    [all …]
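
`queue_stop_cpus_work()` fills per-CPU `cpu_stop_work` items (`fn`, `arg`, `done`, `caller`) that `cpu_stopper_thread()` later executes. From a caller's point of view the exported entry points look roughly like this; `my_stop_fn` and the choice of CPU 0 are illustrative:

```c
#include <linux/stop_machine.h>

/*
 * Runs in the per-CPU stopper thread on the target CPU with preemption
 * disabled; it must not sleep.
 */
static int my_stop_fn(void *arg)
{
	int *val = arg;

	*val = 42;
	return 0;
}

static int run_on_cpu0(void)
{
	int val = 0;

	/* Blocks until my_stop_fn() has run on CPU 0; returns its result. */
	return stop_one_cpu(0, my_stop_fn, &val);
}
```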
|
async.c
    72 struct work_struct work; member
    117 static void async_run_entry_fn(struct work_struct *work) in async_run_entry_fn() argument
    120 container_of(work, struct async_entry, work); in async_run_entry_fn()
    160 INIT_WORK(&entry->work, async_run_entry_fn); in __async_schedule_node_domain()
    178 queue_work_node(node, async_wq, &entry->work); in __async_schedule_node_domain()
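
`async.c` wraps every asynchronous call in a `work_struct` (`async_run_entry_fn()` is the `INIT_WORK` callback queued with `queue_work_node()`). Callers only see the async API; a minimal hedged sketch with an illustrative `my_async_fn`:

```c
#include <linux/async.h>

/* Runs in a workqueue worker, concurrently with other async entries. */
static void my_async_fn(void *data, async_cookie_t cookie)
{
	/* ... slow initialisation work ... */
}

static void kick_async(void *dev)
{
	async_schedule(my_async_fn, dev);

	/* Wait for all outstanding async work before proceeding. */
	async_synchronize_full();
}
```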
|
jump_label.c
    325 void jump_label_update_timeout(struct work_struct *work) in jump_label_update_timeout() argument
    328 container_of(work, struct static_key_deferred, work.work); in jump_label_update_timeout()
    347 struct delayed_work *work, in __static_key_slow_dec_deferred() argument
    355 schedule_delayed_work(work, timeout); in __static_key_slow_dec_deferred()
    359 void __static_key_deferred_flush(void *key, struct delayed_work *work) in __static_key_deferred_flush() argument
    362 flush_delayed_work(work); in __static_key_deferred_flush()
    371 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); in jump_label_rate_limit()
|
pid_namespace.c
    73 static void destroy_pid_namespace_work(struct work_struct *work);
    121 INIT_WORK(&ns->work, destroy_pid_namespace_work); in create_pid_namespace()
    160 static void destroy_pid_namespace_work(struct work_struct *work) in destroy_pid_namespace_work() argument
    163 container_of(work, struct pid_namespace, work); in destroy_pid_namespace_work()
    187 schedule_work(&ns->work); in put_pid_ns()
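
The pid-namespace hits show a common idiom: `put_pid_ns()` may drop the last reference from a context that cannot sleep, so the actual teardown is pushed to a workqueue. A generic hedged sketch of the same deferred-destruction pattern (`struct my_obj` and its helpers are illustrative; the workqueue calls are the real API):

```c
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct work_struct work;
	/* ... payload ... */
};

static void my_obj_destroy_workfn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, work);

	/* Runs in a workqueue worker, so sleeping is allowed here. */
	kfree(obj);
}

/* Called from a context that must not sleep (e.g. a refcount hitting 0). */
static void my_obj_put_final(struct my_obj *obj)
{
	INIT_WORK(&obj->work, my_obj_destroy_workfn);
	schedule_work(&obj->work);
}
```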
|
umh.c
    157 static void call_usermodehelper_exec_work(struct work_struct *work) in call_usermodehelper_exec_work() argument
    160 container_of(work, struct subprocess_info, work); in call_usermodehelper_exec_work()
    366 INIT_WORK(&sub_info->work, call_usermodehelper_exec_work); in call_usermodehelper_setup()
    433 queue_work(system_unbound_wq, &sub_info->work); in call_usermodehelper_exec()
|
acct.c
    111 struct work_struct work; member
    199 schedule_work(&acct->work); in acct_pin_kill()
    207 static void close_work(struct work_struct *work) in close_work() argument
    209 struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work); in close_work()
    287 INIT_WORK(&acct->work, close_work); in acct_on()
|
smp.c
    1105 struct work_struct work; member
    1113 static void smp_call_on_cpu_callback(struct work_struct *work) in smp_call_on_cpu_callback() argument
    1117 sscs = container_of(work, struct smp_call_on_cpu_struct, work); in smp_call_on_cpu_callback()
    1136 INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback); in smp_call_on_cpu()
    1141 queue_work_on(cpu, system_wq, &sscs.work); in smp_call_on_cpu()
    1143 destroy_work_on_stack(&sscs.work); in smp_call_on_cpu()
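
`smp_call_on_cpu()` shows the on-stack variant of the pattern: `INIT_WORK_ONSTACK()`, `queue_work_on()` to pin the item to one CPU, wait, then `destroy_work_on_stack()` so debugobjects stays consistent. A hedged sketch with illustrative names:

```c
#include <linux/completion.h>
#include <linux/workqueue.h>

struct onstack_ctx {
	struct work_struct work;
	struct completion done;
	int result;
};

static void onstack_workfn(struct work_struct *work)
{
	struct onstack_ctx *ctx = container_of(work, struct onstack_ctx, work);

	ctx->result = 42;	/* stand-in for the real per-CPU job */
	complete(&ctx->done);
}

static int run_on_cpu(int cpu)
{
	struct onstack_ctx ctx = { .result = 0 };

	init_completion(&ctx.done);
	INIT_WORK_ONSTACK(&ctx.work, onstack_workfn);
	queue_work_on(cpu, system_wq, &ctx.work);
	wait_for_completion(&ctx.done);
	/* Mandatory for on-stack work items before they go out of scope. */
	destroy_work_on_stack(&ctx.work);
	return ctx.result;
}
```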
|
/kernel/unwind/
deferred.c
    154 struct unwind_work *work; in process_unwind_deferred() local
    181 if (test_bit(work->bit, &bits)) { in process_unwind_deferred()
    182 work->func(work, &trace, cookie); in process_unwind_deferred()
    253 bit = READ_ONCE(work->bit); in unwind_deferred_request()
    301 if (!work) in unwind_deferred_cancel()
    304 bit = work->bit; in unwind_deferred_cancel()
    311 list_del_rcu(&work->list); in unwind_deferred_cancel()
    314 work->bit = -1; in unwind_deferred_cancel()
    331 memset(work, 0, sizeof(*work)); in unwind_deferred_init()
    339 work->bit = ffz(unwind_mask); in unwind_deferred_init()
    [all …]
|
/kernel/entry/
syscall-common.c
    21 unsigned long work) in syscall_trace_enter() argument
    30 if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) { in syscall_trace_enter()
    38 if (ret || (work & SYSCALL_WORK_SYSCALL_EMU)) in syscall_trace_enter()
    43 if (work & SYSCALL_WORK_SECCOMP) { in syscall_trace_enter()
    52 if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) { in syscall_trace_enter()
    81 if (work & SYSCALL_WORK_SYSCALL_EMU) in report_single_step()
    84 return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP; in report_single_step()
    97 if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) { in syscall_exit_work()
    106 if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT) in syscall_exit_work()
    109 step = report_single_step(work); in syscall_exit_work()
    [all …]
|
/kernel/locking/
test-ww_mutex.c
    29 struct work_struct work; member
    42 struct test_mutex *mtx = container_of(work, typeof(*mtx), work); in test_mutex_work()
    74 schedule_work(&mtx.work); in __test_mutex()
    103 flush_work(&mtx.work); in __test_mutex()
    179 struct work_struct work; member
    190 struct test_abba *abba = container_of(work, typeof(*abba), work); in test_abba_work()
    259 flush_work(&abba.work); in test_abba()
    290 struct test_cycle *cycle = container_of(work, typeof(*cycle), work); in test_cycle_work()
    432 struct stress *stress = container_of(work, typeof(*stress), work); in stress_inorder_work()
    491 struct stress *stress = container_of(work, typeof(*stress), work); in stress_reorder_work()
    [all …]
|
/kernel/bpf/
mmap_unlock_work.h
    26 struct mmap_unlock_irq_work *work = NULL; in bpf_mmap_unlock_get_irq_work() local
    31 work = this_cpu_ptr(&mmap_unlock_work); in bpf_mmap_unlock_get_irq_work()
    32 if (irq_work_is_busy(&work->irq_work)) { in bpf_mmap_unlock_get_irq_work()
    45 *work_ptr = work; in bpf_mmap_unlock_get_irq_work()
    49 static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm) in bpf_mmap_unlock_mm() argument
    51 if (!work) { in bpf_mmap_unlock_mm()
    54 work->mm = mm; in bpf_mmap_unlock_mm()
    61 irq_work_queue(&work->irq_work); in bpf_mmap_unlock_mm()
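
This header keeps one `irq_work` per CPU and refuses to hand out a slot that is still in flight (`irq_work_is_busy()`). A generic hedged sketch of that per-CPU pattern; the `my_*` names are illustrative and the queueing caller is assumed to run with preemption disabled:

```c
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>

struct my_deferred {
	struct irq_work irq_work;
	void *payload;
};

static DEFINE_PER_CPU(struct my_deferred, my_deferred_work);

static void my_deferred_fn(struct irq_work *work)
{
	struct my_deferred *d = container_of(work, struct my_deferred, irq_work);

	/* ... consume d->payload; the slot becomes reusable afterwards ... */
}

/* Call once during setup, mirroring task_iter_init() in task_iter.c below. */
static void my_defer_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_irq_work(&per_cpu_ptr(&my_deferred_work, cpu)->irq_work,
			      my_deferred_fn);
}

/* Returns false if this CPU's slot is still busy with a previous request. */
static bool my_defer(void *payload)
{
	struct my_deferred *d = this_cpu_ptr(&my_deferred_work);

	if (irq_work_is_busy(&d->irq_work))
		return false;

	d->payload = payload;
	irq_work_queue(&d->irq_work);
	return true;
}
```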
|
token.c
    41 static void bpf_token_put_deferred(struct work_struct *work) in bpf_token_put_deferred() argument
    43 struct bpf_token *token = container_of(work, struct bpf_token, work); in bpf_token_put_deferred()
    56 INIT_WORK(&token->work, bpf_token_put_deferred); in bpf_token_put()
    57 schedule_work(&token->work); in bpf_token_put()
|
task_iter.c
    754 struct mmap_unlock_irq_work *work = NULL; in BPF_CALL_5() local
    770 irq_work_busy = bpf_mmap_unlock_get_irq_work(&work); in BPF_CALL_5()
    782 bpf_mmap_unlock_mm(work, mm); in BPF_CALL_5()
    800 struct mmap_unlock_irq_work *work; member
    844 irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work); in bpf_iter_task_vma_new()
    876 bpf_mmap_unlock_mm(kit->data->work, kit->data->mm); in bpf_iter_task_vma_destroy()
    1036 struct mmap_unlock_irq_work *work; in do_mmap_read_unlock() local
    1042 mmap_read_unlock_non_owner(work->mm); in do_mmap_read_unlock()
    1047 struct mmap_unlock_irq_work *work; in task_iter_init() local
    1051 work = per_cpu_ptr(&mmap_unlock_work, cpu); in task_iter_init()
    [all …]
|
ringbuf.c
    29 struct irq_work work; member
    151 static void bpf_ringbuf_notify(struct irq_work *work) in bpf_ringbuf_notify() argument
    153 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); in bpf_ringbuf_notify()
    180 init_irq_work(&rb->work, bpf_ringbuf_notify); in bpf_ringbuf_alloc()
    505 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
    507 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
    788 irq_work_queue(&rb->work); in BPF_CALL_4()
    790 irq_work_queue(&rb->work); in BPF_CALL_4()
|
memalloc.c
    427 static void bpf_mem_refill(struct irq_work *work) in bpf_mem_refill() argument
    429 struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work); in bpf_mem_refill()
    709 static void free_mem_alloc_deferred(struct work_struct *work) in free_mem_alloc_deferred() argument
    711 struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work); in free_mem_alloc_deferred()
    738 INIT_WORK(&copy->work, free_mem_alloc_deferred); in destroy_mem_alloc()
    739 queue_work(system_unbound_wq, &copy->work); in destroy_mem_alloc()
|
/kernel/irq/
irq_sim.c
    15 struct irq_work work; member
    82 irq_work_queue(&irq_ctx->work_ctx->work); in irq_sim_set_irqchip_state()
    128 static void irq_sim_handle_irq(struct irq_work *work) in irq_sim_handle_irq() argument
    134 work_ctx = container_of(work, struct irq_sim_work_ctx, work); in irq_sim_handle_irq()
    221 work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq); in irq_domain_create_sim_full()
    242 irq_work_sync(&work_ctx->work); in irq_domain_remove_sim()
|
/kernel/rcu/
srcutree.c
    142 INIT_WORK(&sdp->work, srcu_invoke_callbacks); in init_srcu_struct_data()
    691 flush_delayed_work(&sup->work); in cleanup_srcu_struct()
    696 flush_work(&sdp->work); in cleanup_srcu_struct()
    1092 queue_delayed_work(rcu_gp_wq, &sup->work, in srcu_funnel_gp_start()
    1094 else if (list_empty(&sup->work.work.entry)) in srcu_funnel_gp_start()
    1095 list_add(&sup->work.work.entry, &srcu_boot_list); in srcu_funnel_gp_start()
    1778 sdp = container_of(work, struct srcu_data, work); in srcu_invoke_callbacks()
    1860 sup = container_of(work, struct srcu_usage, work.work); in process_srcu()
    1995 work.work.entry); in srcu_init()
    1996 list_del_init(&sup->work.work.entry); in srcu_init()
    [all …]
|
/kernel/module/
dups.c
    71 static void kmod_dup_request_delete(struct work_struct *work) in kmod_dup_request_delete() argument
    74 kmod_req = container_of(to_delayed_work(work), struct kmod_dup_req, delete_work); in kmod_dup_request_delete()
    96 static void kmod_dup_request_complete(struct work_struct *work) in kmod_dup_request_complete() argument
    100 kmod_req = container_of(work, struct kmod_dup_req, complete_work); in kmod_dup_request_complete()
|
/kernel/cgroup/
cpuset-v1.c
    10 struct work_struct work; member
    277 static void cpuset_migrate_tasks_workfn(struct work_struct *work) in cpuset_migrate_tasks_workfn() argument
    281 s = container_of(work, struct cpuset_remove_tasks_struct, work); in cpuset_migrate_tasks_workfn()
    328 INIT_WORK(&s->work, cpuset_migrate_tasks_workfn); in cpuset1_hotplug_update_tasks()
    329 schedule_work(&s->work); in cpuset1_hotplug_update_tasks()
|
/kernel/sched/
cpufreq_schedutil.c
    32 struct kthread_work work; member
    542 static void sugov_work(struct kthread_work *work) in sugov_work() argument
    544 struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work); in sugov_work()
    574 kthread_queue_work(&sg_policy->worker, &sg_policy->work); in sugov_irq_work()
    679 kthread_init_work(&sg_policy->work, sugov_work); in sugov_kthread_create()
    888 kthread_cancel_work_sync(&sg_policy->work); in sugov_stop()
|
/kernel/trace/
bpf_trace.c
    808 struct send_signal_irq_work *work; in do_bpf_send_signal() local
    812 siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV; in do_bpf_send_signal()
    814 group_send_sig_info(work->sig, siginfo, work->task, work->type); in do_bpf_send_signal()
    815 put_task_struct(work->task); in do_bpf_send_signal()
    866 work->task = get_task_struct(task); in bpf_send_signal_common()
    868 if (work->has_siginfo) in bpf_send_signal_common()
    869 copy_siginfo(&work->info, &info); in bpf_send_signal_common()
    870 work->sig = sig; in bpf_send_signal_common()
    871 work->type = type; in bpf_send_signal_common()
    872 irq_work_queue(&work->irq_work); in bpf_send_signal_common()
    [all …]
|