Lines Matching refs:worker

217 	struct worker		*manager;	/* L: purely informational */
348 struct worker *rescuer; /* MD: rescue worker */
581 #define for_each_pool_worker(worker, pool) \ argument
582 list_for_each_entry((worker), &(pool)->workers, node) \
972 static inline void worker_set_flags(struct worker *worker, unsigned int flags) in worker_set_flags() argument
974 struct worker_pool *pool = worker->pool; in worker_set_flags()
980 !(worker->flags & WORKER_NOT_RUNNING)) { in worker_set_flags()
984 worker->flags |= flags; in worker_set_flags()
994 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) in worker_clr_flags() argument
996 struct worker_pool *pool = worker->pool; in worker_clr_flags()
997 unsigned int oflags = worker->flags; in worker_clr_flags()
1001 worker->flags &= ~flags; in worker_clr_flags()
1009 if (!(worker->flags & WORKER_NOT_RUNNING)) in worker_clr_flags()
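The pair of helpers above adjusts pool->nr_running only when a worker crosses the WORKER_NOT_RUNNING boundary: worker_set_flags() decrements it on the first NOT_RUNNING bit set, and worker_clr_flags() increments it only once the last such bit is cleared. A minimal userspace sketch of that transition-only accounting follows; the flag values, struct names and main() are invented for illustration and are not the kernel's.

	#include <assert.h>
	#include <stdio.h>

	/* Illustrative flag bits; the values are arbitrary, not the kernel's. */
	#define DEMO_WORKER_PREP		(1 << 0)
	#define DEMO_WORKER_CPU_INTENSIVE	(1 << 1)
	#define DEMO_WORKER_NOT_RUNNING		(DEMO_WORKER_PREP | DEMO_WORKER_CPU_INTENSIVE)

	struct demo_pool   { int nr_running; };
	struct demo_worker { unsigned int flags; struct demo_pool *pool; };

	/* Decrement nr_running only on the first transition into NOT_RUNNING. */
	static void demo_set_flags(struct demo_worker *w, unsigned int flags)
	{
		if ((flags & DEMO_WORKER_NOT_RUNNING) && !(w->flags & DEMO_WORKER_NOT_RUNNING))
			w->pool->nr_running--;
		w->flags |= flags;
	}

	/* Increment nr_running only once the last NOT_RUNNING bit is cleared. */
	static void demo_clr_flags(struct demo_worker *w, unsigned int flags)
	{
		unsigned int oflags = w->flags;

		w->flags &= ~flags;
		if ((oflags & DEMO_WORKER_NOT_RUNNING) && !(w->flags & DEMO_WORKER_NOT_RUNNING))
			w->pool->nr_running++;
	}

	int main(void)
	{
		struct demo_pool pool = { .nr_running = 1 };
		struct demo_worker w = { .flags = 0, .pool = &pool };

		demo_set_flags(&w, DEMO_WORKER_PREP);		/* running -> not running */
		demo_set_flags(&w, DEMO_WORKER_CPU_INTENSIVE);	/* no further decrement */
		demo_clr_flags(&w, DEMO_WORKER_PREP);		/* still not running */
		demo_clr_flags(&w, DEMO_WORKER_CPU_INTENSIVE);	/* not running -> running */
		assert(pool.nr_running == 1);
		printf("nr_running = %d\n", pool.nr_running);
		return 0;
	}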
1014 static struct worker *first_idle_worker(struct worker_pool *pool) in first_idle_worker()
1019 return list_first_entry(&pool->idle_list, struct worker, entry); in first_idle_worker()
1032 static void worker_enter_idle(struct worker *worker) in worker_enter_idle() argument
1034 struct worker_pool *pool = worker->pool; in worker_enter_idle()
1036 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || in worker_enter_idle()
1037 WARN_ON_ONCE(!list_empty(&worker->entry) && in worker_enter_idle()
1038 (worker->hentry.next || worker->hentry.pprev))) in worker_enter_idle()
1042 worker->flags |= WORKER_IDLE; in worker_enter_idle()
1044 worker->last_active = jiffies; in worker_enter_idle()
1047 list_add(&worker->entry, &pool->idle_list); in worker_enter_idle()
1065 static void worker_leave_idle(struct worker *worker) in worker_leave_idle() argument
1067 struct worker_pool *pool = worker->pool; in worker_leave_idle()
1069 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) in worker_leave_idle()
1071 worker_clr_flags(worker, WORKER_IDLE); in worker_leave_idle()
1073 list_del_init(&worker->entry); in worker_leave_idle()
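worker_enter_idle() stamps last_active and list_add()s the worker at the head of pool->idle_list, so first_idle_worker() (list_first_entry) returns the most recently idled worker while the longest-idle one sits at the tail, which is what idle_worker_timeout()/idle_cull_fn() further down inspect with list_last_entry(). A small userspace analogue of that LIFO discipline, assuming the BSD <sys/queue.h> TAILQ macros; all names are made up.

	#include <stdio.h>
	#include <sys/queue.h>

	/* Stand-in for pool->idle_list: enter-idle pushes at the head, so the head is
	 * the most recently idled worker and the tail is the longest-idle one. */
	struct demo_worker {
		int id;
		unsigned long last_active;		/* mirrors worker->last_active = jiffies */
		TAILQ_ENTRY(demo_worker) entry;
	};

	TAILQ_HEAD(idle_head, demo_worker);

	int main(void)
	{
		struct idle_head idle_list = TAILQ_HEAD_INITIALIZER(idle_list);
		struct demo_worker w[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

		for (int i = 0; i < 3; i++) {
			w[i].last_active = 100 + i;			/* fake timestamp */
			TAILQ_INSERT_HEAD(&idle_list, &w[i], entry);	/* worker_enter_idle() */
		}

		/* first_idle_worker() analogue: most recently idled worker. */
		printf("first idle: %d\n", TAILQ_FIRST(&idle_list)->id);
		/* idle timer analogue: the longest-idle worker sits at the tail. */
		printf("longest idle: %d\n", TAILQ_LAST(&idle_list, idle_head)->id);
		return 0;
	}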
1109 static struct worker *find_worker_executing_work(struct worker_pool *pool, in find_worker_executing_work()
1112 struct worker *worker; in find_worker_executing_work() local
1114 hash_for_each_possible(pool->busy_hash, worker, hentry, in find_worker_executing_work()
1116 if (worker->current_work == work && in find_worker_executing_work()
1117 worker->current_func == work->func) in find_worker_executing_work()
1118 return worker; in find_worker_executing_work()
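find_worker_executing_work() matches a busy worker on both the work item's address and its callback; comparing the address alone could misidentify a worker if the original work struct was freed and its memory reused for a different item. A hedged userspace sketch of the same double check, using a plain array instead of the busy_hash; every identifier here is invented.

	#include <stddef.h>
	#include <stdio.h>

	typedef void (*demo_work_func_t)(void *);

	struct demo_work   { demo_work_func_t func; };
	struct demo_worker { struct demo_work *current_work; demo_work_func_t current_func; };

	/* Return the worker already executing @work, matching both address and callback. */
	static struct demo_worker *
	demo_find_executing(struct demo_worker *busy, size_t n, struct demo_work *work)
	{
		for (size_t i = 0; i < n; i++)
			if (busy[i].current_work == work &&
			    busy[i].current_func == work->func)
				return &busy[i];
		return NULL;
	}

	static void demo_fn(void *arg) { (void)arg; }

	int main(void)
	{
		struct demo_work work = { .func = demo_fn };
		struct demo_worker busy[] = { { .current_work = &work, .current_func = demo_fn } };

		printf("found: %s\n", demo_find_executing(busy, 1, &work) ? "yes" : "no");
		return 0;
	}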
1177 static bool assign_work(struct work_struct *work, struct worker *worker, in assign_work() argument
1180 struct worker_pool *pool = worker->pool; in assign_work()
1181 struct worker *collision; in assign_work()
1199 move_linked_works(work, &worker->scheduled, nextp); in assign_work()
1235 struct worker *worker = first_idle_worker(pool); in kick_pool() local
1240 if (!need_more_worker(pool) || !worker) in kick_pool()
1248 p = worker->task; in kick_pool()
1387 struct worker *worker = kthread_data(task); in wq_worker_running() local
1389 if (!READ_ONCE(worker->sleeping)) in wq_worker_running()
1399 if (!(worker->flags & WORKER_NOT_RUNNING)) in wq_worker_running()
1400 worker->pool->nr_running++; in wq_worker_running()
1407 worker->current_at = worker->task->se.sum_exec_runtime; in wq_worker_running()
1409 WRITE_ONCE(worker->sleeping, 0); in wq_worker_running()
1421 struct worker *worker = kthread_data(task); in wq_worker_sleeping() local
1429 if (worker->flags & WORKER_NOT_RUNNING) in wq_worker_sleeping()
1432 pool = worker->pool; in wq_worker_sleeping()
1435 if (READ_ONCE(worker->sleeping)) in wq_worker_sleeping()
1438 WRITE_ONCE(worker->sleeping, 1); in wq_worker_sleeping()
1446 if (worker->flags & WORKER_NOT_RUNNING) { in wq_worker_sleeping()
1453 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; in wq_worker_sleeping()
1467 struct worker *worker = kthread_data(task); in wq_worker_tick() local
1468 struct pool_workqueue *pwq = worker->current_pwq; in wq_worker_tick()
1469 struct worker_pool *pool = worker->pool; in wq_worker_tick()
1491 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || in wq_worker_tick()
1492 worker->task->se.sum_exec_runtime - worker->current_at < in wq_worker_tick()
1498 worker_set_flags(worker, WORKER_CPU_INTENSIVE); in wq_worker_tick()
1499 wq_cpu_intensive_report(worker->current_func); in wq_worker_tick()
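wq_worker_tick() flags a worker CPU-intensive once the CPU time consumed since the current work item started (tracked via current_at snapshots of se.sum_exec_runtime) exceeds a threshold, unless the worker is already NOT_RUNNING or sleeping. A rough userspace sketch of that check, assuming a made-up threshold and field names:

	#include <stdbool.h>
	#include <stdint.h>

	#define DEMO_CPU_INTENSIVE_THRESH_NS	(10ULL * 1000 * 1000)	/* 10ms, made up */

	struct demo_worker {
		bool not_running;		/* stands in for WORKER_NOT_RUNNING flags */
		bool sleeping;
		uint64_t sum_exec_runtime;	/* CPU time consumed so far, in ns */
		uint64_t current_at;		/* snapshot taken when the work item started */
		bool cpu_intensive;
	};

	/* Per-tick check: flag the worker once the current item has hogged the CPU. */
	static void demo_tick(struct demo_worker *w)
	{
		if (w->not_running || w->sleeping ||
		    w->sum_exec_runtime - w->current_at < DEMO_CPU_INTENSIVE_THRESH_NS)
			return;
		w->cpu_intensive = true;	/* worker_set_flags(WORKER_CPU_INTENSIVE) analogue */
	}

	int main(void)
	{
		struct demo_worker w = { .sum_exec_runtime = 15ULL * 1000 * 1000 };

		demo_tick(&w);
		return w.cpu_intensive ? 0 : 1;
	}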
1534 struct worker *worker = kthread_data(task); in wq_worker_last_func() local
1536 return worker->last_func; in wq_worker_last_func()
2197 struct worker *worker; in is_chained_work() local
2199 worker = current_wq_worker(); in is_chained_work()
2204 return worker && worker->current_pwq->wq == wq; in is_chained_work()
2283 struct worker *worker; in __queue_work() local
2287 worker = find_worker_executing_work(last_pool, work); in __queue_work()
2289 if (worker && worker->current_pwq->wq == wq) { in __queue_work()
2290 pwq = worker->current_pwq; in __queue_work()
2634 static struct worker *alloc_worker(int node) in alloc_worker()
2636 struct worker *worker; in alloc_worker() local
2638 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); in alloc_worker()
2639 if (worker) { in alloc_worker()
2640 INIT_LIST_HEAD(&worker->entry); in alloc_worker()
2641 INIT_LIST_HEAD(&worker->scheduled); in alloc_worker()
2642 INIT_LIST_HEAD(&worker->node); in alloc_worker()
2644 worker->flags = WORKER_PREP; in alloc_worker()
2646 return worker; in alloc_worker()
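alloc_worker() above zero-allocates the worker on the pool's NUMA node, initializes its list linkage, and starts it in WORKER_PREP so it is not counted as running until it actually begins executing work. A userspace analogue of the same initialization pattern, with invented list and flag definitions:

	#include <stdlib.h>

	#define DEMO_WORKER_PREP	(1 << 0)

	struct demo_list { struct demo_list *next, *prev; };

	static void demo_list_init(struct demo_list *l) { l->next = l->prev = l; }

	struct demo_worker {
		struct demo_list entry;		/* linkage on the idle list */
		struct demo_list scheduled;	/* per-worker queue of assigned work */
		struct demo_list node;		/* membership in the pool's worker list */
		unsigned int flags;
	};

	/* Zeroed allocation, self-pointing list heads, and PREP as the initial state. */
	static struct demo_worker *demo_alloc_worker(void)
	{
		struct demo_worker *w = calloc(1, sizeof(*w));

		if (w) {
			demo_list_init(&w->entry);
			demo_list_init(&w->scheduled);
			demo_list_init(&w->node);
			w->flags = DEMO_WORKER_PREP;
		}
		return w;
	}

	int main(void)
	{
		struct demo_worker *w = demo_alloc_worker();
		int ok = w && w->flags == DEMO_WORKER_PREP && w->entry.next == &w->entry;

		free(w);
		return ok ? 0 : 1;
	}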
2666 static void worker_attach_to_pool(struct worker *worker, in worker_attach_to_pool() argument
2677 worker->flags |= WORKER_UNBOUND; in worker_attach_to_pool()
2680 kthread_set_per_cpu(worker->task, pool->cpu); in worker_attach_to_pool()
2683 if (worker->rescue_wq) in worker_attach_to_pool()
2684 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); in worker_attach_to_pool()
2686 list_add_tail(&worker->node, &pool->workers); in worker_attach_to_pool()
2687 worker->pool = pool; in worker_attach_to_pool()
2692 static void unbind_worker(struct worker *worker) in unbind_worker() argument
2696 kthread_set_per_cpu(worker->task, -1); in unbind_worker()
2698 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); in unbind_worker()
2700 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); in unbind_worker()
2704 static void detach_worker(struct worker *worker) in detach_worker() argument
2708 unbind_worker(worker); in detach_worker()
2709 list_del(&worker->node); in detach_worker()
2720 static void worker_detach_from_pool(struct worker *worker) in worker_detach_from_pool() argument
2722 struct worker_pool *pool = worker->pool; in worker_detach_from_pool()
2728 detach_worker(worker); in worker_detach_from_pool()
2729 worker->pool = NULL; in worker_detach_from_pool()
2733 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); in worker_detach_from_pool()
2736 static int format_worker_id(char *buf, size_t size, struct worker *worker, in format_worker_id() argument
2739 if (worker->rescue_wq) in format_worker_id()
2741 worker->rescue_wq->name); in format_worker_id()
2746 pool->cpu, worker->id, in format_worker_id()
2750 pool->id, worker->id); in format_worker_id()
2768 static struct worker *create_worker(struct worker_pool *pool) in create_worker()
2770 struct worker *worker; in create_worker() local
2781 worker = alloc_worker(pool->node); in create_worker()
2782 if (!worker) { in create_worker()
2787 worker->id = id; in create_worker()
2792 format_worker_id(id_buf, sizeof(id_buf), worker, pool); in create_worker()
2793 worker->task = kthread_create_on_node(worker_thread, worker, in create_worker()
2795 if (IS_ERR(worker->task)) { in create_worker()
2796 if (PTR_ERR(worker->task) == -EINTR) { in create_worker()
2801 worker->task); in create_worker()
2806 set_user_nice(worker->task, pool->attrs->nice); in create_worker()
2807 kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); in create_worker()
2811 worker_attach_to_pool(worker, pool); in create_worker()
2816 worker->pool->nr_workers++; in create_worker()
2817 worker_enter_idle(worker); in create_worker()
2824 if (worker->task) in create_worker()
2825 wake_up_process(worker->task); in create_worker()
2829 return worker; in create_worker()
2833 kfree(worker); in create_worker()
2839 struct worker *worker; in detach_dying_workers() local
2841 list_for_each_entry(worker, cull_list, entry) in detach_dying_workers()
2842 detach_worker(worker); in detach_dying_workers()
2847 struct worker *worker, *tmp; in reap_dying_workers() local
2849 list_for_each_entry_safe(worker, tmp, cull_list, entry) { in reap_dying_workers()
2850 list_del_init(&worker->entry); in reap_dying_workers()
2851 kthread_stop_put(worker->task); in reap_dying_workers()
2852 kfree(worker); in reap_dying_workers()
2867 static void set_worker_dying(struct worker *worker, struct list_head *list) in set_worker_dying() argument
2869 struct worker_pool *pool = worker->pool; in set_worker_dying()
2875 if (WARN_ON(worker->current_work) || in set_worker_dying()
2876 WARN_ON(!list_empty(&worker->scheduled)) || in set_worker_dying()
2877 WARN_ON(!(worker->flags & WORKER_IDLE))) in set_worker_dying()
2883 worker->flags |= WORKER_DIE; in set_worker_dying()
2885 list_move(&worker->entry, list); in set_worker_dying()
2888 get_task_struct(worker->task); in set_worker_dying()
2912 struct worker *worker; in idle_worker_timeout() local
2916 worker = list_last_entry(&pool->idle_list, struct worker, entry); in idle_worker_timeout()
2917 expires = worker->last_active + IDLE_WORKER_TIMEOUT; in idle_worker_timeout()
2955 struct worker *worker; in idle_cull_fn() local
2958 worker = list_last_entry(&pool->idle_list, struct worker, entry); in idle_cull_fn()
2959 expires = worker->last_active + IDLE_WORKER_TIMEOUT; in idle_cull_fn()
2966 set_worker_dying(worker, &cull_list); in idle_cull_fn()
3096 static bool manage_workers(struct worker *worker) in manage_workers() argument
3098 struct worker_pool *pool = worker->pool; in manage_workers()
3104 pool->manager = worker; in manage_workers()
3128 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
3133 struct worker_pool *pool = worker->pool; in process_one_work()
3155 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
3156 worker->current_work = work; in process_one_work()
3157 worker->current_func = work->func; in process_one_work()
3158 worker->current_pwq = pwq; in process_one_work()
3159 if (worker->task) in process_one_work()
3160 worker->current_at = worker->task->se.sum_exec_runtime; in process_one_work()
3162 worker->current_color = get_work_color(work_data); in process_one_work()
3168 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
3179 worker_set_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
3229 worker->current_func(work); in process_one_work()
3234 trace_workqueue_execute_end(work, worker->current_func); in process_one_work()
3240 if (unlikely((worker->task && in_atomic()) || in process_one_work()
3248 worker->current_func); in process_one_work()
3261 if (worker->task) in process_one_work()
3271 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); in process_one_work()
3274 worker->last_func = worker->current_func; in process_one_work()
3277 hash_del(&worker->hentry); in process_one_work()
3278 worker->current_work = NULL; in process_one_work()
3279 worker->current_func = NULL; in process_one_work()
3280 worker->current_pwq = NULL; in process_one_work()
3281 worker->current_color = INT_MAX; in process_one_work()
3299 static void process_scheduled_works(struct worker *worker) in process_scheduled_works() argument
3304 while ((work = list_first_entry_or_null(&worker->scheduled, in process_scheduled_works()
3307 worker->pool->watchdog_ts = jiffies; in process_scheduled_works()
3310 process_one_work(worker, work); in process_scheduled_works()
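process_scheduled_works() simply drains the worker's ->scheduled list, popping the first entry and handing it to process_one_work() until the list is empty. A minimal userspace sketch of that drain loop, again with <sys/queue.h> and invented types:

	#include <stdio.h>
	#include <sys/queue.h>

	struct demo_work {
		void (*func)(struct demo_work *);
		TAILQ_ENTRY(demo_work) entry;
	};

	TAILQ_HEAD(demo_work_list, demo_work);

	/* Pop the first scheduled item and run it until the list is empty. */
	static void demo_process_scheduled(struct demo_work_list *scheduled)
	{
		struct demo_work *work;

		while ((work = TAILQ_FIRST(scheduled)) != NULL) {
			TAILQ_REMOVE(scheduled, work, entry);
			work->func(work);		/* process_one_work() analogue */
		}
	}

	static void demo_fn(struct demo_work *w) { (void)w; puts("work item ran"); }

	int main(void)
	{
		struct demo_work_list scheduled = TAILQ_HEAD_INITIALIZER(scheduled);
		struct demo_work a = { .func = demo_fn }, b = { .func = demo_fn };

		TAILQ_INSERT_TAIL(&scheduled, &a, entry);
		TAILQ_INSERT_TAIL(&scheduled, &b, entry);
		demo_process_scheduled(&scheduled);
		return TAILQ_EMPTY(&scheduled) ? 0 : 1;
	}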
3338 struct worker *worker = __worker; in worker_thread() local
3339 struct worker_pool *pool = worker->pool; in worker_thread()
3347 if (unlikely(worker->flags & WORKER_DIE)) { in worker_thread()
3354 worker->pool = NULL; in worker_thread()
3355 ida_free(&pool->worker_ida, worker->id); in worker_thread()
3359 worker_leave_idle(worker); in worker_thread()
3366 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) in worker_thread()
3374 WARN_ON_ONCE(!list_empty(&worker->scheduled)); in worker_thread()
3383 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); in worker_thread()
3390 if (assign_work(work, worker, NULL)) in worker_thread()
3391 process_scheduled_works(worker); in worker_thread()
3394 worker_set_flags(worker, WORKER_PREP); in worker_thread()
3403 worker_enter_idle(worker); in worker_thread()
3433 struct worker *rescuer = __rescuer; in rescuer_thread()
3545 static void bh_worker(struct worker *worker) in bh_worker() argument
3547 struct worker_pool *pool = worker->pool; in bh_worker()
3552 worker_leave_idle(worker); in bh_worker()
3561 WARN_ON_ONCE(!list_empty(&worker->scheduled)); in bh_worker()
3562 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); in bh_worker()
3569 if (assign_work(work, worker, NULL)) in bh_worker()
3570 process_scheduled_works(worker); in bh_worker()
3574 worker_set_flags(worker, WORKER_PREP); in bh_worker()
3576 worker_enter_idle(worker); in bh_worker()
3597 bh_worker(list_first_entry(&pool->workers, struct worker, node)); in workqueue_softirq_action()
3624 bh_worker(list_first_entry(&pool->workers, struct worker, node)); in drain_dead_softirq_workfn()
3694 struct worker *worker; in check_flush_dependency() local
3699 worker = current_wq_worker(); in check_flush_dependency()
3704 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & in check_flush_dependency()
3707 worker->current_pwq->wq->name, worker->current_func, in check_flush_dependency()
3749 struct work_struct *target, struct worker *worker) in insert_wq_barrier() argument
3781 if (worker) { in insert_wq_barrier()
3782 head = worker->scheduled.next; in insert_wq_barrier()
3783 work_color = worker->current_color; in insert_wq_barrier()
4118 struct worker *worker = NULL; in start_flush_work() local
4137 worker = find_worker_executing_work(pool, work); in start_flush_work()
4138 if (!worker) in start_flush_work()
4140 pwq = worker->current_pwq; in start_flush_work()
4146 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
4902 struct worker *worker; in put_unbound_pool() local
4948 while ((worker = first_idle_worker(pool))) in put_unbound_pool()
4949 set_worker_dying(worker, &cull_list); in put_unbound_pool()
5529 struct worker *rescuer; in init_rescuer()
5834 struct worker *rescuer = wq->rescuer; in destroy_workqueue()
5965 struct worker *worker = current_wq_worker(); in current_work() local
5967 return worker ? worker->current_work : NULL; in current_work()
5981 struct worker *worker = current_wq_worker(); in current_is_workqueue_rescuer() local
5983 return worker && worker->rescue_wq; in current_is_workqueue_rescuer()
6072 struct worker *worker = current_wq_worker(); in set_worker_desc() local
6075 if (worker) { in set_worker_desc()
6077 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); in set_worker_desc()
6103 struct worker *worker; in print_worker_info() local
6112 worker = kthread_probe_data(task); in print_worker_info()
6118 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); in print_worker_info()
6119 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
6122 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); in print_worker_info()
6145 static void pr_cont_worker_id(struct worker *worker) in pr_cont_worker_id() argument
6147 struct worker_pool *pool = worker->pool; in pr_cont_worker_id()
6153 pr_cont("%d%s", task_pid_nr(worker->task), in pr_cont_worker_id()
6154 worker->rescue_wq ? "(RESCUER)" : ""); in pr_cont_worker_id()
6206 struct worker *worker; in show_pwq() local
6217 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
6218 if (worker->current_pwq == pwq) { in show_pwq()
6227 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_pwq()
6228 if (worker->current_pwq != pwq) in show_pwq()
6232 pr_cont_worker_id(worker); in show_pwq()
6233 pr_cont(":%ps", worker->current_func); in show_pwq()
6234 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
6326 struct worker *worker; in show_one_worker_pool() local
6351 list_for_each_entry(worker, &pool->idle_list, entry) { in show_one_worker_pool()
6353 pr_cont_worker_id(worker); in show_one_worker_pool()
6423 struct worker *worker = kthread_data(task); in wq_worker_comm() local
6424 struct worker_pool *pool = worker->pool; in wq_worker_comm()
6427 off = format_worker_id(buf, size, worker, pool); in wq_worker_comm()
6436 if (worker->desc[0] != '\0') { in wq_worker_comm()
6437 if (worker->current_work) in wq_worker_comm()
6439 worker->desc); in wq_worker_comm()
6442 worker->desc); in wq_worker_comm()
6473 struct worker *worker; in unbind_workers() local
6487 for_each_pool_worker(worker, pool) in unbind_workers()
6488 worker->flags |= WORKER_UNBOUND; in unbind_workers()
6511 for_each_pool_worker(worker, pool) in unbind_workers()
6512 unbind_worker(worker); in unbind_workers()
6526 struct worker *worker; in rebind_workers() local
6537 for_each_pool_worker(worker, pool) { in rebind_workers()
6538 kthread_set_per_cpu(worker->task, pool->cpu); in rebind_workers()
6539 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, in rebind_workers()
6547 for_each_pool_worker(worker, pool) { in rebind_workers()
6548 unsigned int worker_flags = worker->flags; in rebind_workers()
6568 WRITE_ONCE(worker->flags, worker_flags); in rebind_workers()
6587 struct worker *worker; in restore_unbound_workers_cpumask() local
6598 for_each_pool_worker(worker, pool) in restore_unbound_workers_cpumask()
6599 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); in restore_unbound_workers_cpumask()
7461 struct worker *worker; in show_cpu_pool_hog() local
7467 hash_for_each(pool->busy_hash, bkt, worker, hentry) { in show_cpu_pool_hog()
7468 if (task_is_running(worker->task)) { in show_cpu_pool_hog()
7477 sched_show_task(worker->task); in show_cpu_pool_hog()