Lines matching refs:worker in kernel/kthread.c (each entry shows the source line number, the matched line, and the enclosing function):
777 void __kthread_init_worker(struct kthread_worker *worker, in __kthread_init_worker() argument
781 memset(worker, 0, sizeof(struct kthread_worker)); in __kthread_init_worker()
782 raw_spin_lock_init(&worker->lock); in __kthread_init_worker()
783 lockdep_set_class_and_name(&worker->lock, key, name); in __kthread_init_worker()
784 INIT_LIST_HEAD(&worker->work_list); in __kthread_init_worker()
785 INIT_LIST_HEAD(&worker->delayed_work_list); in __kthread_init_worker()
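The block above is __kthread_init_worker(): it zeroes the worker, initializes the raw spinlock with a lockdep class and name, and sets up the normal and delayed work lists. Drivers that embed a worker reach it through the kthread_init_worker() wrapper. A minimal sketch of that pattern; struct my_dev, my_work_fn(), and my_dev_setup() are hypothetical names, not kernel API:

    #include <linux/kthread.h>

    struct my_dev {                             /* hypothetical driver context */
            struct kthread_worker worker;       /* initialized below */
            struct kthread_work work;           /* one unit of deferred work */
    };

    static void my_work_fn(struct kthread_work *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, work);

            /* ... deferred processing on dev ... */
    }

    static void my_dev_setup(struct my_dev *dev)
    {
            kthread_init_worker(&dev->worker);          /* lock + empty lists */
            kthread_init_work(&dev->work, my_work_fn);  /* bind the callback */
    }

Note that kthread_init_worker() only prepares the data structure; no thread exists yet, which is where kthread_worker_fn() comes in.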
806 struct kthread_worker *worker = worker_ptr; in kthread_worker_fn() local
813 WARN_ON(worker->task && worker->task != current); in kthread_worker_fn()
814 worker->task = current; in kthread_worker_fn()
816 if (worker->flags & KTW_FREEZABLE) in kthread_worker_fn()
824 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
825 worker->task = NULL; in kthread_worker_fn()
826 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
831 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
832 if (!list_empty(&worker->work_list)) { in kthread_worker_fn()
833 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
837 worker->current_work = work; in kthread_worker_fn()
838 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
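These lines are the core of kthread_worker_fn(), the thread body every kthread worker runs: it claims the worker by setting worker->task = current (warning if another task already owns it), honors KTW_FREEZABLE, then loops, taking the first entry off worker->work_list under the raw spinlock, publishing it as worker->current_work, and invoking it with the lock dropped; on kthread_stop() it clears worker->task and exits. For a worker initialized by hand rather than via kthread_create_worker(), the documented way to start this loop is kthread_run(); a sketch reusing the hypothetical my_dev above:

    static int my_dev_start(struct my_dev *dev)
    {
            struct task_struct *task;

            /* Spawn the thread that will run queued work items. */
            task = kthread_run(kthread_worker_fn, &dev->worker, "my_worker");
            if (IS_ERR(task))
                    return PTR_ERR(task);

            return 0;
    }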
871 struct kthread_worker *worker; in __kthread_create_worker() local
875 worker = kzalloc(sizeof(*worker), GFP_KERNEL); in __kthread_create_worker()
876 if (!worker) in __kthread_create_worker()
879 kthread_init_worker(worker); in __kthread_create_worker()
884 task = __kthread_create_on_node(kthread_worker_fn, worker, in __kthread_create_worker()
892 worker->flags = flags; in __kthread_create_worker()
893 worker->task = task; in __kthread_create_worker()
895 return worker; in __kthread_create_worker()
898 kfree(worker); in __kthread_create_worker()
914 struct kthread_worker *worker; in kthread_create_worker() local
918 worker = __kthread_create_worker(-1, flags, namefmt, args); in kthread_create_worker()
921 return worker; in kthread_create_worker()
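kthread_create_worker() is the usual entry point: it forwards to __kthread_create_worker() with cpu == -1 (no CPU binding), which allocates the worker with kzalloc(), initializes it, and spawns the thread on kthread_worker_fn(). On failure these helpers return an ERR_PTR, never NULL, so callers check with IS_ERR(). A usage sketch:

    struct kthread_worker *worker;

    worker = kthread_create_worker(0, "my_worker");  /* flags: 0 or KTW_FREEZABLE */
    if (IS_ERR(worker))
            return PTR_ERR(worker);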
964 struct kthread_worker *worker; in kthread_create_worker_on_cpu() local
968 worker = __kthread_create_worker(cpu, flags, namefmt, args); in kthread_create_worker_on_cpu()
971 return worker; in kthread_create_worker_on_cpu()
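kthread_create_worker_on_cpu() is the CPU-bound variant, passing a real CPU number instead of -1 so the worker thread is pinned to that CPU. A sketch; putting the CPU number in the thread name is a common convention here, not a requirement:

    worker = kthread_create_worker_on_cpu(cpu, 0, "my_worker/%d", cpu);
    if (IS_ERR(worker))
            return PTR_ERR(worker);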
980 static inline bool queuing_blocked(struct kthread_worker *worker, in queuing_blocked() argument
983 lockdep_assert_held(&worker->lock); in queuing_blocked()
988 static void kthread_insert_work_sanity_check(struct kthread_worker *worker, in kthread_insert_work_sanity_check() argument
991 lockdep_assert_held(&worker->lock); in kthread_insert_work_sanity_check()
994 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
998 static void kthread_insert_work(struct kthread_worker *worker, in kthread_insert_work() argument
1002 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
1004 trace_sched_kthread_work_queue_work(worker, work); in kthread_insert_work()
1007 work->worker = worker; in kthread_insert_work()
1008 if (!worker->current_work && likely(worker->task)) in kthread_insert_work()
1009 wake_up_process(worker->task); in kthread_insert_work()
1024 bool kthread_queue_work(struct kthread_worker *worker, in kthread_queue_work() argument
1030 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_work()
1031 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
1032 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
1035 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_work()
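kthread_queue_work() takes the worker lock and, unless queuing_blocked() reports the work as already pending or mid-cancel, hands it to kthread_insert_work(). That helper (with the lockdep and ownership sanity checks above) links the work onto worker->work_list, records work->worker, and wakes the worker task only if it is idle (no current_work). The bool return tells the caller whether anything was actually queued:

    bool queued;

    queued = kthread_queue_work(worker, &dev->work);
    if (!queued)
            pr_debug("my_dev: work was already pending\n");  /* illustrative */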
1052 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn() local
1059 if (WARN_ON_ONCE(!worker)) in kthread_delayed_work_timer_fn()
1062 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_delayed_work_timer_fn()
1064 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
1070 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
1072 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_delayed_work_timer_fn()
1076 static void __kthread_queue_delayed_work(struct kthread_worker *worker, in __kthread_queue_delayed_work() argument
1092 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
1097 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
1099 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
1100 work->worker = worker; in __kthread_queue_delayed_work()
1120 bool kthread_queue_delayed_work(struct kthread_worker *worker, in kthread_queue_delayed_work() argument
1128 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_delayed_work()
1130 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
1131 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_queue_delayed_work()
1135 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_delayed_work()
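kthread_queue_delayed_work() follows the same locking pattern but defers the actual queuing: for a nonzero delay, __kthread_queue_delayed_work() parks the work on worker->delayed_work_list and arms a timer whose handler, kthread_delayed_work_timer_fn() above, moves it onto work_list when it fires; a zero delay queues immediately. A sketch; my_timeout_fn() is hypothetical, and the kthread_delayed_work must outlive the timer (static here, normally embedded in a driver struct):

    static struct kthread_delayed_work dwork;

    static void my_timeout_fn(struct kthread_work *work)
    {
            struct kthread_delayed_work *dw =
                    container_of(work, struct kthread_delayed_work, work);

            /* ... handle the timeout for dw ... */
    }

    static void my_dev_arm_timeout(struct kthread_worker *worker)
    {
            kthread_init_delayed_work(&dwork, my_timeout_fn);
            kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
    }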
1164 struct kthread_worker *worker; in kthread_flush_work() local
1167 worker = work->worker; in kthread_flush_work()
1168 if (!worker) in kthread_flush_work()
1171 raw_spin_lock_irq(&worker->lock); in kthread_flush_work()
1173 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
1176 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
1177 else if (worker->current_work == work) in kthread_flush_work()
1178 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
1179 worker->work_list.next); in kthread_flush_work()
1183 raw_spin_unlock_irq(&worker->lock); in kthread_flush_work()
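kthread_flush_work() waits for one specific work item to finish. It builds an on-stack flush work (fwork, carrying a completion) and inserts it directly behind the target: after work->node.next if the item is still queued, or at the head of work_list if the worker is currently running it; it then sleeps on the completion. Usage is a single blocking call:

    kthread_flush_work(&dev->work);  /* returns once the handler has finished */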
1202 struct kthread_worker *worker = work->worker; in kthread_cancel_delayed_work_timer() local
1211 raw_spin_unlock_irqrestore(&worker->lock, *flags); in kthread_cancel_delayed_work_timer()
1213 raw_spin_lock_irqsave(&worker->lock, *flags); in kthread_cancel_delayed_work_timer()
1267 bool kthread_mod_delayed_work(struct kthread_worker *worker, in kthread_mod_delayed_work() argument
1275 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_mod_delayed_work()
1278 if (!work->worker) { in kthread_mod_delayed_work()
1284 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1307 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_mod_delayed_work()
1309 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_mod_delayed_work()
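kthread_mod_delayed_work() re-arms a delayed work under the worker lock: kthread_cancel_delayed_work_timer() above disarms a pending timer (briefly dropping and retaking the lock, hence the re-checks of work->worker), and __kthread_queue_delayed_work() then queues with the new delay. A sketch:

    /* Push the timeout out to 250 ms from now, pending or not. */
    kthread_mod_delayed_work(worker, &dwork, msecs_to_jiffies(250));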
1316 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync() local
1320 if (!worker) in __kthread_cancel_work_sync()
1323 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1325 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1332 if (worker->current_work != work) in __kthread_cancel_work_sync()
1340 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
1342 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1346 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
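__kthread_cancel_work_sync() backs the exported kthread_cancel_work_sync() and kthread_cancel_delayed_work_sync(): it removes the work under the lock and, if the worker is executing that very item (worker->current_work == work), drops the lock and flushes so the handler is guaranteed to have finished on return. Typical shutdown-path usage:

    kthread_cancel_delayed_work_sync(&dwork);  /* timer disarmed, handler done */
    kthread_cancel_work_sync(&dev->work);      /* true if it was still pending */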
1395 void kthread_flush_worker(struct kthread_worker *worker) in kthread_flush_worker() argument
1402 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
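kthread_flush_worker() drains the whole queue rather than a single item: it queues an on-stack flush work at the tail and waits for its completion, so everything queued before the call has run by the time it returns:

    kthread_flush_worker(worker);  /* barrier: all earlier queued work is done */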
1419 void kthread_destroy_worker(struct kthread_worker *worker) in kthread_destroy_worker() argument
1423 task = worker->task; in kthread_destroy_worker()
1427 kthread_flush_worker(worker); in kthread_destroy_worker()
1429 WARN_ON(!list_empty(&worker->delayed_work_list)); in kthread_destroy_worker()
1430 WARN_ON(!list_empty(&worker->work_list)); in kthread_destroy_worker()
1431 kfree(worker); in kthread_destroy_worker()
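kthread_destroy_worker() is the teardown path: it flushes the worker, stops the task with kthread_stop() (which does not match refs:worker, so it is absent from this listing), warns if either work list is unexpectedly non-empty, and frees the structure. Since it kfree()s the worker, it is only meaningful for workers created with kthread_create_worker() or kthread_create_worker_on_cpu(), and the WARN_ON on delayed_work_list implies callers should cancel delayed work first. A sketch of the full lifecycle using the calls above; names are illustrative:

    worker = kthread_create_worker(0, "my_worker");
    if (IS_ERR(worker))
            return PTR_ERR(worker);

    kthread_queue_work(worker, &dev->work);
    /* ... */
    kthread_destroy_worker(worker);  /* flush, kthread_stop(), kfree(worker) */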