Lines matching refs:worker (references to the kthread_worker API in kernel/kthread.c)

756 void __kthread_init_worker(struct kthread_worker *worker,  in __kthread_init_worker()  argument
760 memset(worker, 0, sizeof(struct kthread_worker)); in __kthread_init_worker()
761 raw_spin_lock_init(&worker->lock); in __kthread_init_worker()
762 lockdep_set_class_and_name(&worker->lock, key, name); in __kthread_init_worker()
763 INIT_LIST_HEAD(&worker->work_list); in __kthread_init_worker()
764 INIT_LIST_HEAD(&worker->delayed_work_list); in __kthread_init_worker()
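__kthread_init_worker() zeroes the worker, initializes worker->lock as a raw spinlock tagged with a lockdep class, and sets up the two queues: work_list for runnable items and delayed_work_list for timer-armed items. Callers normally reach it through the kthread_init_worker() wrapper, which supplies the lockdep key. A minimal sketch of the manual pattern that pairs it with kthread_run() (my_worker, my_thread and my_init are hypothetical names):

    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct kthread_worker my_worker;
    static struct task_struct *my_thread;

    static int my_init(void)
    {
            kthread_init_worker(&my_worker);  /* wraps __kthread_init_worker() */

            /* spawn a thread that runs the worker loop, kthread_worker_fn() */
            my_thread = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
            if (IS_ERR(my_thread))
                    return PTR_ERR(my_thread);

            return 0;
    }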
785 struct kthread_worker *worker = worker_ptr; in kthread_worker_fn() local
792 WARN_ON(worker->task && worker->task != current); in kthread_worker_fn()
793 worker->task = current; in kthread_worker_fn()
795 if (worker->flags & KTW_FREEZABLE) in kthread_worker_fn()
803 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
804 worker->task = NULL; in kthread_worker_fn()
805 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
810 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
811 if (!list_empty(&worker->work_list)) { in kthread_worker_fn()
812 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
816 worker->current_work = work; in kthread_worker_fn()
817 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
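kthread_worker_fn() is the worker thread's main loop: it claims the worker (WARN if another task already owns it), honors KTW_FREEZABLE, then repeatedly pops the first entry off work_list under worker->lock, records it as current_work, drops the lock, and runs the callback; when kthread_stop() is requested and the queue is empty it clears worker->task and exits. Callbacks receive the struct kthread_work pointer and typically recover their payload with container_of(). A sketch (my_data and my_work_fn are hypothetical):

    struct my_data {
            struct kthread_work work;
            int value;
    };

    static void my_work_fn(struct kthread_work *work)
    {
            struct my_data *d = container_of(work, struct my_data, work);

            /* runs in the worker thread, with worker->lock not held */
            pr_info("processing %d\n", d->value);
    }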
842 struct kthread_worker *worker; in __kthread_create_worker() local
846 worker = kzalloc(sizeof(*worker), GFP_KERNEL); in __kthread_create_worker()
847 if (!worker) in __kthread_create_worker()
850 kthread_init_worker(worker); in __kthread_create_worker()
855 task = __kthread_create_on_node(kthread_worker_fn, worker, in __kthread_create_worker()
863 worker->flags = flags; in __kthread_create_worker()
864 worker->task = task; in __kthread_create_worker()
866 return worker; in __kthread_create_worker()
869 kfree(worker); in __kthread_create_worker()
885 struct kthread_worker *worker; in kthread_create_worker() local
889 worker = __kthread_create_worker(-1, flags, namefmt, args); in kthread_create_worker()
892 return worker; in kthread_create_worker()
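kthread_create_worker() is the dynamic counterpart: __kthread_create_worker() kzalloc()s the worker, initializes it, creates and wakes the thread via __kthread_create_on_node(), and records flags and task, so the caller gets back a ready worker or an ERR_PTR()-encoded error. A sketch of typical call-site handling (w is a hypothetical name):

    struct kthread_worker *w;

    w = kthread_create_worker(0 /* flags, e.g. KTW_FREEZABLE */, "my-worker");
    if (IS_ERR(w))
            return PTR_ERR(w);

    /* w is now running kthread_worker_fn() and ready for kthread_queue_work() */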
935 struct kthread_worker *worker; in kthread_create_worker_on_cpu() local
939 worker = __kthread_create_worker(cpu, flags, namefmt, args); in kthread_create_worker_on_cpu()
942 return worker; in kthread_create_worker_on_cpu()
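kthread_create_worker_on_cpu() is the same path with cpu >= 0, which makes __kthread_create_worker() bind the thread to that CPU; in the version this listing was taken from it still accepts printf-style name arguments. A sketch for one pinned worker (cpu and pcpu_worker are hypothetical):

    struct kthread_worker *pcpu_worker;

    pcpu_worker = kthread_create_worker_on_cpu(cpu, 0, "my-worker/%d", cpu);
    if (IS_ERR(pcpu_worker))
            return PTR_ERR(pcpu_worker);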
951 static inline bool queuing_blocked(struct kthread_worker *worker, in queuing_blocked() argument
954 lockdep_assert_held(&worker->lock); in queuing_blocked()
959 static void kthread_insert_work_sanity_check(struct kthread_worker *worker, in kthread_insert_work_sanity_check() argument
962 lockdep_assert_held(&worker->lock); in kthread_insert_work_sanity_check()
965 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
969 static void kthread_insert_work(struct kthread_worker *worker, in kthread_insert_work() argument
973 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
975 trace_sched_kthread_work_queue_work(worker, work); in kthread_insert_work()
978 work->worker = worker; in kthread_insert_work()
979 if (!worker->current_work && likely(worker->task)) in kthread_insert_work()
980 wake_up_process(worker->task); in kthread_insert_work()
995 bool kthread_queue_work(struct kthread_worker *worker, in kthread_queue_work() argument
1001 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_work()
1002 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
1003 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
1006 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_work()
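kthread_queue_work() takes worker->lock with IRQs disabled, refuses to re-queue while the item is still pending or being canceled (queuing_blocked()), and otherwise appends it via kthread_insert_work(), which wakes the thread only if it is idle; it returns true only if the work was actually queued. Continuing the sketch above (d is the hypothetical struct my_data instance):

    kthread_init_work(&d.work, my_work_fn);

    if (!kthread_queue_work(w, &d.work))
            pr_debug("work was already pending\n");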
1023 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn() local
1030 if (WARN_ON_ONCE(!worker)) in kthread_delayed_work_timer_fn()
1033 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_delayed_work_timer_fn()
1035 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
1041 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
1043 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_delayed_work_timer_fn()
1047 static void __kthread_queue_delayed_work(struct kthread_worker *worker, in __kthread_queue_delayed_work() argument
1063 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
1068 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
1070 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
1071 work->worker = worker; in __kthread_queue_delayed_work()
1091 bool kthread_queue_delayed_work(struct kthread_worker *worker, in kthread_queue_delayed_work() argument
1099 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_delayed_work()
1101 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
1102 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_queue_delayed_work()
1106 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_delayed_work()
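kthread_queue_delayed_work() queues immediately when delay == 0; otherwise __kthread_queue_delayed_work() parks the item on delayed_work_list and arms dwork->timer so that kthread_delayed_work_timer_fn() moves it onto work_list when the timer fires. A sketch (dwork is hypothetical; the delay is in jiffies):

    static struct kthread_delayed_work dwork;

    kthread_init_delayed_work(&dwork, my_work_fn);  /* also wires up the timer */
    kthread_queue_delayed_work(w, &dwork, msecs_to_jiffies(100));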
1135 struct kthread_worker *worker; in kthread_flush_work() local
1138 worker = work->worker; in kthread_flush_work()
1139 if (!worker) in kthread_flush_work()
1142 raw_spin_lock_irq(&worker->lock); in kthread_flush_work()
1144 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
1147 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
1148 else if (worker->current_work == work) in kthread_flush_work()
1149 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
1150 worker->work_list.next); in kthread_flush_work()
1154 raw_spin_unlock_irq(&worker->lock); in kthread_flush_work()
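kthread_flush_work() waits until a given item has finished: under worker->lock it inserts a temporary flush work containing a completion right behind the target if it is still queued, or at the head of work_list if the target is currently running, then waits for the completion outside the lock; if the work is neither queued nor running it returns immediately. Usage is a single call, continuing the sketch:

    kthread_flush_work(&d.work);  /* returns only after my_work_fn() has completed */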
1173 struct kthread_worker *worker = work->worker; in kthread_cancel_delayed_work_timer() local
1182 raw_spin_unlock_irqrestore(&worker->lock, *flags); in kthread_cancel_delayed_work_timer()
1184 raw_spin_lock_irqsave(&worker->lock, *flags); in kthread_cancel_delayed_work_timer()
1238 bool kthread_mod_delayed_work(struct kthread_worker *worker, in kthread_mod_delayed_work() argument
1246 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_mod_delayed_work()
1249 if (!work->worker) { in kthread_mod_delayed_work()
1255 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1278 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_mod_delayed_work()
1280 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_mod_delayed_work()
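kthread_mod_delayed_work() re-arms a delayed work under one lock acquisition: it cancels the pending timer/queue entry (kthread_cancel_delayed_work_timer() retries if it races with the timer callback) and requeues with the new delay, which makes it the natural primitive for debounce or watchdog-style rearming. A sketch (names hypothetical):

    /* each call pushes the expiry another 500 ms into the future */
    kthread_mod_delayed_work(w, &dwork, msecs_to_jiffies(500));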
1287 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync() local
1291 if (!worker) in __kthread_cancel_work_sync()
1294 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1296 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1303 if (worker->current_work != work) in __kthread_cancel_work_sync()
1311 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
1313 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1317 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
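__kthread_cancel_work_sync() backs the kthread_cancel_work_sync() and kthread_cancel_delayed_work_sync() wrappers: it deletes the timer and dequeues the work under worker->lock, and if the callback is currently executing (worker->current_work == work) it marks the work as canceling, drops the lock, flushes, and re-takes the lock before clearing the flag. A sketch of the guarantee callers rely on (names hypothetical):

    if (kthread_cancel_delayed_work_sync(&dwork))
            pr_debug("dwork was still pending\n");
    /* from here on, my_work_fn() for dwork is guaranteed not to be running */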
1366 void kthread_flush_worker(struct kthread_worker *worker) in kthread_flush_worker() argument
1373 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
1390 void kthread_destroy_worker(struct kthread_worker *worker) in kthread_destroy_worker() argument
1394 task = worker->task; in kthread_destroy_worker()
1398 kthread_flush_worker(worker); in kthread_destroy_worker()
1400 WARN_ON(!list_empty(&worker->delayed_work_list)); in kthread_destroy_worker()
1401 WARN_ON(!list_empty(&worker->work_list)); in kthread_destroy_worker()
1402 kfree(worker); in kthread_destroy_worker()
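kthread_destroy_worker() is the teardown path for workers made by kthread_create_worker(): it flushes all queued work, stops the task, warns if either list is somehow non-empty, and frees the worker. The caller must ensure nothing queues new work once teardown starts. A sketch (names hypothetical):

    kthread_cancel_delayed_work_sync(&dwork);  /* stop self-rearming work first */
    kthread_destroy_worker(w);                 /* flush + kthread_stop() + kfree() */
    w = NULL;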