Lines matching refs:work in the kthread worker implementation (kernel/kthread.c); each hit shows the source line number, the matching code, and the enclosing function.
807 struct kthread_work *work; in kthread_worker_fn() local
830 work = NULL; in kthread_worker_fn()
833 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
835 list_del_init(&work->node); in kthread_worker_fn()
837 worker->current_work = work; in kthread_worker_fn()
840 if (work) { in kthread_worker_fn()
841 kthread_work_func_t func = work->func; in kthread_worker_fn()
843 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
844 work->func(work); in kthread_worker_fn()
849 trace_sched_kthread_work_execute_end(work, func); in kthread_worker_fn()
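The hits above are the core of kthread_worker_fn(): the worker pops the first entry off worker->work_list, detaches it with list_del_init(), records it in worker->current_work, and calls work->func(work) between the execute_start/execute_end tracepoints. Below is a minimal sketch of driving that loop from a module; kthread_create_worker(), kthread_init_work(), kthread_queue_work() and kthread_destroy_worker() are the real API, while demo_worker, demo_work and demo_work_fn are illustrative names.

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/err.h>

static struct kthread_worker *demo_worker; /* thread running kthread_worker_fn() */
static struct kthread_work demo_work;      /* hypothetical work item */

static void demo_work_fn(struct kthread_work *work)
{
	/* Runs in the worker thread, from the "if (work)" branch above. */
	pr_info("demo_work executed\n");
}

static int __init demo_init(void)
{
	demo_worker = kthread_create_worker(0, "demo-worker");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);

	kthread_init_work(&demo_work, demo_work_fn);
	kthread_queue_work(demo_worker, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Flushes outstanding work, then stops the worker thread. */
	kthread_destroy_worker(demo_worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");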
981 struct kthread_work *work) in queuing_blocked() argument
985 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
989 struct kthread_work *work) in kthread_insert_work_sanity_check() argument
992 WARN_ON_ONCE(!list_empty(&work->node)); in kthread_insert_work_sanity_check()
994 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
999 struct kthread_work *work, in kthread_insert_work() argument
1002 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
1004 trace_sched_kthread_work_queue_work(worker, work); in kthread_insert_work()
1006 list_add_tail(&work->node, pos); in kthread_insert_work()
1007 work->worker = worker; in kthread_insert_work()
1025 struct kthread_work *work) in kthread_queue_work() argument
1031 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
1032 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
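kthread_queue_work() takes worker->lock and inserts the work only when queuing_blocked() reports that it is neither still pending (!list_empty(&work->node)) nor in the middle of a cancel (work->canceling). The boolean return lets callers detect the already-queued case; a small sketch, reusing the illustrative names from above:

	/* Returns false if demo_work is already queued or being canceled. */
	if (!kthread_queue_work(demo_worker, &demo_work))
		pr_debug("demo_work not re-queued: pending or canceling\n");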
1051 struct kthread_work *work = &dwork->work; in kthread_delayed_work_timer_fn() local
1052 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn()
1064 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
1067 WARN_ON_ONCE(list_empty(&work->node)); in kthread_delayed_work_timer_fn()
1068 list_del_init(&work->node); in kthread_delayed_work_timer_fn()
1069 if (!work->canceling) in kthread_delayed_work_timer_fn()
1070 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
1081 struct kthread_work *work = &dwork->work; in __kthread_queue_delayed_work() local
1092 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
1097 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
1099 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
1100 work->worker = worker; in __kthread_queue_delayed_work()
1124 struct kthread_work *work = &dwork->work; in kthread_queue_delayed_work() local
1130 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
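For delayed work, two paths cooperate: __kthread_queue_delayed_work() queues immediately when the delay is zero (line 1092), otherwise it parks the work on worker->delayed_work_list, records work->worker, and arms the timer; when the timer fires, kthread_delayed_work_timer_fn() moves the item onto the regular work_list unless a cancel is in flight (work->canceling). A sketch of a self re-arming poll job, building on the module above; poll_dwork and poll_fn are illustrative names:

#include <linux/jiffies.h>

static struct kthread_delayed_work poll_dwork; /* hypothetical */

static void poll_fn(struct kthread_work *work)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);

	/* ... periodic job ... then re-arm for half a second from now. */
	kthread_queue_delayed_work(demo_worker, dwork, msecs_to_jiffies(500));
}

	/* Setup, e.g. in demo_init(): */
	kthread_init_delayed_work(&poll_dwork, poll_fn);
	kthread_queue_delayed_work(demo_worker, &poll_dwork, msecs_to_jiffies(500));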
1141 struct kthread_work work; member
1145 static void kthread_flush_work_fn(struct kthread_work *work) in kthread_flush_work_fn() argument
1148 container_of(work, struct kthread_flush_work, work); in kthread_flush_work_fn()
1158 void kthread_flush_work(struct kthread_work *work) in kthread_flush_work() argument
1161 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), in kthread_flush_work()
1167 worker = work->worker; in kthread_flush_work()
1173 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
1175 if (!list_empty(&work->node)) in kthread_flush_work()
1176 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
1177 else if (worker->current_work == work) in kthread_flush_work()
1178 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
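kthread_flush_work() builds an on-stack kthread_flush_work whose callback, kthread_flush_work_fn(), completes a completion. If the target is still queued, the flush item is inserted directly behind it (work->node.next); if it is currently executing, the flush item goes to the head of the list; either way the caller sleeps until the target has finished. Usage is a single call; continuing the sketch:

	/* Block until demo_work is off the list and its callback has returned. */
	kthread_flush_work(&demo_work);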
1197 static void kthread_cancel_delayed_work_timer(struct kthread_work *work, in kthread_cancel_delayed_work_timer() argument
1201 container_of(work, struct kthread_delayed_work, work); in kthread_cancel_delayed_work_timer()
1202 struct kthread_worker *worker = work->worker; in kthread_cancel_delayed_work_timer()
1210 work->canceling++; in kthread_cancel_delayed_work_timer()
1214 work->canceling--; in kthread_cancel_delayed_work_timer()
1230 static bool __kthread_cancel_work(struct kthread_work *work) in __kthread_cancel_work() argument
1236 if (!list_empty(&work->node)) { in __kthread_cancel_work()
1237 list_del_init(&work->node); in __kthread_cancel_work()
1271 struct kthread_work *work = &dwork->work; in kthread_mod_delayed_work() local
1278 if (!work->worker) { in kthread_mod_delayed_work()
1284 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1298 kthread_cancel_delayed_work_timer(work, &flags); in kthread_mod_delayed_work()
1299 if (work->canceling) { in kthread_mod_delayed_work()
1304 ret = __kthread_cancel_work(work); in kthread_mod_delayed_work()
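kthread_mod_delayed_work() first shuts down the pending timer via kthread_cancel_delayed_work_timer(), backs off if another cancel is already in flight (work->canceling), and otherwise unlinks the work with __kthread_cancel_work() before re-queueing it with the new delay. That makes it the debouncing primitive; a sketch with the illustrative poll_dwork:

	/* Push the next poll out to two seconds from now, whether or not one
	 * was pending. Returns true if a pending instance was moved, false
	 * if the work was idle and has been freshly queued. */
	kthread_mod_delayed_work(demo_worker, &poll_dwork, msecs_to_jiffies(2000));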
1314 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) in __kthread_cancel_work_sync() argument
1316 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync()
1325 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1328 kthread_cancel_delayed_work_timer(work, &flags); in __kthread_cancel_work_sync()
1330 ret = __kthread_cancel_work(work); in __kthread_cancel_work_sync()
1332 if (worker->current_work != work) in __kthread_cancel_work_sync()
1339 work->canceling++; in __kthread_cancel_work_sync()
1341 kthread_flush_work(work); in __kthread_cancel_work_sync()
1343 work->canceling--; in __kthread_cancel_work_sync()
1367 bool kthread_cancel_work_sync(struct kthread_work *work) in kthread_cancel_work_sync() argument
1369 return __kthread_cancel_work_sync(work, false); in kthread_cancel_work_sync()
1384 return __kthread_cancel_work_sync(&dwork->work, true); in kthread_cancel_delayed_work_sync()
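The _sync cancel variants build on the pieces above: __kthread_cancel_work_sync() unlinks the work (stopping the timer first for delayed work), and if the callback is currently running (worker->current_work == work) it bumps work->canceling so the timer path cannot re-queue, drops the lock, and waits it out with kthread_flush_work(). Typical teardown, continuing the sketch:

	/* Both guarantee the callback is not running on return; both return
	 * true if the work was still pending when canceled. */
	kthread_cancel_work_sync(&demo_work);
	kthread_cancel_delayed_work_sync(&poll_dwork);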
1398 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), in kthread_flush_worker()
1402 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
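kthread_flush_worker() queues a flush work at the tail of the worker's queue and waits for it, so everything queued before the call has completed by the time it returns. It serves as a barrier when the worker should keep running; a sketch:

	/* Barrier: all work queued on demo_worker so far has now finished. */
	kthread_flush_worker(demo_worker);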