Lines matching refs: waiter
33 static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter, in __ww_mutex_add_waiter() argument
51 struct rt_mutex_waiter *waiter, in __ww_mutex_check_kill() argument
298 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) in waiter_update_prio() argument
300 waiter->prio = __waiter_prio(task); in waiter_update_prio()
301 waiter->deadline = task->dl.deadline; in waiter_update_prio()
346 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, in rt_mutex_steal() argument
349 if (rt_mutex_waiter_less(waiter, top_waiter)) in rt_mutex_steal()
357 if (rt_prio(waiter->prio) || dl_prio(waiter->prio)) in rt_mutex_steal()
360 return rt_mutex_waiter_equal(waiter, top_waiter); in rt_mutex_steal()
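The rt_mutex_steal() hits above (lines 346-360) lean on the waiter ordering rules: priority is the primary key, an earlier deadline breaks ties between SCHED_DEADLINE tasks, and lateral (equal-priority) steals are allowed only for non-RT tasks. The following is a minimal userspace sketch of those comparison rules, assuming simplified fields and illustrative names; it models the ordering, it is not the kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified waiter: a lower prio value means higher priority; the
 * deadline only participates when both tasks are deadline-scheduled. */
struct waiter {
	int prio;
	unsigned long long deadline;
	bool dl;	/* SCHED_DEADLINE task */
	bool rt;	/* real-time priority task */
};

/* Primary key: priority; tie-break: earlier deadline for DL tasks. */
static bool waiter_less(const struct waiter *a, const struct waiter *b)
{
	if (a->prio < b->prio)
		return true;
	if (a->prio > b->prio)
		return false;
	return a->dl && b->dl && a->deadline < b->deadline;
}

static bool waiter_equal(const struct waiter *a, const struct waiter *b)
{
	if (a->prio != b->prio)
		return false;
	return !a->dl || !b->dl || a->deadline == b->deadline;
}

/* A newcomer may take the lock ahead of the queued top waiter only if it
 * is strictly higher priority; equal-priority steals are reserved for
 * normal tasks so RT/DL latencies stay bounded. */
static bool can_steal(const struct waiter *w, const struct waiter *top)
{
	if (waiter_less(w, top))
		return true;
	if (w->rt || w->dl)
		return false;
	return waiter_equal(w, top);
}

int main(void)
{
	struct waiter top = { .prio = 120 };
	struct waiter newcomer = { .prio = 100, .rt = true };

	printf("steal allowed: %d\n", can_steal(&newcomer, &top));	/* 1 */
	return 0;
}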
396 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
398 rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
402 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
404 if (RB_EMPTY_NODE(&waiter->tree_entry)) in rt_mutex_dequeue()
407 rb_erase_cached(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
408 RB_CLEAR_NODE(&waiter->tree_entry); in rt_mutex_dequeue()
421 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue_pi() argument
423 rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less); in rt_mutex_enqueue_pi()
427 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue_pi() argument
429 if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) in rt_mutex_dequeue_pi()
432 rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters); in rt_mutex_dequeue_pi()
433 RB_CLEAR_NODE(&waiter->pi_tree_entry); in rt_mutex_dequeue_pi()
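rt_mutex_enqueue()/rt_mutex_dequeue() maintain the per-lock tree of all waiters, while rt_mutex_enqueue_pi()/rt_mutex_dequeue_pi() mirror only each lock's top waiter into the owning task's pi_waiters tree. Below is a rough userspace model of that double bookkeeping, assuming sorted singly linked lists in place of the kernel's cached rb-trees; all structure and function names are illustrative.

#include <stddef.h>
#include <stdio.h>

struct task;
struct lock;

struct waiter {
	int prio;			/* lower value = higher priority */
	struct waiter *next_on_lock;	/* stands in for tree_entry      */
	struct waiter *next_on_owner;	/* stands in for pi_tree_entry   */
};

struct lock {
	struct task *owner;
	struct waiter *waiters;		/* all waiters, best first       */
};

struct task {
	struct waiter *pi_waiters;	/* top waiter of each owned lock */
};

/* Keep the per-lock list ordered so the head is always the top waiter. */
static void lock_enqueue(struct lock *l, struct waiter *w)
{
	struct waiter **p = &l->waiters;

	while (*p && (*p)->prio <= w->prio)
		p = &(*p)->next_on_lock;
	w->next_on_lock = *p;
	*p = w;
}

/* Only the top waiter of each lock is visible to the owner; this list is
 * what the priority-inheritance code consults to compute the boost. */
static void owner_enqueue_pi(struct task *owner, struct waiter *w)
{
	struct waiter **p = &owner->pi_waiters;

	while (*p && (*p)->prio <= w->prio)
		p = &(*p)->next_on_owner;
	w->next_on_owner = *p;
	*p = w;
}

int main(void)
{
	struct task owner = { .pi_waiters = NULL };
	struct lock l = { .owner = &owner };
	struct waiter a = { .prio = 120 }, b = { .prio = 90 };

	lock_enqueue(&l, &a);
	lock_enqueue(&l, &b);		/* b becomes the top waiter */
	owner_enqueue_pi(&owner, l.waiters);

	printf("top waiter prio: %d\n", owner.pi_waiters->prio);	/* 90 */
	return 0;
}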
498 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, in rt_mutex_cond_detect_deadlock() argument
502 return waiter != NULL; in rt_mutex_cond_detect_deadlock()
581 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; in rt_mutex_adjust_prio_chain() local
633 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
644 if (!waiter) in rt_mutex_adjust_prio_chain()
663 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
688 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) in rt_mutex_adjust_prio_chain()
720 if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { in rt_mutex_adjust_prio_chain()
730 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
826 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
844 waiter_update_prio(waiter, task); in rt_mutex_adjust_prio_chain()
846 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
866 wake_up_state(waiter->task, waiter->wake_state); in rt_mutex_adjust_prio_chain()
876 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
884 rt_mutex_enqueue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
887 } else if (prerequeue_top_waiter == waiter) { in rt_mutex_adjust_prio_chain()
898 rt_mutex_dequeue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
899 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
900 rt_mutex_enqueue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
945 if (!detect_deadlock && waiter != top_waiter) in rt_mutex_adjust_prio_chain()
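rt_mutex_adjust_prio_chain() is the priority-inheritance walk: starting from a blocked task it follows waiter->lock->owner hops, requeues the waiter with its task's current priority, boosts the owner, and stops once a hop changes nothing or the chain ends. The sketch below is a deliberately stripped-down userspace version of just that boosting loop; the real walk also drops and retakes the pi/wait locks at every step, requeues both rb-trees, wakes requeue targets, and performs deadlock detection. Names and shapes are illustrative only.

#include <stddef.h>
#include <stdio.h>

struct waiter;
struct lock;

struct task {
	int prio;			/* lower value = higher priority   */
	struct waiter *pi_blocked_on;	/* waiter this task is blocked on  */
};

struct waiter {
	int prio;			/* priority carried by the waiter  */
	struct lock *lock;		/* lock it is waiting for          */
};

struct lock {
	struct task *owner;
};

/* Walk the blocking chain, propagating the priority of the task at the
 * head of the chain into each waiter and each owner, until a hop makes
 * no difference or there is nothing left to boost. */
static void adjust_prio_chain(struct task *task)
{
	struct waiter *w = task->pi_blocked_on;

	while (w) {
		struct task *owner = w->lock->owner;

		w->prio = task->prio;		/* requeue with new priority   */

		if (!owner || owner->prio <= w->prio)
			break;			/* no owner, or no boost needed */

		owner->prio = w->prio;		/* priority inheritance        */
		task = owner;
		w = task->pi_blocked_on;	/* continue with the next lock */
	}
}

int main(void)
{
	struct task owner = { .prio = 120 };
	struct lock l = { .owner = &owner };
	struct waiter w = { .prio = 90, .lock = &l };
	struct task blocked = { .prio = 90, .pi_blocked_on = &w };

	adjust_prio_chain(&blocked);
	printf("owner prio after boost: %d\n", owner.prio);	/* 90 */
	return 0;
}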
970 struct rt_mutex_waiter *waiter) in try_to_take_rt_mutex() argument
1004 if (waiter) { in try_to_take_rt_mutex()
1011 if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) { in try_to_take_rt_mutex()
1016 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
1086 struct rt_mutex_waiter *waiter, in task_blocks_on_rt_mutex() argument
1092 struct rt_mutex_waiter *top_waiter = waiter; in task_blocks_on_rt_mutex()
1111 waiter->task = task; in task_blocks_on_rt_mutex()
1112 waiter->lock = lock; in task_blocks_on_rt_mutex()
1113 waiter_update_prio(waiter, task); in task_blocks_on_rt_mutex()
1118 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
1120 task->pi_blocked_on = waiter; in task_blocks_on_rt_mutex()
1129 res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx); in task_blocks_on_rt_mutex()
1132 rt_mutex_dequeue(lock, waiter); in task_blocks_on_rt_mutex()
1143 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
1145 rt_mutex_enqueue_pi(owner, waiter); in task_blocks_on_rt_mutex()
1150 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { in task_blocks_on_rt_mutex()
1176 next_lock, waiter, task); in task_blocks_on_rt_mutex()
1192 struct rt_mutex_waiter *waiter; in mark_wakeup_next_waiter() local
1196 waiter = rt_mutex_top_waiter(lock); in mark_wakeup_next_waiter()
1205 rt_mutex_dequeue_pi(current, waiter); in mark_wakeup_next_waiter()
1229 rt_mutex_wake_q_add(wqh, waiter); in mark_wakeup_next_waiter()
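task_blocks_on_rt_mutex() and mark_wakeup_next_waiter() are the two ends of a waiter's life visible above: the blocking side fills in the waiter, queues it on the lock and records it in task->pi_blocked_on; the unlock side removes the top waiter from the departing owner's pi bookkeeping and wakes its task, while the waiter stays queued on the lock until the woken task actually takes it. Here is a compressed sketch of that hand-off, again with illustrative names and plain pointers instead of rb-trees and wake queues.

#include <stddef.h>
#include <stdio.h>

struct waiter;
struct lock;

struct task {
	int prio;
	struct waiter *pi_blocked_on;	/* set while blocked on a lock    */
	struct waiter *pi_top_waiter;	/* top waiter among owned locks   */
};

struct lock {
	struct task *owner;
	struct waiter *top_waiter;	/* head of the (omitted) wait tree */
};

struct waiter {
	int prio;
	struct task *task;
	struct lock *lock;
};

/* Blocking side: record what we wait for and make the owner aware of us
 * if we became the lock's top waiter (which may start a chain walk). */
static void block_on(struct lock *l, struct task *t, struct waiter *w)
{
	w->task = t;
	w->lock = l;
	w->prio = t->prio;
	t->pi_blocked_on = w;

	if (!l->top_waiter || w->prio < l->top_waiter->prio) {
		l->top_waiter = w;
		if (l->owner)
			l->owner->pi_top_waiter = w;	/* may boost the owner */
	}
}

/* Unlock side: detach the top waiter from the departing owner's pi
 * bookkeeping and mark the lock ownerless but still contended; the woken
 * task keeps its waiter queued and must take the lock on its retry path. */
static struct task *wake_next(struct lock *l)
{
	struct waiter *w = l->top_waiter;

	if (!w)
		return NULL;
	if (l->owner)
		l->owner->pi_top_waiter = NULL;	/* deboost the old owner    */
	l->owner = NULL;			/* free but has waiters     */
	return w->task;				/* wake_up_state() candidate */
}

int main(void)
{
	struct task owner = { .prio = 120 };
	struct task blocked = { .prio = 90 };
	struct lock l = { .owner = &owner };
	struct waiter w;
	struct task *next;

	block_on(&l, &blocked, &w);
	next = wake_next(&l);
	printf("task to wake has prio %d\n", next->prio);	/* 90 */
	return 0;
}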
1357 struct rt_mutex_waiter *waiter, in rtmutex_spin_on_owner() argument
1383 !rt_mutex_waiter_is_top_waiter(lock, waiter) || in rtmutex_spin_on_owner()
1395 struct rt_mutex_waiter *waiter, in rtmutex_spin_on_owner() argument
1416 struct rt_mutex_waiter *waiter) in remove_waiter() argument
1418 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1425 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1438 rt_mutex_dequeue_pi(owner, waiter); in remove_waiter()
1483 struct rt_mutex_waiter *waiter) in rt_mutex_slowlock_block() argument
1491 if (try_to_take_rt_mutex(lock, current, waiter)) in rt_mutex_slowlock_block()
1504 ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx); in rt_mutex_slowlock_block()
1509 if (waiter == rt_mutex_top_waiter(lock)) in rt_mutex_slowlock_block()
1515 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) in rt_mutex_slowlock_block()
1561 struct rt_mutex_waiter *waiter) in __rt_mutex_slowlock() argument
1580 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk); in __rt_mutex_slowlock()
1582 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter); in __rt_mutex_slowlock()
1593 remove_waiter(lock, waiter); in __rt_mutex_slowlock()
1594 rt_mutex_handle_deadlock(ret, chwalk, waiter); in __rt_mutex_slowlock()
1609 struct rt_mutex_waiter waiter; in __rt_mutex_slowlock_locked() local
1612 rt_mutex_init_waiter(&waiter); in __rt_mutex_slowlock_locked()
1613 waiter.ww_ctx = ww_ctx; in __rt_mutex_slowlock_locked()
1616 &waiter); in __rt_mutex_slowlock_locked()
1618 debug_rt_mutex_free_waiter(&waiter); in __rt_mutex_slowlock_locked()
1671 struct rt_mutex_waiter waiter; in rtlock_slowlock_locked() local
1679 rt_mutex_init_rtlock_waiter(&waiter); in rtlock_slowlock_locked()
1684 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK); in rtlock_slowlock_locked()
1688 if (try_to_take_rt_mutex(lock, current, &waiter)) in rtlock_slowlock_locked()
1691 if (&waiter == rt_mutex_top_waiter(lock)) in rtlock_slowlock_locked()
1697 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) in rtlock_slowlock_locked()
1712 debug_rt_mutex_free_waiter(&waiter); in rtlock_slowlock_locked()
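The slow-path entries at the end of the listing (rt_mutex_slowlock_block(), __rt_mutex_slowlock(), __rt_mutex_slowlock_locked(), rtlock_slowlock_locked()) share one shape: a waiter lives on the caller's stack, the task blocks on the mutex, then loops between retrying the trylock, optimistically spinning while the owner stays on a CPU (only the top waiter may do so), and sleeping. The skeleton below shows that loop with stubbed primitives; every helper here is a placeholder, not a kernel API.

#include <stdbool.h>
#include <stdio.h>

struct lock { bool owner_running; };
struct waiter { int prio; };

/* Stub: pretend the lock becomes available on the second attempt. */
static bool try_to_take(struct lock *l, struct waiter *w)
{
	static int attempts;
	(void)l; (void)w;
	return ++attempts > 1;
}

static bool is_top_waiter(struct lock *l, struct waiter *w)
{
	(void)l; (void)w;
	return true;
}

static bool owner_on_cpu(struct lock *l)
{
	return l->owner_running;
}

static void block_or_spin(bool spin)
{
	puts(spin ? "spinning on owner" : "sleeping");
}

/* Shape of the contended slow path: enqueue an on-stack waiter, then loop
 * between trylock, optimistic spinning, and sleeping until the lock is
 * taken.  task_blocks_on_rt_mutex() would queue the waiter and start a
 * chain walk before the loop; remove_waiter() and the debug teardown run
 * on the error paths. */
static int slowlock(struct lock *l)
{
	struct waiter w = { .prio = 100 };	/* on-stack waiter */

	for (;;) {
		if (try_to_take(l, &w))
			break;			/* got the lock */

		/* Only the top waiter may spin while the owner runs. */
		block_or_spin(is_top_waiter(l, &w) && owner_on_cpu(l));
	}
	return 0;
}

int main(void)
{
	struct lock l = { .owner_running = false };

	return slowlock(&l);
}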