Lines matching refs: lock. These are identifier cross-reference hits, apparently from the Linux kernel's kernel/locking/rtmutex.c: the number leading each entry is its line in that source file, and the trailing "in function()" annotation names the enclosing definition.
34 struct rt_mutex *lock, in __ww_mutex_add_waiter() argument
40 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, in __ww_mutex_check_waiters() argument
45 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, in ww_mutex_lock_acquired() argument
50 static inline int __ww_mutex_check_kill(struct rt_mutex *lock, in __ww_mutex_check_kill() argument
91 rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) in rt_mutex_set_owner() argument
95 if (rt_mutex_has_waiters(lock)) in rt_mutex_set_owner()
98 WRITE_ONCE(lock->owner, (struct task_struct *)val); in rt_mutex_set_owner()
101 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) in clear_rt_mutex_waiters() argument
103 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
104 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
107 static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock) in fixup_rt_mutex_waiters() argument
109 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
111 if (rt_mutex_has_waiters(lock)) in fixup_rt_mutex_waiters()
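
The three helpers above (source lines 91-111) manage rtmutex's "has waiters" flag, which is stolen from bit 0 of lock->owner: task_struct pointers are word-aligned, so the low bit is always free. rt_mutex_set_owner() preserves the bit across owner changes, clear_rt_mutex_waiters() drops it, and fixup_rt_mutex_waiters() lazily clears a stale bit once the wait list is empty. Below is a minimal userspace model of that encoding in C11 atomics; the struct and function names are hypothetical stand-ins, not the kernel implementation.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RT_MUTEX_HAS_WAITERS 1UL

    struct task { long pad; };          /* word-aligned, like task_struct */

    struct lock_model {
        _Atomic uintptr_t owner;        /* task pointer | waiters bit */
    };

    static struct task *owner_of(struct lock_model *lock)
    {
        return (struct task *)(atomic_load(&lock->owner) & ~RT_MUTEX_HAS_WAITERS);
    }

    /* like rt_mutex_set_owner(): keep the waiters bit across owner changes */
    static void set_owner(struct lock_model *lock, struct task *t, int has_waiters)
    {
        uintptr_t val = (uintptr_t)t;

        if (has_waiters)
            val |= RT_MUTEX_HAS_WAITERS;
        atomic_store(&lock->owner, val);
    }

    /* like clear_rt_mutex_waiters(): only legal with the wait list locked */
    static void clear_waiters(struct lock_model *lock)
    {
        atomic_store(&lock->owner,
                     atomic_load(&lock->owner) & ~RT_MUTEX_HAS_WAITERS);
    }

    int main(void)
    {
        struct task me;
        struct lock_model lock = { .owner = 0 };

        set_owner(&lock, &me, 1);
        printf("owner=%p waiters=%lu\n", (void *)owner_of(&lock),
               (unsigned long)(atomic_load(&lock.owner) & RT_MUTEX_HAS_WAITERS));
        clear_waiters(&lock);
        return 0;
    }
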
182 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
186 return try_cmpxchg_acquire(&lock->owner, &old, new); in rt_mutex_cmpxchg_acquire()
189 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
193 return try_cmpxchg_release(&lock->owner, &old, new); in rt_mutex_cmpxchg_release()
201 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
203 unsigned long owner, *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
217 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
219 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
221 struct task_struct *owner = rt_mutex_owner(lock); in unlock_rt_mutex_safe()
223 clear_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
224 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
249 return rt_mutex_cmpxchg_release(lock, owner, NULL); in unlock_rt_mutex_safe()
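
unlock_rt_mutex_safe() (source lines 217-249) exists because the owner cannot simply store NULL: a waiter may set RT_MUTEX_HAS_WAITERS at any moment. The helper therefore clears the waiters bit while holding wait_lock, drops wait_lock, and only then attempts a release-cmpxchg of owner to NULL; if that fails, a new waiter slipped in and the caller, rt_mutex_slowunlock() (source lines 1327-1332), retakes wait_lock and retries. A compressed userspace model of that retry protocol, with the waiter list itself elided and all names hypothetical:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct lock_model {
        _Atomic uintptr_t owner;    /* task id | low "has waiters" bit */
        pthread_mutex_t wait_lock;  /* protects the (elided) waiter list */
    };

    /* Called with wait_lock held; returns true if the release stuck. */
    static bool unlock_safe(struct lock_model *lock, uintptr_t me)
    {
        uintptr_t expected = me;

        atomic_store(&lock->owner, me);          /* clear the waiters bit */
        pthread_mutex_unlock(&lock->wait_lock);  /* open the race window  */

        /*
         * If a waiter set the bit after we dropped wait_lock, the owner
         * word is no longer `me`, the cmpxchg fails, and the caller must
         * retry the whole clear/drop/cmpxchg sequence.
         */
        return atomic_compare_exchange_strong_explicit(
                &lock->owner, &expected, 0,
                memory_order_release, memory_order_relaxed);
    }

    static void slow_unlock(struct lock_model *lock, uintptr_t me)
    {
        pthread_mutex_lock(&lock->wait_lock);
        while (true) {                /* kernel: while no waiters queued */
            if (unlock_safe(lock, me))
                return;
            pthread_mutex_lock(&lock->wait_lock);  /* lost a race: retry */
        }
    }

    int main(void)
    {
        struct lock_model lock = {
            .owner = 42, .wait_lock = PTHREAD_MUTEX_INITIALIZER,
        };

        slow_unlock(&lock, 42);
        printf("owner now %lu\n", (unsigned long)atomic_load(&lock.owner));
        return 0;
    }
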
253 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_acquire() argument
261 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, in rt_mutex_cmpxchg_release() argument
268 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) in mark_rt_mutex_waiters() argument
270 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
271 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
277 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, in unlock_rt_mutex_safe() argument
279 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
281 lock->owner = NULL; in unlock_rt_mutex_safe()
282 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
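
This second set of definitions (source lines 253-282) is the CONFIG_DEBUG_RT_MUTEXES fallback: the cmpxchg helpers unconditionally return false, forcing every lock and unlock through the wait_lock-serialized slow path, which in turn lets the mark/unlock variants use plain stores because wait_lock is always held around them. The compile-time switch, sketched in standalone C (the macro name is a hypothetical stand-in for the kernel config option):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEBUG_RT_MUTEXES 1   /* stand-in for CONFIG_DEBUG_RT_MUTEXES */

    static bool cmpxchg_acquire_fastpath(void)
    {
    #if DEBUG_RT_MUTEXES
        return false;  /* never succeeds: everything takes the slow path */
    #else
        return true;   /* a real try_cmpxchg on lock->owner would go here */
    #endif
    }

    int main(void)
    {
        if (!cmpxchg_acquire_fastpath())
            puts("fastpath disabled: serialized on wait_lock");
        return 0;
    }
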
396 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
398 rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
402 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
407 rb_erase_cached(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
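
rt_mutex_enqueue()/rt_mutex_dequeue() (source lines 396-407) keep a lock's waiters in a cached rbtree via rb_add_cached()/rb_erase_cached(), so the top waiter is always the leftmost node and cheap to fetch. The __waiter_less() ordering is by waiter priority, with an earlier deadline breaking ties for deadline-class tasks. A standalone model of that ordering, using qsort() in place of the kernel rbtree (struct and field names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct waiter {
        int prio;          /* lower value = higher priority, as in the kernel */
        uint64_t deadline; /* tie-break for deadline-class waiters */
    };

    /* mirrors the shape of the kernel's waiter ordering: prio, then deadline */
    static int waiter_cmp(const void *a, const void *b)
    {
        const struct waiter *wa = a, *wb = b;

        if (wa->prio != wb->prio)
            return wa->prio < wb->prio ? -1 : 1;
        if (wa->deadline != wb->deadline)
            return wa->deadline < wb->deadline ? -1 : 1;
        return 0;
    }

    int main(void)
    {
        struct waiter w[] = {
            { .prio = 120, .deadline = 0   },
            { .prio =  10, .deadline = 200 },
            { .prio =  10, .deadline = 100 },
        };

        qsort(w, 3, sizeof(w[0]), waiter_cmp);
        /* w[0] is the "top waiter": highest prio, earliest deadline */
        printf("top waiter: prio=%d deadline=%llu\n",
               w[0].prio, (unsigned long long)w[0].deadline);
        return 0;
    }
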
508 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
584 struct rt_mutex_base *lock; in rt_mutex_adjust_prio_chain() local
663 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
730 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
736 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
751 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { in rt_mutex_adjust_prio_chain()
766 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
787 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
788 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
793 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
806 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
810 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
823 prerequeue_top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
826 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
846 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
859 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
865 if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) in rt_mutex_adjust_prio_chain()
867 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
872 task = get_task_struct(rt_mutex_owner(lock)); in rt_mutex_adjust_prio_chain()
876 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
899 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
924 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
928 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
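
The block of hits from rt_mutex_adjust_prio_chain() (source lines 584-928) is the priority-inheritance chain walk: follow lock -> owner -> the lock that owner is blocked on, requeueing the waiter at each hop so a boost propagates, using trylock on each wait_lock (source line 736) to avoid inverted lock ordering, and declaring deadlock when the walk returns to the original lock (source line 751). A minimal single-threaded model of just that cycle detection (no locking, names hypothetical):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct model_lock;

    struct model_task {
        struct model_lock *blocked_on;  /* lock this task waits for, or NULL */
    };

    struct model_lock {
        struct model_task *owner;       /* current owner, or NULL */
    };

    /*
     * Walk owner -> blocked_on -> owner ... starting from orig_lock and
     * report deadlock if the walk comes back to orig_lock, mirroring the
     * lock == orig_lock test in rt_mutex_adjust_prio_chain().
     */
    static bool chain_deadlocks(struct model_lock *orig_lock)
    {
        struct model_lock *lock = orig_lock;

        for (;;) {
            struct model_task *owner = lock->owner;

            if (!owner || !owner->blocked_on)
                return false;           /* chain ends: no deadlock */
            lock = owner->blocked_on;
            if (lock == orig_lock)
                return true;            /* walked back to the start */
        }
    }

    int main(void)
    {
        struct model_lock A, B;
        struct model_task t1, t2;

        A.owner = &t1; t1.blocked_on = &B;  /* t1 holds A, waits for B */
        B.owner = &t2; t2.blocked_on = &A;  /* t2 holds B, waits for A */

        printf("deadlock on A: %d\n", chain_deadlocks(&A));
        return 0;
    }

The kernel additionally caps the walk at max_lock_depth and copes with the chain mutating underneath it by dropping locks and retrying; both are elided here.
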
969 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
972 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
991 mark_rt_mutex_waiters(lock); in try_to_take_rt_mutex()
996 if (rt_mutex_owner(lock)) in try_to_take_rt_mutex()
1005 struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); in try_to_take_rt_mutex()
1016 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
1029 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
1032 rt_mutex_top_waiter(lock))) in try_to_take_rt_mutex()
1064 if (rt_mutex_has_waiters(lock)) in try_to_take_rt_mutex()
1065 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
1073 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
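
try_to_take_rt_mutex() (source lines 969-1073) encodes the acquisition policy: the lock can be taken only when it has no owner, and then only by the top waiter or by a task whose priority would preempt the current top waiter; on success the new owner records the remaining top waiter via rt_mutex_enqueue_pi() so its own priority stays boosted. A condensed model of that predicate (names hypothetical):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct model_waiter { int prio; };  /* lower value = higher priority */

    /*
     * Can a task of priority `task_prio` take an ownerless lock whose
     * current top waiter is `top` (NULL when the wait list is empty)?
     * `is_top_waiter` says the caller is that top waiter itself.
     */
    static bool can_take(bool has_owner, bool is_top_waiter,
                         const struct model_waiter *top, int task_prio)
    {
        if (has_owner)
            return false;              /* owned: nothing to take */
        if (is_top_waiter)
            return true;               /* top waiter always wins */
        if (!top)
            return true;               /* empty wait list: just take it */
        return task_prio < top->prio;  /* may steal only if it preempts */
    }

    int main(void)
    {
        struct model_waiter top = { .prio = 50 };

        printf("%d %d %d\n",
               can_take(false, false, &top, 10),   /* 1: steals from prio 50 */
               can_take(false, false, &top, 90),   /* 0: would not preempt   */
               can_take(true,  false, NULL, 10));  /* 0: lock already owned  */
        return 0;
    }
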
1085 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, in task_blocks_on_rt_mutex() argument
1091 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex()
1096 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
1112 waiter->lock = lock; in task_blocks_on_rt_mutex()
1116 if (rt_mutex_has_waiters(lock)) in task_blocks_on_rt_mutex()
1117 top_waiter = rt_mutex_top_waiter(lock); in task_blocks_on_rt_mutex()
1118 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
1128 rtm = container_of(lock, struct rt_mutex, rtmutex); in task_blocks_on_rt_mutex()
1132 rt_mutex_dequeue(lock, waiter); in task_blocks_on_rt_mutex()
1143 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
1173 raw_spin_unlock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1175 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, in task_blocks_on_rt_mutex()
1178 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
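
task_blocks_on_rt_mutex() (source lines 1085-1178) shows the lock-ordering discipline of the slow path: the waiter is enqueued and the owner's priority adjusted under lock->wait_lock, but the chain walk runs with wait_lock dropped (source lines 1173-1178) because it has to take other locks' wait_locks. That drop/operate/retake idiom, modeled on a pthread mutex (names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

    static void chain_walk(void)
    {
        /* may acquire other locks; must not hold wait_lock here */
        puts("walking the PI chain without wait_lock");
    }

    static void block_on_lock(void)
    {
        pthread_mutex_lock(&wait_lock);
        /* enqueue waiter, boost owner: state changes under wait_lock */

        pthread_mutex_unlock(&wait_lock);  /* drop before the chain walk */
        chain_walk();
        pthread_mutex_lock(&wait_lock);    /* retake: caller expects it held */

        /* re-check state here: it may have changed while unlocked */
        pthread_mutex_unlock(&wait_lock);
    }

    int main(void) { block_on_lock(); return 0; }

remove_waiter() (source lines 1415-1465) mirrors the same pattern on the teardown path: dequeue, deboost the owner, then chain-walk with wait_lock dropped.
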
1190 struct rt_mutex_base *lock) in mark_wakeup_next_waiter() argument
1196 waiter = rt_mutex_top_waiter(lock); in mark_wakeup_next_waiter()
1216 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
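
mark_wakeup_next_waiter() (source lines 1190-1216) dequeues the top waiter onto a wake queue and deliberately leaves lock->owner as the bare RT_MUTEX_HAS_WAITERS bit: the lock is ownerless, but a fastpath cmpxchg that expects a fully-NULL owner word still fails, so late lockers cannot jump the queue and the woken waiter acquires through try_to_take_rt_mutex(). Demonstrated on a standalone atomic word (names hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HAS_WAITERS 1UL

    /* ownerless, but the waiters bit is still set */
    static _Atomic uintptr_t owner = HAS_WAITERS;

    int main(void)
    {
        uintptr_t expected = 0;  /* the fastpath expects a pure-NULL word */

        /* fails: the bare waiters bit keeps fastpath lockers out */
        int ok = atomic_compare_exchange_strong(&owner, &expected, (uintptr_t)2);
        printf("fastpath steal succeeded: %d\n", ok);
        return 0;
    }
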
1233 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) in __rt_mutex_slowtrylock() argument
1235 int ret = try_to_take_rt_mutex(lock, current, NULL); in __rt_mutex_slowtrylock()
1241 fixup_rt_mutex_waiters(lock); in __rt_mutex_slowtrylock()
1249 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) in rt_mutex_slowtrylock() argument
1259 if (rt_mutex_owner(lock)) in rt_mutex_slowtrylock()
1266 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1268 ret = __rt_mutex_slowtrylock(lock); in rt_mutex_slowtrylock()
1270 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1275 static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) in __rt_mutex_trylock() argument
1277 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in __rt_mutex_trylock()
1280 return rt_mutex_slowtrylock(lock); in __rt_mutex_trylock()
1286 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) in rt_mutex_slowunlock() argument
1292 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1294 debug_rt_mutex_unlock(lock); in rt_mutex_slowunlock()
1327 while (!rt_mutex_has_waiters(lock)) { in rt_mutex_slowunlock()
1329 if (unlock_rt_mutex_safe(lock, flags) == true) in rt_mutex_slowunlock()
1332 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1341 mark_wakeup_next_waiter(&wqh, lock); in rt_mutex_slowunlock()
1342 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1347 static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) in __rt_mutex_unlock() argument
1349 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) in __rt_mutex_unlock()
1352 rt_mutex_slowunlock(lock); in __rt_mutex_unlock()
1356 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
1365 if (owner != rt_mutex_owner(lock)) in rtmutex_spin_on_owner()
1383 !rt_mutex_waiter_is_top_waiter(lock, waiter) || in rtmutex_spin_on_owner()
1394 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, in rtmutex_spin_on_owner() argument
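
rtmutex_spin_on_owner() (source lines 1356-1394; the second definition is the stub used when spinning is unavailable) implements adaptive spinning: a contended task keeps spinning only while the owner has not changed (source line 1365), the owner is actually running on a CPU, and the spinner is still the top waiter (source line 1383); once any of those fails, spinning is wasted work and the task should schedule instead. The bail-out structure in C11 atomics (a userspace model, names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct spin_state {
        _Atomic uintptr_t owner;       /* current owner, 0 = released   */
        atomic_bool owner_on_cpu;      /* is the owner actually running? */
        atomic_bool still_top_waiter;  /* did someone outrank us?        */
    };

    /* Spin while it is useful; return false to tell the caller to block. */
    static bool spin_on_owner(struct spin_state *s, uintptr_t owner)
    {
        for (;;) {
            if (atomic_load(&s->owner) != owner)
                return true;                     /* owner changed/released */
            if (!atomic_load(&s->owner_on_cpu) ||
                !atomic_load(&s->still_top_waiter))
                return false;                    /* spinning is pointless  */
            /* cpu_relax() in the kernel; a no-op here */
        }
    }

    int main(void)
    {
        struct spin_state s = { .owner = 1, .owner_on_cpu = true,
                                .still_top_waiter = true };

        atomic_store(&s.owner, 0);              /* owner releases the lock */
        printf("retry acquire: %d\n", spin_on_owner(&s, 1));
        return 0;
    }

rt_mutex_slowlock_block() (source lines 1479-1518) is the caller: it drops wait_lock, spins via rtmutex_spin_on_owner() or schedules, then retakes wait_lock and retries try_to_take_rt_mutex().
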
1415 static void __sched remove_waiter(struct rt_mutex_base *lock, in remove_waiter() argument
1418 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1419 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter()
1422 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1425 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1440 if (rt_mutex_has_waiters(lock)) in remove_waiter()
1441 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); in remove_waiter()
1460 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1462 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, in remove_waiter()
1465 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
1479 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, in rt_mutex_slowlock_block() argument
1485 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in rt_mutex_slowlock_block()
1491 if (try_to_take_rt_mutex(lock, current, waiter)) in rt_mutex_slowlock_block()
1509 if (waiter == rt_mutex_top_waiter(lock)) in rt_mutex_slowlock_block()
1510 owner = rt_mutex_owner(lock); in rt_mutex_slowlock_block()
1513 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
1515 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) in rt_mutex_slowlock_block()
1518 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
1557 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, in __rt_mutex_slowlock() argument
1563 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); in __rt_mutex_slowlock()
1567 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_slowlock()
1570 if (try_to_take_rt_mutex(lock, current, NULL)) { in __rt_mutex_slowlock()
1580 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk); in __rt_mutex_slowlock()
1582 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter); in __rt_mutex_slowlock()
1593 remove_waiter(lock, waiter); in __rt_mutex_slowlock()
1601 fixup_rt_mutex_waiters(lock); in __rt_mutex_slowlock()
1605 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, in __rt_mutex_slowlock_locked() argument
1615 ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, in __rt_mutex_slowlock_locked()
1628 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, in rt_mutex_slowlock() argument
1643 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1644 ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state); in rt_mutex_slowlock()
1645 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowlock()
1650 static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, in __rt_mutex_lock() argument
1653 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) in __rt_mutex_lock()
1656 return rt_mutex_slowlock(lock, NULL, state); in __rt_mutex_lock()
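
__rt_mutex_lock(), __rt_mutex_trylock() and __rt_mutex_unlock() (source lines 1275-1352 and 1650-1656) all share one shape: a single cmpxchg on lock->owner (NULL <-> current) as the uncontended fastpath, with everything else pushed into a slow function that takes wait_lock. The shape in standalone C11 atomics (a sketch with a spinning stand-in for the slow path, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uintptr_t owner;  /* 0 = unlocked, else owning task id */

    static void slowlock(uintptr_t me)
    {
        /* the kernel queues a waiter and blocks here; we just spin */
        uintptr_t expected = 0;

        while (!atomic_compare_exchange_weak_explicit(&owner, &expected, me,
                                                      memory_order_acquire,
                                                      memory_order_relaxed))
            expected = 0;
    }

    static void slow_unlock_stub(void)
    {
        /* the kernel wakes the top waiter here; nothing to do in the model */
    }

    static void lock(uintptr_t me)
    {
        uintptr_t expected = 0;

        /* fastpath: one acquire-cmpxchg, no wait_lock, no queueing */
        if (atomic_compare_exchange_strong_explicit(&owner, &expected, me,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
            return;
        slowlock(me);  /* contended: fall back to the slow path */
    }

    static void unlock(uintptr_t me)
    {
        uintptr_t expected = me;

        /* fastpath: one release-cmpxchg, owner -> 0 */
        if (!atomic_compare_exchange_strong_explicit(&owner, &expected, 0,
                                                     memory_order_release,
                                                     memory_order_relaxed))
            slow_unlock_stub();  /* waiters bit was set: slow path */
    }

    int main(void)
    {
        lock(1);
        unlock(1);
        puts("fastpath lock/unlock ok");
        return 0;
    }
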
1669 static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) in rtlock_slowlock_locked() argument
1674 lockdep_assert_held(&lock->wait_lock); in rtlock_slowlock_locked()
1676 if (try_to_take_rt_mutex(lock, current, NULL)) in rtlock_slowlock_locked()
1684 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK); in rtlock_slowlock_locked()
1688 if (try_to_take_rt_mutex(lock, current, &waiter)) in rtlock_slowlock_locked()
1691 if (&waiter == rt_mutex_top_waiter(lock)) in rtlock_slowlock_locked()
1692 owner = rt_mutex_owner(lock); in rtlock_slowlock_locked()
1695 raw_spin_unlock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1697 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) in rtlock_slowlock_locked()
1700 raw_spin_lock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1711 fixup_rt_mutex_waiters(lock); in rtlock_slowlock_locked()
1715 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) in rtlock_slowlock() argument
1719 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rtlock_slowlock()
1720 rtlock_slowlock_locked(lock); in rtlock_slowlock()
1721 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rtlock_slowlock()
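
rtlock_slowlock()/rtlock_slowlock_locked() (source lines 1669-1721) are the PREEMPT_RT substitution path behind spinlock_t and rwlock_t: the outer function just brackets the locked helper with the irqsave/irqrestore form of wait_lock, and the helper repeats the mutex slow path's try_to_take/block/spin loop minus signal and timeout handling, since rtlock waits are not interruptible. The bracketing idiom on a pthread mutex (names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

    static void slowlock_locked(void)
    {
        /* the try_to_take / enqueue / spin-or-block loop would live here */
        puts("slow path runs with wait_lock held");
    }

    static void slowlock(void)
    {
        /* the kernel brackets this with raw_spin_lock_irqsave/irqrestore */
        pthread_mutex_lock(&wait_lock);
        slowlock_locked();
        pthread_mutex_unlock(&wait_lock);
    }

    int main(void) { slowlock(); return 0; }
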