/linux/include/linux/

  spinlock_api_up.h
    28  do { __acquire(lock); (void)(lock); } while (0)
    43  do { __release(lock); (void)(lock); } while (0)
    58  #define _raw_spin_lock(lock) __LOCK(lock)  argument
    60  #define _raw_read_lock(lock) __LOCK(lock)  argument
    61  #define _raw_write_lock(lock) __LOCK(lock)  argument
    63  #define _raw_spin_lock_bh(lock) __LOCK_BH(lock)  argument
    64  #define _raw_read_lock_bh(lock) __LOCK_BH(lock)  argument
    65  #define _raw_write_lock_bh(lock) __LOCK_BH(lock)  argument
    76  #define _raw_spin_unlock(lock) __UNLOCK(lock)  argument
    77  #define _raw_read_unlock(lock) __UNLOCK(lock)  argument
    [all …]

  rwlock_api_smp.h
    18  void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
    19  void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
    45  #define _raw_read_lock(lock) __raw_read_lock(lock)  argument
    49  #define _raw_write_lock(lock) __raw_write_lock(lock)  argument
    53  #define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)  argument
    57  #define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)  argument
    61  #define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)  argument
    77  #define _raw_read_trylock(lock) __raw_read_trylock(lock)  argument
    81  #define _raw_write_trylock(lock) __raw_write_trylock(lock)  argument
    85  #define _raw_read_unlock(lock) __raw_read_unlock(lock)  argument
    [all …]

  spinlock.h
    108  __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
    113  do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
    121  #define raw_spin_is_contended(lock) (((void)(lock), 0))  argument
    215  #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))  argument
    217  #define raw_spin_lock(lock) _raw_spin_lock(lock)  argument
    274  #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)  argument
    275  #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)  argument
    276  #define raw_spin_unlock(lock) _raw_spin_unlock(lock)  argument
    284  #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)  argument
    287  __cond_lock(lock, _raw_spin_trylock_bh(lock))
    [all …]

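The raw_spin_* definitions above sit underneath the spin_lock()/spin_unlock() API that most kernel code uses. As a hedged illustration (the lock and counter below are invented for the sketch, not taken from spinlock.h), the common pattern for data shared with interrupt handlers looks like this:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(stats_lock);     /* statically initialised spinlock */
    static unsigned long packet_count;      /* protected by stats_lock */

    /* May be called from process, softirq and hardirq context, so the
     * irqsave variant is used to avoid deadlocking against an interrupt
     * handler that takes the same lock on this CPU. */
    static void stats_inc(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&stats_lock, flags);
        packet_count++;
        spin_unlock_irqrestore(&stats_lock, flags);
    }
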
  rwlock.h
    24  __rwlock_init((lock), #lock, &__key); \
    28  do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
    52  #define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))  argument
    53  #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))  argument
    55  #define write_lock(lock) _raw_write_lock(lock)  argument
    56  #define read_lock(lock) _raw_read_lock(lock)  argument
    92  #define read_lock_irq(lock) _raw_read_lock_irq(lock)  argument
    93  #define read_lock_bh(lock) _raw_read_lock_bh(lock)  argument
    96  #define read_unlock(lock) _raw_read_unlock(lock)  argument
    97  #define write_unlock(lock) _raw_write_unlock(lock)  argument
    [all …]

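rwlock.h supplies the reader/writer counterparts: many read_lock() holders may run concurrently, while write_lock() is exclusive. A minimal sketch, assuming an invented table as the protected data:

    #include <linux/spinlock.h>   /* pulls in the rwlock API */

    static DEFINE_RWLOCK(table_lock);    /* protects table[] */
    static int table[16];

    static int table_get(int i)
    {
        int v;

        read_lock(&table_lock);      /* shared: readers do not exclude each other */
        v = table[i];
        read_unlock(&table_lock);
        return v;
    }

    static void table_set(int i, int v)
    {
        write_lock(&table_lock);     /* exclusive: waits for all readers */
        table[i] = v;
        write_unlock(&table_lock);
    }
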
  spinlock_api_smp.h
    22  void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
    28  void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
    47  #define _raw_spin_lock(lock) __raw_spin_lock(lock)  argument
    51  #define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)  argument
    55  #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)  argument
    59  #define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)  argument
    63  #define _raw_spin_trylock(lock) __raw_spin_trylock(lock)  argument
    67  #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)  argument
    71  #define _raw_spin_unlock(lock) __raw_spin_unlock(lock)  argument
    75  #define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)  argument
    [all …]

  spinlock_rt.h
    45  rt_spin_lock(lock);  in spin_lock()
    89  rt_spin_lock(lock);  in spin_lock_bh()
    94  rt_spin_lock(lock);  in spin_lock_irq()
    106  rt_spin_unlock(lock);  in spin_unlock()
    127  __cond_lock(lock, rt_spin_trylock(lock))
    130  __cond_lock(lock, rt_spin_trylock_bh(lock))
    133  __cond_lock(lock, rt_spin_trylock(lock))
    146  __cond_lock(lock, __spin_trylock_irqsave(lock, flags))
    148  #define spin_is_contended(lock) (((void)(lock), 0))  argument
    152  return rt_mutex_base_is_locked(&lock->lock);  in spin_is_locked()
    [all …]

  spinlock_up.h
    31  lock->slock = 0;  in arch_spin_lock()
    39  lock->slock = 0;  in arch_spin_trylock()
    48  lock->slock = 1;  in arch_spin_unlock()
    54  #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)  argument
    55  #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)  argument
    56  #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })  argument
    57  #define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })  argument
    62  #define arch_spin_is_locked(lock) ((void)(lock), 0)  argument
    64  # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)  argument
    66  # define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })  argument
    [all …]

  local_lock_internal.h
    58  debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
    59  lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
    62  local_lock_debug_init(lock); \
    69  debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
    70  lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
    73  local_lock_debug_init(lock); \
    76  #define __local_lock(lock) \  argument
    142  #define __local_lock_irq(lock) __local_lock(lock)  argument
    148  __local_lock(lock); \
    157  #define __local_unlock_irq(lock) __local_unlock(lock)  argument
    [all …]

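local_lock_internal.h is the implementation behind <linux/local_lock.h>, which protects per-CPU data: on non-RT kernels it boils down to preemption or interrupt disabling, but it gives lockdep a dep_map so the critical section becomes visible. A hedged sketch of typical usage; the per-CPU structure is invented for illustration:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct cpu_cache {
        local_lock_t lock;          /* protects the fields below, per CPU */
        unsigned int hits;
    };

    static DEFINE_PER_CPU(struct cpu_cache, cpu_cache) = {
        .lock = INIT_LOCAL_LOCK(lock),
    };

    static void cache_hit(void)
    {
        /* Disables preemption (or takes a per-CPU spinlock on PREEMPT_RT)
         * and lets lockdep track the section. */
        local_lock(&cpu_cache.lock);
        this_cpu_inc(cpu_cache.hits);
        local_unlock(&cpu_cache.lock);
    }
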
  mutex.h
    42  extern void mutex_destroy(struct mutex *lock);
    166  #define mutex_lock(lock) mutex_lock_nested(lock, 0)  argument
    167  #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)  argument
    168  #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)  argument
    169  #define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)  argument
    178  extern void mutex_lock(struct mutex *lock);
    183  # define mutex_lock_nested(lock, subclass) mutex_lock(lock)  argument
    184  # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)  argument
    185  # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)  argument
    186  # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)  argument
    [all …]

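mutex.h maps the plain mutex_lock() family onto the _nested variants when lockdep is enabled, as the lines above show. A short usage sketch with an invented configuration value; mutexes sleep, so this is for process context only:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(cfg_mutex);      /* serialises updates of cfg_value */
    static int cfg_value;

    static int cfg_set(int v)
    {
        int ret;

        /* The _interruptible variant returns -EINTR if a signal arrives
         * while waiting, which ioctl-style paths usually want to propagate. */
        ret = mutex_lock_interruptible(&cfg_mutex);
        if (ret)
            return ret;

        cfg_value = v;
        mutex_unlock(&cfg_mutex);
        return 0;
    }
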
  lockdep.h
    176  lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
    190  lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
    200  lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock)
    205  #define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)  argument
    210  return lock->key == key;  in lockdep_match_key()
    252  #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)  argument
    253  #define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r))  argument
    265  lock_set_class(lock, lock->name, lock->key, subclass, ip);  in lock_set_subclass()
    445  lock(_lock); \
    468  lock(_lock)
    [all …]

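Most consumers reach lockdep through assertions such as lockdep_assert_held() and the lockdep_is_held() checks used by RCU. A hedged sketch (the registry list and its mutex are hypothetical):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <linux/list.h>

    static DEFINE_MUTEX(registry_mutex);
    static LIST_HEAD(registry);

    struct entry {
        struct list_head node;
    };

    /* Documents the locking rule and, with CONFIG_PROVE_LOCKING, turns a
     * violation into a runtime warning: callers must hold registry_mutex. */
    static void registry_add(struct entry *e)
    {
        lockdep_assert_held(&registry_mutex);
        list_add(&e->node, &registry);
    }
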
/linux/kernel/locking/

  spinlock_debug.c
    24  debug_check_no_locks_freed((void *)lock, sizeof(*lock));  in __raw_spin_lock_init()
    43  debug_check_no_locks_freed((void *)lock, sizeof(*lock));  in __rwlock_init()
    66  lock, READ_ONCE(lock->magic),  in spin_dump()
    100  SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");  in debug_spin_unlock()
    102  SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");  in debug_spin_unlock()
    161  RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");  in do_raw_read_lock()
    180  RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");  in do_raw_read_unlock()
    186  RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");  in debug_write_lock_before()
    187  RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");  in debug_write_lock_before()
    200  RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");  in debug_write_unlock()
    [all …]

  mutex.c
    52  osq_lock_init(&lock->osq);  in __mutex_init()
    489  osq_unlock(&lock->osq);  in mutex_optimistic_spin()
    496  osq_unlock(&lock->osq);  in mutex_optimistic_spin()
    565  __ww_mutex_unlock(lock);  in ww_mutex_unlock()
    587  MUTEX_WARN_ON(lock->magic != lock);  in __mutex_lock_common()
    642  __mutex_add_waiter(lock, &waiter, &lock->wait_list);  in __mutex_lock_common()
    861  ww_mutex_unlock(lock);  in ww_mutex_deadlock_injection()
    1032  mutex_lock(lock);  in mutex_lock_io()
    1090  MUTEX_WARN_ON(lock->magic != lock);  in mutex_trylock()
    1150  mutex_lock(lock);  in atomic_dec_and_mutex_lock()
    [all …]

  rtmutex_api.c
    141  __rt_mutex_unlock(&lock->rtmutex);  in rt_mutex_unlock()
    150  return rt_mutex_slowtrylock(lock);  in rt_mutex_futex_trylock()
    170  debug_rt_mutex_unlock(lock);  in __rt_mutex_futex_unlock()
    172  if (!rt_mutex_has_waiters(lock)) {  in __rt_mutex_futex_unlock()
    173  lock->owner = NULL;  in __rt_mutex_futex_unlock()
    216  debug_check_no_locks_freed((void *)lock, sizeof(*lock));  in __rt_mutex_init()
    241  __rt_mutex_base_init(lock);  in rt_mutex_init_proxy_locked()
    270  rt_mutex_clear_owner(lock);  in rt_mutex_proxy_unlock()
    348  remove_waiter(lock, waiter);  in rt_mutex_start_proxy_lock()
    434  remove_waiter(lock, waiter);  in rt_mutex_cleanup_proxy_lock()
    [all …]

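rtmutex_api.c provides the public entry points for priority-inheriting rt_mutexes (the futex and proxy-lock helpers above are used by the futex code). A minimal usage sketch; the lock name is illustrative:

    #include <linux/rtmutex.h>

    static DEFINE_RT_MUTEX(pi_lock);     /* priority-inheriting sleeping lock */

    static void critical_update(void)
    {
        /* If a higher-priority task blocks here, the current owner is
         * boosted to that priority until it releases the lock, bounding
         * priority inversion. */
        rt_mutex_lock(&pi_lock);
        /* ... touch the shared state ... */
        rt_mutex_unlock(&pi_lock);
    }
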
  spinlock.c
    154  __raw_spin_lock(lock);  in _raw_spin_lock()
    170  __raw_spin_lock_irq(lock);  in _raw_spin_lock_irq()
    178  __raw_spin_lock_bh(lock);  in _raw_spin_lock_bh()
    186  __raw_spin_unlock(lock);  in _raw_spin_unlock()
    228  __raw_read_lock(lock);  in _raw_read_lock()
    244  __raw_read_lock_irq(lock);  in _raw_read_lock_irq()
    252  __raw_read_lock_bh(lock);  in _raw_read_lock_bh()
    260  __raw_read_unlock(lock);  in _raw_read_unlock()
    300  __raw_write_lock(lock);  in _raw_write_lock()
    334  __raw_write_lock_bh(lock);  in _raw_write_lock_bh()
    [all …]

  rtmutex.c
    36  struct rt_mutex *lock,  in __ww_mutex_add_waiter()  argument
    110  xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));  in rt_mutex_set_owner()
    116  WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));  in rt_mutex_clear_owner()
    264  __releases(lock->wait_lock)  in unlock_rt_mutex_safe()
    338  __releases(lock->wait_lock)  in unlock_rt_mutex_safe()
    340  lock->owner = NULL;  in unlock_rt_mutex_safe()
    832  lock = waiter->lock;  in rt_mutex_adjust_prio_chain()
    857  if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {  in rt_mutex_adjust_prio_chain()
    1111  if (rt_mutex_owner(lock))  in try_to_take_rt_mutex()
    1230  waiter->lock = lock;  in task_blocks_on_rt_mutex()
    [all …]

  ww_mutex.h
    9  __ww_waiter_first(struct mutex *lock)  in __ww_waiter_first()  argument
    41  __ww_waiter_last(struct mutex *lock)  in __ww_waiter_last()  argument
    64  return __mutex_owner(lock);  in __ww_mutex_owner()
    75  raw_spin_lock(&lock->wait_lock);  in lock_wait_lock()
    407  lock_wait_lock(&lock->base);  in ww_mutex_set_context_fastpath()
    409  unlock_wait_lock(&lock->base);  in ww_mutex_set_context_fastpath()
    490  struct MUTEX *lock,  in __ww_mutex_add_waiter()  argument
    561  if (lock->ctx) {  in __ww_mutex_unlock()
    565  if (lock->ctx->acquired > 0)  in __ww_mutex_unlock()
    566  lock->ctx->acquired--;  in __ww_mutex_unlock()
    [all …]

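This header parameterises the wait/wound mutex core shared by mutex.c and rtmutex.c; the public API lives in <linux/ww_mutex.h>. A hedged sketch of the usual two-lock acquire-with-backoff pattern (struct buffer and buf_ww_class are invented; the shape of the loop follows Documentation/locking/ww-mutex-design.rst):

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(buf_ww_class);

    struct buffer {
        struct ww_mutex lock;   /* ww_mutex_init(&buf->lock, &buf_ww_class) at setup */
    };

    static int update_pair(struct buffer *a, struct buffer *b)
    {
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &buf_ww_class);

        ret = ww_mutex_lock(&a->lock, &ctx);
        if (ret)
            goto out_fini;

        while ((ret = ww_mutex_lock(&b->lock, &ctx)) == -EDEADLK) {
            /* Told to back off: drop what we hold, sleep on the
             * contended lock, then retry with the roles swapped. */
            struct buffer *tmp = a;

            ww_mutex_unlock(&a->lock);
            ww_mutex_lock_slow(&b->lock, &ctx);
            a = b;
            b = tmp;
        }
        if (ret) {
            ww_mutex_unlock(&a->lock);
            goto out_fini;
        }
        ww_acquire_done(&ctx);

        /* ... modify both buffers while holding both locks ... */

        ww_mutex_unlock(&a->lock);
        ww_mutex_unlock(&b->lock);
    out_fini:
        ww_acquire_fini(&ctx);
        return ret;
    }
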
/linux/fs/bcachefs/

  six.c
    91  EBUG_ON(lock->owner);  in six_set_owner()
    92  lock->owner = owner;  in six_set_owner()
    321  if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))  in six_relock_ip()
    506  six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);  in six_lock_slowpath()
    573  lock->owner = NULL;  in do_six_unlock_type()
    576  lock->readers) {  in do_six_unlock_type()
    620  lock->seq++;  in six_unlock_ip()
    672  if (lock->readers)  in six_lock_tryupgrade()
    818  if (lock->readers) {  in six_lock_readers_add()
    837  WARN_ON(lock->readers && pcpu_read_count(lock));  in six_lock_exit()
    [all …]

  six.h
    159  void six_lock_exit(struct six_lock *lock);
    173  #define six_lock_init(lock, flags) \  argument
    177  __six_lock_init((lock), #lock, &__key, flags); \
    193  return lock->seq;  in six_lock_seq()
    207  return six_trylock_ip(lock, type, _THIS_IP_);  in six_trylock_type()
    289  return six_relock_ip(lock, type, seq, _THIS_IP_);  in six_relock_type()
    310  six_unlock_ip(lock, type, _THIS_IP_);  in six_unlock_type()
    316  return six_trylock_ip(lock, SIX_LOCK_##type, ip); \
    352  return six_lock_ip_##type(lock, fn, p, _THIS_IP_); \
    357  six_unlock_ip(lock, SIX_LOCK_##type, ip); \
    [all …]

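six.h declares bcachefs' three-state locks (shared/intent/exclusive); the per-type wrappers such as six_lock_read() and six_unlock_intent() are generated by the macros visible at lines 316–357. A hedged sketch, assuming the generated six_lock_##type() helpers take (lock, should_sleep_fn, arg) as line 352 suggests:

    #include "six.h"

    static struct six_lock node_lock;    /* six_lock_init(&node_lock, 0) at setup */

    static void node_example(void)
    {
        /* Shared: many SIX_LOCK_read holders may coexist. */
        six_lock_read(&node_lock, NULL, NULL);
        /* ... read the protected structure ... */
        six_unlock_read(&node_lock);

        /* Intent excludes other intent/write holders but not readers, and
         * is upgraded to write only around the actual modification. */
        six_lock_intent(&node_lock, NULL, NULL);
        six_lock_write(&node_lock, NULL, NULL);
        /* ... modify ... */
        six_unlock_write(&node_lock);
        six_unlock_intent(&node_lock);
    }
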
  seqmutex.h
    8  struct mutex lock;  member
    12  #define seqmutex_init(_lock) mutex_init(&(_lock)->lock)
    16  return mutex_trylock(&lock->lock);  in seqmutex_trylock()
    19  static inline void seqmutex_lock(struct seqmutex *lock)  in seqmutex_lock()  argument
    21  mutex_lock(&lock->lock);  in seqmutex_lock()
    22  lock->seq++;  in seqmutex_lock()
    27  u32 seq = lock->seq;  in seqmutex_unlock()
    28  mutex_unlock(&lock->lock);  in seqmutex_unlock()
    34  if (lock->seq != seq || !mutex_trylock(&lock->lock))  in seqmutex_relock()
    37  if (lock->seq != seq) {  in seqmutex_relock()
    [all …]

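seqmutex pairs a mutex with a sequence counter so a caller can drop the lock, do work that may sleep, and later detect whether anyone else took the mutex in between. A hedged sketch of that relock pattern, assuming seqmutex_unlock() returns the current sequence number as lines 27–28 suggest; the lookup being protected is invented:

    #include "seqmutex.h"

    static struct seqmutex registry_lock;    /* seqmutex_init(&registry_lock) at setup */

    static void process_one(void)
    {
        u32 seq;

        seqmutex_lock(&registry_lock);
        /* ... find an object on a list protected by registry_lock ... */
        seq = seqmutex_unlock(&registry_lock);

        /* ... sleepable work that must not hold the mutex ... */

        if (!seqmutex_relock(&registry_lock, seq)) {
            /* Someone else took the mutex while we slept, so pointers
             * found above may be stale: retake the lock and redo the
             * lookup before continuing. */
            seqmutex_lock(&registry_lock);
            /* ... redo the lookup ... */
        }
        seqmutex_unlock(&registry_lock);
    }
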
/linux/arch/alpha/include/asm/

  spinlock.h
    21  return lock.lock == 0;  in arch_spin_value_unlocked()
    27  lock->lock = 0;  in arch_spin_unlock()
    46  : "=&r" (tmp), "=m" (lock->lock)  in arch_spin_lock()
    47  : "m"(lock->lock) : "memory");  in arch_spin_lock()
    52  return !test_and_set_bit(0, &lock->lock);  in arch_spin_trylock()
    74  : "m" (*lock) : "memory");  in arch_read_lock()
    94  : "m" (*lock) : "memory");  in arch_write_lock()
    114  : "m" (*lock) : "memory");  in arch_read_trylock()
    136  : "m" (*lock) : "memory");  in arch_write_trylock()
    154  : "m" (*lock) : "memory");  in arch_read_unlock()
    [all …]

/linux/drivers/md/persistent-data/

  dm-block-manager.c
    198  spin_lock(&lock->lock);  in bl_down_read()
    201  spin_unlock(&lock->lock);  in bl_down_read()
    217  spin_unlock(&lock->lock);  in bl_down_read()
    228  spin_lock(&lock->lock);  in bl_down_read_nonblock()
    241  spin_unlock(&lock->lock);  in bl_down_read_nonblock()
    247  spin_lock(&lock->lock);  in bl_up_read()
    253  spin_unlock(&lock->lock);  in bl_up_read()
    261  spin_lock(&lock->lock);  in bl_down_write()
    284  spin_unlock(&lock->lock);  in bl_down_write()
    294  spin_lock(&lock->lock);  in bl_up_write()
    [all …]

/linux/arch/hexagon/include/asm/

  spinlock.h
    28  static inline void arch_read_lock(arch_rwlock_t *lock)  in arch_read_lock()  argument
    37  : "r" (&lock->lock)  in arch_read_lock()
    51  : "r" (&lock->lock)  in arch_read_unlock()
    69  : "r" (&lock->lock)  in arch_read_trylock()
    85  : "r" (&lock->lock)  in arch_write_lock()
    102  : "r" (&lock->lock)  in arch_write_trylock()
    112  lock->lock = 0;  in arch_write_unlock()
    124  : "r" (&lock->lock)  in arch_spin_lock()
    133  lock->lock = 0;  in arch_spin_unlock()
    147  : "r" (&lock->lock)  in arch_spin_trylock()
    [all …]

/linux/fs/ocfs2/dlm/

  dlmast.c
    79  BUG_ON(!lock);  in __dlm_queue_ast()
    81  res = lock->lockres;  in __dlm_queue_ast()
    91  lock->ast_pending, lock->ml.type);  in __dlm_queue_ast()
    101  dlm_lock_get(lock);  in __dlm_queue_ast()
    132  BUG_ON(!lock);  in dlm_queue_ast()
    145  BUG_ON(!lock);  in __dlm_queue_bast()
    206  fn = lock->ast;  in dlm_do_local_ast()
    226  lksb = lock->lksb;  in dlm_do_remote_ast()
    346  lock = NULL;  in dlm_proxy_ast_handler()
    387  lock->ml.type, lock->ml.convert_type);  in dlm_proxy_ast_handler()
    [all …]

/linux/arch/powerpc/include/asm/

  simple_spinlock.h
    37  return lock.slock == 0;  in arch_spin_value_unlocked()
    101  splpar_spin_yield(lock);  in spin_yield()
    109  splpar_rw_yield(lock);  in rw_yield()
    122  splpar_spin_yield(lock);  in arch_spin_lock()
    133  lock->slock = 0;  in arch_spin_unlock()
    173  : "r" (&rw->lock), [eh] "n" (eh)  in __arch_read_trylock()
    253  : "r"(&rw->lock)  in arch_read_unlock()
    261  rw->lock = 0;  in arch_write_unlock()
    264  #define arch_spin_relax(lock) spin_yield(lock)  argument
    265  #define arch_read_relax(lock) rw_yield(lock)  argument
    [all …]

/linux/drivers/acpi/acpica/

  utlock.c
    32  lock->num_readers = 0;  in acpi_ut_create_rw_lock()
    45  acpi_os_delete_mutex(lock->reader_mutex);  in acpi_ut_delete_rw_lock()
    46  acpi_os_delete_mutex(lock->writer_mutex);  in acpi_ut_delete_rw_lock()
    48  lock->num_readers = 0;  in acpi_ut_delete_rw_lock()
    49  lock->reader_mutex = NULL;  in acpi_ut_delete_rw_lock()
    50  lock->writer_mutex = NULL;  in acpi_ut_delete_rw_lock()
    82  lock->num_readers++;  in acpi_ut_acquire_read_lock()
    83  if (lock->num_readers == 1) {  in acpi_ut_acquire_read_lock()
    89  acpi_os_release_mutex(lock->reader_mutex);  in acpi_ut_acquire_read_lock()
    104  lock->num_readers--;  in acpi_ut_release_read_lock()
    [all …]

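utlock.c builds ACPICA's reader/writer lock out of its portable OS mutex primitives: the first reader acquires the writer mutex and the last reader releases it, so a writer only ever takes the writer mutex. A minimal userspace sketch of the same algorithm (not the ACPICA code; a binary semaphore stands in for the writer mutex because the releasing reader may not be the thread that acquired it):

    #include <pthread.h>
    #include <semaphore.h>

    struct rw_from_mutexes {
        pthread_mutex_t reader_mutex;   /* protects num_readers */
        sem_t writer_sem;               /* binary: held while readers or a writer are active */
        int num_readers;
    };

    static void rw_init(struct rw_from_mutexes *l)
    {
        pthread_mutex_init(&l->reader_mutex, NULL);
        sem_init(&l->writer_sem, 0, 1);
        l->num_readers = 0;
    }

    static void rw_read_lock(struct rw_from_mutexes *l)
    {
        pthread_mutex_lock(&l->reader_mutex);
        if (++l->num_readers == 1)      /* first reader locks out writers */
            sem_wait(&l->writer_sem);
        pthread_mutex_unlock(&l->reader_mutex);
    }

    static void rw_read_unlock(struct rw_from_mutexes *l)
    {
        pthread_mutex_lock(&l->reader_mutex);
        if (--l->num_readers == 0)      /* last reader lets writers in */
            sem_post(&l->writer_sem);
        pthread_mutex_unlock(&l->reader_mutex);
    }

    /* Writers simply hold the "writer" side for exclusive access. */
    static void rw_write_lock(struct rw_from_mutexes *l)   { sem_wait(&l->writer_sem); }
    static void rw_write_unlock(struct rw_from_mutexes *l) { sem_post(&l->writer_sem); }
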