Lines matching refs: sem (kernel/locking/rwsem.c)
Excerpts keyed by source line number.
68 # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
71         #c, atomic_long_read(&(sem)->count), \
72         (unsigned long) sem->magic, \
73         atomic_long_read(&(sem)->owner), (long)current, \
74         list_empty(&(sem)->wait_list) ? "" : "not ")) \

78 # define DEBUG_RWSEMS_WARN_ON(c, sem)
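
The two definitions above are the CONFIG_DEBUG_RWSEMS and non-debug variants of the consistency-check macro: on failure it dumps the count word, the magic pointer, the owner word, the current task, and whether the wait list is empty. Below is a minimal userspace sketch of the same warn-once-with-state pattern using C11 atomics; dbg_warn_on and struct fake_sem are hypothetical names for illustration, not kernel API.

#include <stdio.h>
#include <stdatomic.h>

struct fake_sem {
        atomic_long count;
        atomic_long owner;
};

#define dbg_warn_on(c, sem) do {                                        \
        static int warned;                                              \
        if ((c) && !warned) {                                           \
                warned = 1;                                             \
                fprintf(stderr, "warn(%s): count=0x%lx owner=0x%lx\n",  \
                        #c,                                             \
                        (unsigned long)atomic_load(&(sem)->count),      \
                        (unsigned long)atomic_load(&(sem)->owner));     \
        }                                                               \
} while (0)

int main(void)
{
        struct fake_sem sem = { 0 };

        dbg_warn_on(atomic_load(&sem.count) != 0, &sem); /* passes quietly */
        dbg_warn_on(1, &sem);                            /* fires once */
        dbg_warn_on(1, &sem);                            /* new call site: fires once too */
        return 0;
}

Each macro expansion gets its own static flag, so, like the kernel's WARN_ONCE, the warning is suppressed per call site rather than globally.
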
140 static inline void rwsem_set_owner(struct rw_semaphore *sem)
143         atomic_long_set(&sem->owner, (long)current);

146 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
149         atomic_long_set(&sem->owner, 0);

155 static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
157         return atomic_long_read(&sem->owner) & flags;

170 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
174                 (atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);
176         atomic_long_set(&sem->owner, val);

179 static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
181         __rwsem_set_reader_owned(sem, current);

188 static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
191                 (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);

197 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
202         long count = atomic_long_read(&sem->count);
206         return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
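
All of these helpers treat sem->owner as a tagged pointer: the owning task_struct with RWSEM_READER_OWNED and RWSEM_NONSPINNABLE folded into the low, alignment-guaranteed-zero bits, and RWSEM_OWNER_FLAGS_MASK separating the two halves. A compilable userspace model of that packing, assuming the kernel's bit assignments (reader-owned in bit 0, nonspinnable in bit 1):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define READER_OWNED     (1UL << 0)
#define NONSPINNABLE     (1UL << 1)
#define OWNER_FLAGS_MASK (READER_OWNED | NONSPINNABLE)

struct task { long dummy; };            /* stand-in for task_struct */

static atomic_long owner;

static void set_reader_owned(struct task *t, unsigned long keep)
{
        /* like __rwsem_set_reader_owned(): preserve NONSPINNABLE */
        atomic_store(&owner, (long)((uintptr_t)t | READER_OWNED | keep));
}

static struct task *get_owner(unsigned long *flags)
{
        long v = atomic_load(&owner);

        *flags = v & OWNER_FLAGS_MASK;
        return (struct task *)(uintptr_t)(v & ~OWNER_FLAGS_MASK);
}

int main(void)
{
        struct task t;
        unsigned long flags;

        set_reader_owned(&t, NONSPINNABLE);
        assert(get_owner(&flags) == &t);
        assert(flags == (READER_OWNED | NONSPINNABLE));
        return 0;
}

The pointer's natural alignment is what makes the two low bits free for flags; rwsem_owner() above is exactly the "mask the flags back off" half of this scheme.
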
215 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
217         unsigned long val = atomic_long_read(&sem->owner);
220         if (atomic_long_try_cmpxchg(&sem->owner, &val,

226 static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)

235 static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
237         unsigned long owner = atomic_long_read(&sem->owner);
244         } while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
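
rwsem_set_nonspinnable() sets its flag with the usual try-cmpxchg retry loop: read the owner word, give up if another CPU already set the bit, otherwise cmpxchg and retry on failure (a failed compare-exchange refreshes the expected value). A stripped-down C11 sketch of just that loop; the real function also bails out unless the owner is a reader.

#include <stdatomic.h>
#include <stdio.h>

#define NONSPINNABLE (1UL << 1)

static atomic_long owner;

static void set_nonspinnable(void)
{
        long old = atomic_load(&owner);

        do {
                if (old & NONSPINNABLE)         /* someone beat us to it */
                        return;
        } while (!atomic_compare_exchange_weak(&owner, &old,
                                               old | NONSPINNABLE));
}

int main(void)
{
        atomic_store(&owner, 0x1000);           /* pretend owner pointer */
        set_nonspinnable();
        printf("owner = 0x%lx\n", (unsigned long)atomic_load(&owner));
        return 0;
}
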
248 static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
250         *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
253                 rwsem_set_nonspinnable(sem);
256                 rwsem_set_reader_owned(sem);

263 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
267         if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
268                 rwsem_set_owner(sem);
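
The two trylock fast paths work on the count word: a reader unconditionally adds RWSEM_READER_BIAS and inspects the returned value for writer bits, while a writer can only cmpxchg a fully idle word to RWSEM_WRITER_LOCKED. A self-contained model assuming the kernel's layout (lock/flag bits at the bottom, reader count from bit 8); unlike the kernel, the failed reader here backs its bias out itself instead of falling into a slowpath.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

#define WRITER_LOCKED (1L << 0)
#define READER_BIAS   (1L << 8)
#define WRITER_MASK   WRITER_LOCKED     /* simplified: no handoff bit */

static atomic_long count;

static bool read_trylock(void)
{
        /* add_return: fetch_add gives the old value, so add the bias back */
        long c = atomic_fetch_add(&count, READER_BIAS) + READER_BIAS;

        if (c & WRITER_MASK) {          /* a writer holds it: back out */
                atomic_fetch_sub(&count, READER_BIAS);
                return false;
        }
        return true;
}

static bool write_trylock(void)
{
        long expected = 0;              /* only an idle lock can be taken */

        return atomic_compare_exchange_strong(&count, &expected, WRITER_LOCKED);
}

int main(void)
{
        assert(read_trylock());                 /* first reader gets in */
        assert(!write_trylock());               /* writer blocked by reader */
        atomic_fetch_sub(&count, READER_BIAS);  /* reader unlock */
        assert(write_trylock());                /* now the writer wins */
        return 0;
}
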
280 rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
282         unsigned long owner = atomic_long_read(&sem->owner);

308 void __init_rwsem(struct rw_semaphore *sem, const char *name,
315         debug_check_no_locks_freed((void *)sem, sizeof(*sem));
316         lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
319         sem->magic = sem;
321         atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
322         raw_spin_lock_init(&sem->wait_lock);
323         INIT_LIST_HEAD(&sem->wait_list);
324         atomic_long_set(&sem->owner, 0L);
326         osq_lock_init(&sem->osq);
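
__init_rwsem() is what the init_rwsem() macro expands to for dynamically initialized locks; static locks go through DECLARE_RWSEM instead. A minimal kernel-style usage sketch (illustrative only; cfg_rwsem and the two accessors are invented for the example):

#include <linux/rwsem.h>

static DECLARE_RWSEM(cfg_rwsem);        /* static init; no init_rwsem() needed */
static int cfg_value;

int cfg_read(void)
{
        int v;

        down_read(&cfg_rwsem);          /* shared: concurrent readers allowed */
        v = cfg_value;
        up_read(&cfg_rwsem);
        return v;
}

void cfg_write(int v)
{
        down_write(&cfg_rwsem);         /* exclusive: waits out readers and writers */
        cfg_value = v;
        up_write(&cfg_rwsem);
}
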
343 #define rwsem_first_waiter(sem) \
344         list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

368 rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
370         lockdep_assert_held(&sem->wait_lock);
371         list_add_tail(&waiter->list, &sem->wait_list);

384 rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter)
386         lockdep_assert_held(&sem->wait_lock);
388         if (likely(!list_empty(&sem->wait_list)))
391         atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);
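
Waiters queue FIFO on sem->wait_list under wait_lock, and when the last waiter leaves, the HANDOFF and WAITERS bits are cleared from the count in one atomic andnot. A userspace sketch of that bookkeeping, with a tiny intrusive ring standing in for list_head (single-threaded, so the "wait_lock held" assumption is implicit; in the kernel, setting WAITERS is done by the slowpath callers rather than by add itself):

#include <assert.h>
#include <stdatomic.h>

#define FLAG_WAITERS (1L << 1)
#define FLAG_HANDOFF (1L << 2)

struct waiter { struct waiter *next, *prev; };

static struct waiter wait_list = { &wait_list, &wait_list }; /* empty ring */
static atomic_long count;

static void add_waiter(struct waiter *w)        /* list_add_tail() shape */
{
        w->prev = wait_list.prev;
        w->next = &wait_list;
        wait_list.prev->next = w;
        wait_list.prev = w;
        atomic_fetch_or(&count, FLAG_WAITERS);
}

static void del_waiter(struct waiter *w)
{
        w->prev->next = w->next;
        w->next->prev = w->prev;
        if (wait_list.next == &wait_list)       /* last waiter gone */
                atomic_fetch_and(&count, ~(FLAG_WAITERS | FLAG_HANDOFF));
}

int main(void)
{
        struct waiter a, b;

        add_waiter(&a);
        add_waiter(&b);
        del_waiter(&a);
        assert(atomic_load(&count) & FLAG_WAITERS);     /* b still queued */
        del_waiter(&b);
        assert(atomic_load(&count) == 0);               /* flags cleared */
        return 0;
}
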
409 static void rwsem_mark_wake(struct rw_semaphore *sem,
417         lockdep_assert_held(&sem->wait_lock);
423         waiter = rwsem_first_waiter(sem);
444         if (unlikely(atomic_long_read(&sem->count) < 0))
456         oldcount = atomic_long_fetch_add(adjustment, &sem->count);
471                 atomic_long_add(-adjustment, &sem->count);
481         __rwsem_set_reader_owned(sem, owner);
508         list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
525         oldcount = atomic_long_read(&sem->count);
526         if (list_empty(&sem->wait_list)) {
544         atomic_long_add(adjustment, &sem->count);
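
rwsem_mark_wake() grants the lock to a whole batch of queued readers with a single adjustment of the count word: one RWSEM_READER_BIAS per woken reader, minus the WAITERS flag if the queue drained. A small model of just that arithmetic (the real function also handles handoff and the race with a writer stealing the lock mid-wake):

#include <assert.h>
#include <stdatomic.h>

#define FLAG_WAITERS (1L << 1)
#define READER_BIAS  (1L << 8)

static atomic_long count;

/* grant 'woken' readers at once; drop FLAG_WAITERS if the queue is
 * now empty, all in one atomic add, as rwsem_mark_wake() does */
static void mark_wake_readers(int woken, int queue_now_empty)
{
        long adjustment = (long)woken * READER_BIAS;

        if (queue_now_empty)
                adjustment -= FLAG_WAITERS;
        atomic_fetch_add(&count, adjustment);
}

int main(void)
{
        atomic_store(&count, FLAG_WAITERS);     /* three readers queued */
        mark_wake_readers(3, 1);
        assert(atomic_load(&count) == 3 * READER_BIAS);
        return 0;
}
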
575 rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
577         __releases(&sem->wait_lock)
579         bool first = rwsem_first_waiter(sem) == waiter;
588         if (rwsem_del_waiter(sem, waiter) && first)
589                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, wake_q);
590         raw_spin_unlock_irq(&sem->wait_lock);

602 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
605         struct rwsem_waiter *first = rwsem_first_waiter(sem);
608         lockdep_assert_held(&sem->wait_lock);
610         count = atomic_long_read(&sem->count);
641                 if (list_is_singular(&sem->wait_list))
644         } while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
662         rwsem_set_owner(sem);

688 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
690         long count = atomic_long_read(&sem->count);
693         if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
695                 rwsem_set_owner(sem);

703 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
718         owner = rwsem_owner_flags(sem, &flags);

745 rwsem_spin_on_owner(struct rw_semaphore *sem)
753         owner = rwsem_owner_flags(sem, &flags);
765                 new = rwsem_owner_flags(sem, &new_flags);
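
Both spinning helpers start from rwsem_owner_flags() and then classify the owner word to decide whether optimistic spinning is worthwhile. A compilable sketch of that classification, modeled on rwsem_owner_state() in rwsem.c (bit values assumed from the file; simplified to take the packed word directly instead of a task pointer plus flags):

#include <assert.h>
#include <stdint.h>

#define READER_OWNED (1UL << 0)
#define NONSPINNABLE (1UL << 1)
#define FLAGS_MASK   (READER_OWNED | NONSPINNABLE)

enum owner_state { OWNER_NULL, OWNER_WRITER, OWNER_READER, OWNER_NONSPINNABLE };

static enum owner_state owner_state(unsigned long owner_word)
{
        unsigned long flags = owner_word & FLAGS_MASK;
        void *task = (void *)(uintptr_t)(owner_word & ~FLAGS_MASK);

        if (flags & NONSPINNABLE)
                return OWNER_NONSPINNABLE;      /* stop optimistic spinning */
        if (flags & READER_OWNED)
                return OWNER_READER;            /* spin only a bounded time */
        return task ? OWNER_WRITER : OWNER_NULL;
}

int main(void)
{
        assert(owner_state(0) == OWNER_NULL);
        assert(owner_state(0x1000) == OWNER_WRITER);
        assert(owner_state(0x1000 | READER_OWNED) == OWNER_READER);
        assert(owner_state(NONSPINNABLE) == OWNER_NONSPINNABLE);
        return 0;
}

A spinning writer keeps going while this returns OWNER_WRITER and the owner stays on-CPU; OWNER_READER is why rwsem_rspin_threshold() below puts a time cap on spinning against readers.
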
804 static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
806         long count = atomic_long_read(&sem->count);

817 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
825         if (!osq_lock(&sem->osq))
837                 owner_state = rwsem_spin_on_owner(sem);
844                         taken = rwsem_try_write_lock_unqueued(sem);
861                         if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
863                         rspin_threshold = rwsem_rspin_threshold(sem);
875                                 rwsem_set_nonspinnable(sem);
929         osq_unlock(&sem->osq);

939 static inline void clear_nonspinnable(struct rw_semaphore *sem)
941         if (unlikely(rwsem_test_oflags(sem, RWSEM_NONSPINNABLE)))
942                 atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);

946 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
951 static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
956 static inline void clear_nonspinnable(struct rw_semaphore *sem) { }
959 rwsem_spin_on_owner(struct rw_semaphore *sem)

973 static inline void rwsem_cond_wake_waiter(struct rw_semaphore *sem, long count,
985                 clear_nonspinnable(sem);
987         rwsem_mark_wake(sem, wake_type, wake_q);

994 rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
1006         if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
1014                 rwsem_set_reader_owned(sem);
1022                 raw_spin_lock_irq(&sem->wait_lock);
1023                 if (!list_empty(&sem->wait_list))
1024                         rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
1026                 raw_spin_unlock_irq(&sem->wait_lock);
1029                 return sem;
1038         raw_spin_lock_irq(&sem->wait_lock);
1039         if (list_empty(&sem->wait_list)) {
1046                 if (!(atomic_long_read(&sem->count) & RWSEM_WRITER_MASK)) {
1049                         raw_spin_unlock_irq(&sem->wait_lock);
1050                         rwsem_set_reader_owned(sem);
1052                         return sem;
1056         rwsem_add_waiter(sem, &waiter);
1059         count = atomic_long_add_return(adjustment, &sem->count);
1061         rwsem_cond_wake_waiter(sem, count, &wake_q);
1062         raw_spin_unlock_irq(&sem->wait_lock);
1067         trace_contention_begin(sem, LCB_F_READ);
1077                 raw_spin_lock_irq(&sem->wait_lock);
1080                 raw_spin_unlock_irq(&sem->wait_lock);
1090         trace_contention_end(sem, 0);
1091         return sem;
1094         rwsem_del_wake_waiter(sem, &waiter, &wake_q);
1097         trace_contention_end(sem, -EINTR);
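
The core of the read slowpath is the sleep loop: the task queues itself on the wait list, then alternates set_current_state() and schedule() until the waker clears waiter.task. A userspace model of that handshake using a condvar in place of schedule() (compile with -lpthread; the kernel uses no condvar, this only mirrors the "loop until the waker flips a field" shape):

#include <pthread.h>
#include <stdio.h>

struct waiter {
        int task_pending;               /* stands in for waiter->task */
        pthread_mutex_t lock;
        pthread_cond_t  cond;
};

static void *waker(void *arg)
{
        struct waiter *w = arg;

        pthread_mutex_lock(&w->lock);
        w->task_pending = 0;            /* grant the lock to the waiter */
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
        return NULL;
}

int main(void)
{
        struct waiter w = { 1, PTHREAD_MUTEX_INITIALIZER,
                            PTHREAD_COND_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, waker, &w);

        pthread_mutex_lock(&w.lock);
        while (w.task_pending)          /* loop, as the kernel re-checks */
                pthread_cond_wait(&w.cond, &w.lock);
        pthread_mutex_unlock(&w.lock);

        pthread_join(t, NULL);
        puts("reader woken, lock acquired");
        return 0;
}

The -EINTR exit at line 1097 is the other way out of that loop: a signal arrives first, so the waiter deletes itself from the queue (rwsem_del_wake_waiter above) instead of being granted the lock.
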
1105 rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
1111         if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
1113                 return sem;
1125         raw_spin_lock_irq(&sem->wait_lock);
1126         rwsem_add_waiter(sem, &waiter);
1129         if (rwsem_first_waiter(sem) != &waiter) {
1130                 rwsem_cond_wake_waiter(sem, atomic_long_read(&sem->count),
1137                 raw_spin_unlock_irq(&sem->wait_lock);
1139                 raw_spin_lock_irq(&sem->wait_lock);
1142                 atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
1147         trace_contention_begin(sem, LCB_F_WRITE);
1150                 if (rwsem_try_write_lock(sem, &waiter)) {
1155                 raw_spin_unlock_irq(&sem->wait_lock);
1171                         owner_state = rwsem_spin_on_owner(sem);
1180                 raw_spin_lock_irq(&sem->wait_lock);
1183         raw_spin_unlock_irq(&sem->wait_lock);
1185         trace_contention_end(sem, 0);
1186         return sem;
1190         raw_spin_lock_irq(&sem->wait_lock);
1191         rwsem_del_wake_waiter(sem, &waiter, &wake_q);
1193         trace_contention_end(sem, -EINTR);

1201 static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
1206         raw_spin_lock_irqsave(&sem->wait_lock, flags);
1208         if (!list_empty(&sem->wait_list))
1209                 rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
1211         raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1214         return sem;

1222 static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
1227         raw_spin_lock_irqsave(&sem->wait_lock, flags);
1229         if (!list_empty(&sem->wait_list))
1230                 rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
1232         raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
1235         return sem;

1241 static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
1247         if (!rwsem_read_trylock(sem, &count)) {
1248                 if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
1252         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);

1259 static __always_inline void __down_read(struct rw_semaphore *sem)
1261         __down_read_common(sem, TASK_UNINTERRUPTIBLE);

1264 static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
1266         return __down_read_common(sem, TASK_INTERRUPTIBLE);

1269 static __always_inline int __down_read_killable(struct rw_semaphore *sem)
1271         return __down_read_common(sem, TASK_KILLABLE);

1274 static inline int __down_read_trylock(struct rw_semaphore *sem)
1279         DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1282         tmp = atomic_long_read(&sem->count);
1284                 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
1286                         rwsem_set_reader_owned(sem);

1298 static __always_inline int __down_write_common(struct rw_semaphore *sem, int state)
1303         if (unlikely(!rwsem_write_trylock(sem))) {
1304                 if (IS_ERR(rwsem_down_write_slowpath(sem, state)))

1311 static __always_inline void __down_write(struct rw_semaphore *sem)
1313         __down_write_common(sem, TASK_UNINTERRUPTIBLE);

1316 static __always_inline int __down_write_killable(struct rw_semaphore *sem)
1318         return __down_write_common(sem, TASK_KILLABLE);

1321 static inline int __down_write_trylock(struct rw_semaphore *sem)
1326         DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1327         ret = rwsem_write_trylock(sem);

1336 static inline void __up_read(struct rw_semaphore *sem)
1340         DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1341         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1344         rwsem_clear_reader_owned(sem);
1345         tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
1346         DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
1349                 clear_nonspinnable(sem);
1350                 rwsem_wake(sem);

1358 static inline void __up_write(struct rw_semaphore *sem)
1362         DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
1367         DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
1368                              !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
1371         rwsem_clear_owner(sem);
1372         tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
1374                 rwsem_wake(sem);

1381 static inline void __downgrade_write(struct rw_semaphore *sem)
1392         DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
1395                         -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
1396         rwsem_set_reader_owned(sem);
1398                 rwsem_downgrade_wake(sem);
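
__downgrade_write() converts write ownership to read ownership without ever unlocking: a single atomic add of -RWSEM_WRITER_LOCKED + RWSEM_READER_BIAS (line 1395) swaps the writer bit for one reader count, after which any queued readers can be woken via rwsem_downgrade_wake(). The arithmetic in isolation, as a runnable sketch:

#include <assert.h>
#include <stdatomic.h>

#define WRITER_LOCKED (1L << 0)
#define READER_BIAS   (1L << 8)

static atomic_long count;

static void downgrade_write(void)
{
        /* -WRITER_LOCKED + READER_BIAS, exactly as in line 1395 above */
        atomic_fetch_add(&count, -WRITER_LOCKED + READER_BIAS);
}

int main(void)
{
        atomic_store(&count, WRITER_LOCKED);            /* held for write */
        downgrade_write();
        assert(atomic_load(&count) == READER_BIAS);     /* now one reader */
        return 0;
}

Because the word never passes through zero, no other writer can sneak in between "release write" and "acquire read"; that atomicity is the whole point of downgrade.
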
1439 void __init_rwsem(struct rw_semaphore *sem, const char *name,
1442         init_rwbase_rt(&(sem)->rwbase);
1445         debug_check_no_locks_freed((void *)sem, sizeof(*sem));
1446         lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);

1451 static inline void __down_read(struct rw_semaphore *sem)
1453         rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);

1456 static inline int __down_read_interruptible(struct rw_semaphore *sem)
1458         return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);

1461 static inline int __down_read_killable(struct rw_semaphore *sem)
1463         return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);

1466 static inline int __down_read_trylock(struct rw_semaphore *sem)
1468         return rwbase_read_trylock(&sem->rwbase);

1471 static inline void __up_read(struct rw_semaphore *sem)
1473         rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);

1476 static inline void __sched __down_write(struct rw_semaphore *sem)
1478         rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);

1481 static inline int __sched __down_write_killable(struct rw_semaphore *sem)
1483         return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);

1486 static inline int __down_write_trylock(struct rw_semaphore *sem)
1488         return rwbase_write_trylock(&sem->rwbase);

1491 static inline void __up_write(struct rw_semaphore *sem)
1493         rwbase_write_unlock(&sem->rwbase);

1496 static inline void __downgrade_write(struct rw_semaphore *sem)
1498         rwbase_write_downgrade(&sem->rwbase);

1502 #define DEBUG_RWSEMS_WARN_ON(c, sem)

1504 static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,

1509 static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
1511         int count = atomic_read(&sem->rwbase.readers);

1521 void __sched down_read(struct rw_semaphore *sem)
1524         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1526         LOCK_CONTENDED(sem, __down_read_trylock, __down_read);

1530 int __sched down_read_interruptible(struct rw_semaphore *sem)
1533         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1535         if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
1536                 rwsem_release(&sem->dep_map, _RET_IP_);

1544 int __sched down_read_killable(struct rw_semaphore *sem)
1547         rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
1549         if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1550                 rwsem_release(&sem->dep_map, _RET_IP_);

1561 int down_read_trylock(struct rw_semaphore *sem)
1563         int ret = __down_read_trylock(sem);
1566                 rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

1574 void __sched down_write(struct rw_semaphore *sem)
1577         rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1578         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);

1585 int __sched down_write_killable(struct rw_semaphore *sem)
1588         rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
1590         if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1592                 rwsem_release(&sem->dep_map, _RET_IP_);

1603 int down_write_trylock(struct rw_semaphore *sem)
1605         int ret = __down_write_trylock(sem);
1608                 rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

1617 void up_read(struct rw_semaphore *sem)
1619         rwsem_release(&sem->dep_map, _RET_IP_);
1620         __up_read(sem);

1627 void up_write(struct rw_semaphore *sem)
1629         rwsem_release(&sem->dep_map, _RET_IP_);
1630         __up_write(sem);

1637 void downgrade_write(struct rw_semaphore *sem)
1639         lock_downgrade(&sem->dep_map, _RET_IP_);
1640         __downgrade_write(sem);

1646 void down_read_nested(struct rw_semaphore *sem, int subclass)
1649         rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1650         LOCK_CONTENDED(sem, __down_read_trylock, __down_read);

1654 int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
1657         rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
1659         if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
1660                 rwsem_release(&sem->dep_map, _RET_IP_);

1668 void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
1671         rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
1672         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);

1676 void down_read_non_owner(struct rw_semaphore *sem)
1679         __down_read(sem);
1686         __rwsem_set_reader_owned(sem, NULL);

1690 void down_write_nested(struct rw_semaphore *sem, int subclass)
1693         rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1694         LOCK_CONTENDED(sem, __down_write_trylock, __down_write);

1698 int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
1701         rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
1703         if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
1705                 rwsem_release(&sem->dep_map, _RET_IP_);

1713 void up_read_non_owner(struct rw_semaphore *sem)
1715         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
1716         __up_read(sem);
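
Lines 1439-1511 are the PREEMPT_RT build, where every operation forwards to the rtmutex-based rwbase core; lines 1521-1716 are the lockdep-instrumented public API shared by both builds. A short kernel-style sketch of how a caller typically combines these entry points (illustrative; map_rwsem and both functions are invented for the example):

#include <linux/rwsem.h>
#include <linux/types.h>

static DECLARE_RWSEM(map_rwsem);

/* opportunistic read: back off instead of sleeping if write-held */
static bool map_try_peek(void)
{
        if (!down_read_trylock(&map_rwsem))
                return false;
        /* ... read shared state ... */
        up_read(&map_rwsem);
        return true;
}

/* publish, then keep reading: downgrade instead of drop-and-retake */
static void map_update_and_scan(void)
{
        down_write(&map_rwsem);
        /* ... modify shared state ... */
        downgrade_write(&map_rwsem);    /* other readers may now enter */
        /* ... continue under the read lock ... */
        up_read(&map_rwsem);
}

The _nested and _nest_lock variants in the listing exist only to tell lockdep about intentional same-class nesting, and the _non_owner pair suppresses the "released by the acquiring task" ownership checks seen throughout the debug assertions above.
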