/fs/bcachefs/
six.c
  91  EBUG_ON(lock->owner);  in six_set_owner()
  92  lock->owner = owner;  in six_set_owner()
  321  if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))  in six_relock_ip()
  495  six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);  in six_lock_slowpath()
  570  lock->owner = NULL;  in do_six_unlock_type()
  573  lock->readers) {  in do_six_unlock_type()
  630  lock->seq++;  in six_unlock_ip()
  676  if (lock->readers)  in six_lock_tryupgrade()
  822  if (lock->readers) {  in six_lock_readers_add()
  841  WARN_ON(lock->readers && pcpu_read_count(lock));  in six_lock_exit()
  [all …]
six.h
  160  void six_lock_exit(struct six_lock *lock);
  175  #define six_lock_init(lock, flags, gfp) \  argument
  179  __six_lock_init((lock), #lock, &__key, flags, gfp); \
  195  return lock->seq;  in six_lock_seq()
  209  return six_trylock_ip(lock, type, _THIS_IP_);  in six_trylock_type()
  291  return six_relock_ip(lock, type, seq, _THIS_IP_);  in six_relock_type()
  312  six_unlock_ip(lock, type, _THIS_IP_);  in six_unlock_type()
  318  return six_trylock_ip(lock, SIX_LOCK_##type, ip); \
  354  return six_lock_ip_##type(lock, fn, p, _THIS_IP_); \
  359  six_unlock_ip(lock, SIX_LOCK_##type, ip); \
  [all …]
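
The six.h hits above are the sequence-number half of the SIX-lock API: six_lock_seq() reads the lock's current sequence, and six_relock_type() re-takes the lock only if that sequence has not changed since. A minimal, hypothetical sketch of the resulting drop-and-relock pattern (the caller context, do_blocking_work() and the retry label are illustrative, not taken from these files):

        u32 seq = six_lock_seq(lock);

        /* Drop the lock so we can block without holding it. */
        six_unlock_type(lock, SIX_LOCK_read);
        do_blocking_work();             /* stand-in for whatever must run unlocked */

        /*
         * Only re-take the lock if nothing modified the protected object in
         * the meantime; a changed sequence number means we must start over
         * (the retry label is illustrative).
         */
        if (!six_relock_type(lock, SIX_LOCK_read, seq))
                goto retry;
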
seqmutex.h
  8  struct mutex lock;  member
  12  #define seqmutex_init(_lock) mutex_init(&(_lock)->lock)
  16  return mutex_trylock(&lock->lock);  in seqmutex_trylock()
  19  static inline void seqmutex_lock(struct seqmutex *lock)  in seqmutex_lock() argument
  21  mutex_lock(&lock->lock);  in seqmutex_lock()
  22  lock->seq++;  in seqmutex_lock()
  27  u32 seq = lock->seq;  in seqmutex_unlock()
  28  mutex_unlock(&lock->lock);  in seqmutex_unlock()
  34  if (lock->seq != seq || !mutex_trylock(&lock->lock))  in seqmutex_relock()
  37  if (lock->seq != seq) {  in seqmutex_relock()
  [all …]
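
seqmutex.h pairs a plain mutex with a sequence counter that is bumped on every acquisition; judging from the snippets, seqmutex_unlock() hands back the sequence it observed and seqmutex_relock() succeeds only while that sequence is still current. A hypothetical usage sketch (the seqmutex s and walk_list() are illustrative):

        struct seqmutex s;
        u32 seq;

        seqmutex_init(&s);

        seqmutex_lock(&s);
        walk_list();                    /* stand-in: whatever the seqmutex protects */
        seq = seqmutex_unlock(&s);

        /* ... sleep, or do work that must not hold the mutex ... */

        if (!seqmutex_relock(&s, seq)) {
                /* someone else took the mutex in between: restart the walk */
        }
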
two_state_shared_lock.h
  20  static inline void two_state_lock_init(two_state_lock_t *lock)  in two_state_lock_init() argument
  22  atomic_long_set(&lock->v, 0);  in two_state_lock_init()
  23  init_waitqueue_head(&lock->wait);  in two_state_lock_init()
  26  static inline void bch2_two_state_unlock(two_state_lock_t *lock, int s)  in bch2_two_state_unlock() argument
  30  EBUG_ON(atomic_long_read(&lock->v) == 0);  in bch2_two_state_unlock()
  32  if (atomic_long_sub_return_release(i, &lock->v) == 0)  in bch2_two_state_unlock()
  33  wake_up_all(&lock->wait);  in bch2_two_state_unlock()
  41  old = atomic_long_read(&lock->v);  in bch2_two_state_trylock()
  45  } while (!atomic_long_try_cmpxchg_acquire(&lock->v, &old, old + i));  in bch2_two_state_trylock()
  54  if (!bch2_two_state_trylock(lock, s))  in bch2_two_state_lock()
  [all …]
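
A two_state_lock_t has exactly two states: any number of holders may share the same state, but the two states exclude each other, with waiters parked on the wait queue until the opposite side drains. A hypothetical sketch of the API shown above, calling the two states simply 0 and 1:

        two_state_lock_t l;

        two_state_lock_init(&l);

        bch2_two_state_lock(&l, 0);     /* shared with any other state-0 holders */
        /* ... */
        bch2_two_state_unlock(&l, 0);

        if (bch2_two_state_trylock(&l, 1)) {
                /* got state 1, so no state-0 holders exist right now */
                bch2_two_state_unlock(&l, 1);
        }
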
btree_cache.c
  61  if (b->c.lock.readers)  in btree_node_to_freedlist()
  80  mutex_lock(&bc->lock);  in bch2_btree_node_to_freelist()
  82  mutex_unlock(&bc->lock);  in bch2_btree_node_to_freelist()
  217  mutex_lock(&bc->lock);  in bch2_node_pin()
  224  mutex_unlock(&bc->lock);  in bch2_node_pin()
  232  mutex_lock(&bc->lock);  in bch2_btree_cache_unpin()
  298  mutex_lock(&bc->lock);  in bch2_btree_node_hash_insert()
  475  mutex_lock(&bc->lock);  in bch2_btree_cache_scan()
  586  mutex_lock(&bc->lock);  in bch2_fs_btree_cache_exit()
  690  mutex_init(&bc->lock);  in bch2_fs_btree_cache_init_early()
  [all …]
/fs/ocfs2/dlm/
dlmast.c
  79  BUG_ON(!lock);  in __dlm_queue_ast()
  81  res = lock->lockres;  in __dlm_queue_ast()
  91  lock->ast_pending, lock->ml.type);  in __dlm_queue_ast()
  101  dlm_lock_get(lock);  in __dlm_queue_ast()
  132  BUG_ON(!lock);  in dlm_queue_ast()
  145  BUG_ON(!lock);  in __dlm_queue_bast()
  206  fn = lock->ast;  in dlm_do_local_ast()
  226  lksb = lock->lksb;  in dlm_do_remote_ast()
  346  lock = NULL;  in dlm_proxy_ast_handler()
  387  lock->ml.type, lock->ml.convert_type);  in dlm_proxy_ast_handler()
  [all …]
dlmlock.c
  87  lock->ml.type))  in dlm_can_grant_new_lock()
  129  dlm_lock_get(lock);  in dlmlock_master()
  412  if (!lock)  in dlm_new_lock()
  430  return lock;  in dlm_new_lock()
  587  if (!lock) {  in dlmlock()
  606  if (lock->lksb != lksb || lock->ast != ast ||  in dlmlock()
  607  lock->bast != bast || lock->astdata != data) {  in dlmlock()
  612  "astdata=%p\n", lock->lksb, lock->ast,  in dlmlock()
  613  lock->bast, lock->astdata);  in dlmlock()
  652  if (!lock) {  in dlmlock()
  [all …]
dlmconvert.c
  114  lock->ml.type, lock->ml.convert_type, type);  in __dlmconvert_master()
  171  if (tmplock == lock)  in __dlmconvert_master()
  194  lock->ml.type = type;  in __dlmconvert_master()
  280  if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {  in dlmconvert_remote()
  287  lock->ml.type, lock->ml.convert_type);  in dlmconvert_remote()
  485  lock = tmp_lock;  in dlm_convert_lock_handler()
  486  dlm_lock_get(lock);  in dlm_convert_lock_handler()
  491  if (!lock) {  in dlm_convert_lock_handler()
  502  lksb = lock->lksb;  in dlm_convert_lock_handler()
  537  if (lock)  in dlm_convert_lock_handler()
  [all …]
dlmunlock.c
  205  dlm_lock_get(lock);  in dlmunlock_common()
  209  dlm_lock_put(lock);  in dlmunlock_common()
  212  dlm_lock_get(lock);  in dlmunlock_common()
  222  dlm_lock_put(lock);  in dlmunlock_common()
  253  dlm_lock_put(lock);  in dlmunlock_common()
  467  lock = iter;  in dlm_unlock_lock_handler()
  471  if (lock)  in dlm_unlock_lock_handler()
  477  if (!lock) {  in dlm_unlock_lock_handler()
  483  lksb = lock->lksb;  in dlm_unlock_lock_handler()
  507  if (!lock)  in dlm_unlock_lock_handler()
  [all …]
dlmthread.c
  371  if (lock==target)  in dlm_shuffle_lists()
  389  if (lock==target)  in dlm_shuffle_lists()
  438  if (lock==target)  in dlm_shuffle_lists()
  452  if (lock==target)  in dlm_shuffle_lists()
  581  dlm_lock_get(lock);  in dlm_flush_asts()
  588  lock->ml.type, lock->ml.node);  in dlm_flush_asts()
  594  dlm_lock_put(lock);  in dlm_flush_asts()
  617  dlm_lock_put(lock);  in dlm_flush_asts()
  625  dlm_lock_get(lock);  in dlm_flush_asts()
  639  dlm_lock_put(lock);  in dlm_flush_asts()
  [all …]
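
The recurring pattern across dlmast.c, dlmunlock.c and dlmthread.c is reference counting around asynchronous work: dlm_lock_get() pins a lock before it is queued for AST/BAST delivery, and dlm_lock_put() drops the pin once delivery is done. A generic sketch of that idiom (queue_for_ast() and deliver_ast() are illustrative stand-ins, not OCFS2 functions):

        dlm_lock_get(lock);             /* pin while the lock sits on the AST list */
        queue_for_ast(lock);            /* stand-in for the real queueing */

        /* ... later, from the dlm thread ... */

        deliver_ast(lock);              /* stand-in for the real delivery */
        dlm_lock_put(lock);             /* may free the lock if this was the last reference */
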
dlmdebug.c
  65  lock->ml.type, lock->ml.convert_type, lock->ml.node,  in __dlm_print_lock()
  83  struct dlm_lock *lock;  in __dlm_print_one_lock_resource() local
  105  __dlm_print_lock(lock);  in __dlm_print_one_lock_resource()
  109  __dlm_print_lock(lock);  in __dlm_print_one_lock_resource()
  113  __dlm_print_lock(lock);  in __dlm_print_one_lock_resource()
  419  list_type, lock->ml.type, lock->ml.convert_type,  in dump_lock()
  420  lock->ml.node,  in dump_lock()
  425  lock->ast_pending, lock->bast_pending,  in dump_lock()
  426  lock->convert_pending, lock->lock_pending,  in dump_lock()
  427  lock->cancel_pending, lock->unlock_pending,  in dump_lock()
  [all …]
/fs/lockd/
svclock.c
  155  lock->fl.c.flc_type);  in nlmsvc_lookup_block()
  421  locks_copy_lock(&call->a_args.lock.fl, &lock->fl);  in nlmsvc_setgrantargs()
  422  memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));  in nlmsvc_setgrantargs()
  424  call->a_args.lock.oh.len = lock->oh.len;  in nlmsvc_setgrantargs()
  437  memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);  in nlmsvc_setgrantargs()
  493  lock->fl.c.flc_pid,  in nlmsvc_lock()
  514  lock = &block->b_call->a_args.lock;  in nlmsvc_lock()
  646  lock->fl.c.flc_type, (long long)lock->fl.fl_start,  in nlmsvc_testlock()
  688  &lock->fl, NULL);  in nlmsvc_unlock()
  692  &lock->fl, NULL);  in nlmsvc_unlock()
  [all …]
xdr4.c
  76  struct file_lock *fl = &lock->fl;  in svcxdr_decode_lock()
  78  if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))  in svcxdr_decode_lock()
  93  nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len);  in svcxdr_decode_lock()
  161  argp->lock.fl.c.flc_type = F_WRLCK;  in nlm4svc_decode_testargs()
  220  argp->lock.fl.c.flc_type = F_UNLCK;  in nlm4svc_decode_unlockargs()
  268  struct nlm_lock *lock = &argp->lock;  in nlm4svc_decode_shareargs() local
  270  locks_init_lock(&lock->fl);  in nlm4svc_decode_shareargs()
  271  lock->svid = ~(u32)0;  in nlm4svc_decode_shareargs()
  275  if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))  in nlm4svc_decode_shareargs()
  294  struct nlm_lock *lock = &argp->lock;  in nlm4svc_decode_notify() local
  [all …]
xdr.c
  74  struct file_lock *fl = &lock->fl;  in svcxdr_decode_lock()
  77  if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))  in svcxdr_decode_lock()
  167  argp->lock.fl.c.flc_type = F_WRLCK;  in nlmsvc_decode_testargs()
  226  argp->lock.fl.c.flc_type = F_UNLCK;  in nlmsvc_decode_unlockargs()
  274  struct nlm_lock *lock = &argp->lock;  in nlmsvc_decode_shareargs() local
  276  memset(lock, 0, sizeof(*lock));  in nlmsvc_decode_shareargs()
  277  locks_init_lock(&lock->fl);  in nlmsvc_decode_shareargs()
  278  lock->svid = ~(u32)0;  in nlmsvc_decode_shareargs()
  282  if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))  in nlmsvc_decode_shareargs()
  301  struct nlm_lock *lock = &argp->lock;  in nlmsvc_decode_notify() local
  [all …]
clnt4xdr.c
  242  const struct nlm_lock *lock = &result->lock;  in encode_nlm4_holder() local
  248  encode_netobj(xdr, lock->oh.data, lock->oh.len);  in encode_nlm4_holder()
  258  struct nlm_lock *lock = &result->lock;  in decode_nlm4_holder() local
  265  memset(lock, 0, sizeof(*lock));  in decode_nlm4_holder()
  325  encode_fh(xdr, &lock->fh);  in encode_nlm4_lock()
  326  encode_netobj(xdr, lock->oh.data, lock->oh.len);  in encode_nlm4_lock()
  357  const struct nlm_lock *lock = &args->lock;  in nlm4_xdr_enc_testargs() local
  361  encode_nlm4_lock(xdr, lock);  in nlm4_xdr_enc_testargs()
  379  const struct nlm_lock *lock = &args->lock;  in nlm4_xdr_enc_lockargs() local
  402  const struct nlm_lock *lock = &args->lock;  in nlm4_xdr_enc_cancargs() local
  [all …]
clntxdr.c
  238  const struct nlm_lock *lock = &result->lock;  in encode_nlm_holder() local
  244  encode_netobj(xdr, lock->oh.data, lock->oh.len);  in encode_nlm_holder()
  254  struct nlm_lock *lock = &result->lock;  in decode_nlm_holder() local
  261  memset(lock, 0, sizeof(*lock));  in decode_nlm_holder()
  327  encode_fh(xdr, &lock->fh);  in encode_nlm_lock()
  328  encode_netobj(xdr, lock->oh.data, lock->oh.len);  in encode_nlm_lock()
  358  const struct nlm_lock *lock = &args->lock;  in nlm_xdr_enc_testargs() local
  362  encode_nlm_lock(xdr, lock);  in nlm_xdr_enc_testargs()
  380  const struct nlm_lock *lock = &args->lock;  in nlm_xdr_enc_lockargs() local
  403  const struct nlm_lock *lock = &args->lock;  in nlm_xdr_enc_cancargs() local
  [all …]
svc4proc.c
  28  struct nlm_lock *lock = &argp->lock;  in nlm4svc_retrieve_args() local
  36  (lock->lock_len && ((lock->lock_len - 1) > (OFFSET_MAX - lock->lock_start))))  in nlm4svc_retrieve_args()
  40  if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len))  in nlm4svc_retrieve_args()
  59  lock->fl.fl_start = (loff_t)lock->lock_start;  in nlm4svc_retrieve_args()
  60  lock->fl.fl_end = lock->lock_len ?  in nlm4svc_retrieve_args()
  61  (loff_t)(lock->lock_start + lock->lock_len - 1) :  in nlm4svc_retrieve_args()
  64  nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid);  in nlm4svc_retrieve_args()
  65  if (!lock->fl.c.flc_owner) {  in nlm4svc_retrieve_args()
  113  &resp->lock);  in __nlm4svc_proc_test()
  299  argp->lock.caller,  in nlm4svc_callback()
  [all …]
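
The nlm4svc_retrieve_args() hits show how an NLM4 (offset, length) pair becomes a struct file_lock byte range: a length of zero means "to end of file", otherwise the last covered byte is start + len - 1, and a range whose end would overflow is rejected up front. Reassembled as a sketch (the nlm4_fbig return status is an assumption about the surrounding lockd code):

        if (lock->lock_len &&
            (lock->lock_len - 1) > (OFFSET_MAX - lock->lock_start))
                return nlm4_fbig;               /* assumed status; end of range would overflow */

        lock->fl.fl_start = (loff_t)lock->lock_start;
        lock->fl.fl_end = lock->lock_len ?
                (loff_t)(lock->lock_start + lock->lock_len - 1) :
                OFFSET_MAX;                     /* zero length covers the whole file */
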
svcsubs.c
  106  struct nlm_lock *lock)  in nlm_lookup_file() argument
  115  hash = file_hash(&lock->fh);  in nlm_lookup_file()
  181  struct file_lock lock;  in nlm_unlock_files() local
  183  locks_init_lock(&lock);  in nlm_unlock_files()
  184  lock.c.flc_type = F_UNLCK;  in nlm_unlock_files()
  185  lock.fl_start = 0;  in nlm_unlock_files()
  186  lock.fl_end = OFFSET_MAX;  in nlm_unlock_files()
  188  lock.c.flc_pid = fl->c.flc_pid;  in nlm_unlock_files()
  189  lock.c.flc_flags = FL_POSIX;  in nlm_unlock_files()
  192  if (lock.c.flc_file && vfs_lock_file(lock.c.flc_file, F_SETLK, &lock, NULL))  in nlm_unlock_files()
  [all …]
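
nlm_unlock_files() illustrates the stock way to drop a client's POSIX locks on a file: build an on-stack F_UNLCK request spanning the whole byte range and push it through vfs_lock_file(). Collected into one hypothetical snippet (file is an illustrative struct file pointer, and the owner/pid fields the real code also fills are omitted here):

        struct file_lock lock;

        locks_init_lock(&lock);
        lock.c.flc_type = F_UNLCK;              /* release rather than acquire */
        lock.c.flc_flags = FL_POSIX;
        lock.fl_start = 0;
        lock.fl_end = OFFSET_MAX;               /* the entire file */

        /* 'file' below stands for the struct file being unlocked */
        if (vfs_lock_file(file, F_SETLK, &lock, NULL))
                pr_warn("whole-file unlock failed\n");
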
/fs/btrfs/
locking.h
  107  rwsem_release(&owner->lock##_map, _THIS_IP_); \
  121  #define btrfs_lockdep_acquire(owner, lock) \  argument
  128  #define btrfs_lockdep_release(owner, lock) \  argument
  129  rwsem_release(&owner->lock##_map, _THIS_IP_)
  158  #define btrfs_lockdep_init_map(owner, lock) \  argument
  160  static struct lock_class_key lock##_key; \
  161  lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0); \
  167  static struct lock_class_key lock##_key; \
  169  &lock##_key, 0); \
  200  lockdep_assert_held_write(&eb->lock);  in btrfs_assert_tree_write_locked()
  [all …]
locking.c
  169  up_read(&eb->lock);  in btrfs_tree_read_unlock()
  181  __acquires(&eb->lock)  in btrfs_tree_lock_nested()
  200  up_write(&eb->lock);  in btrfs_tree_unlock()
  316  atomic_set(&lock->readers, 0);  in btrfs_drew_lock_init()
  317  atomic_set(&lock->writers, 0);  in btrfs_drew_lock_init()
  325  if (atomic_read(&lock->readers))  in btrfs_drew_try_write_lock()
  328  atomic_inc(&lock->writers);  in btrfs_drew_try_write_lock()
  333  btrfs_drew_write_unlock(lock);  in btrfs_drew_try_write_lock()
  345  wait_event(lock->pending_writers, !atomic_read(&lock->readers));  in btrfs_drew_write_lock()
  361  atomic_inc(&lock->readers);  in btrfs_drew_read_lock()
  [all …]
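
The second half of the locking.c hits is the btrfs "drew" lock: any number of readers or any number of writers may hold it at once, but never both groups together, with writers sleeping on pending_writers until the readers drain. A hypothetical usage sketch (btrfs_drew_read_unlock() is assumed to exist as the counterpart of btrfs_drew_read_lock()):

        btrfs_drew_lock_init(lock);

        if (btrfs_drew_try_write_lock(lock)) {
                /* no readers were active; other writers may share with us */
                btrfs_drew_write_unlock(lock);
        } else {
                btrfs_drew_write_lock(lock);    /* blocks until readers are gone */
                btrfs_drew_write_unlock(lock);
        }

        btrfs_drew_read_lock(lock);             /* shared with other readers */
        btrfs_drew_read_unlock(lock);           /* assumed counterpart */
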
space-info.c
  367  spin_lock(&space_info->lock);  in btrfs_add_bg_to_space_info()
  624  spin_lock(&info->lock);  in btrfs_dump_space_info()
  627  spin_unlock(&info->lock);  in btrfs_dump_space_info()
  637  spin_lock(&cache->lock);  in btrfs_dump_space_info()
  646  spin_unlock(&cache->lock);  in btrfs_dump_space_info()
  1053  spin_lock(&global_rsv->lock);  in steal_from_global_rsv()
  1146  spin_lock(&space_info->lock);  in do_async_reclaim_metadata_space()
  1954  spin_lock(&sinfo->lock);  in btrfs_account_ro_block_groups_free_space()
  1969  spin_unlock(&sinfo->lock);  in btrfs_account_ro_block_groups_free_space()
  2091  spin_lock(&bg->lock);  in do_reclaim_sweep()
  [all …]
delayed-ref.c
  38  spin_lock(&global_rsv->lock);  in btrfs_check_space_for_delayed_refs()
  40  spin_unlock(&global_rsv->lock);  in btrfs_check_space_for_delayed_refs()
  116  spin_lock(&local_rsv->lock);  in btrfs_update_delayed_refs_rsv()
  221  spin_lock(&block_rsv->lock);  in btrfs_delayed_refs_rsv_refill()
  590  spin_lock(&href->lock);  in insert_delayed_ref()
  595  spin_unlock(&href->lock);  in insert_delayed_ref()
  626  spin_unlock(&href->lock);  in insert_delayed_ref()
  1224  spin_lock(&head->lock);  in btrfs_find_delayed_tree_ref()
  1246  spin_unlock(&head->lock);  in btrfs_find_delayed_tree_ref()
  1269  spin_lock(&head->lock);  in btrfs_destroy_delayed_refs()
  [all …]
/fs/nfs_common/
nfslocalio.c
  52  spin_lock(&nfs_uuid->lock);  in nfs_uuid_begin()
  55  spin_unlock(&nfs_uuid->lock);  in nfs_uuid_begin()
  63  spin_unlock(&nfs_uuid->lock);  in nfs_uuid_begin()
  70  spin_unlock(&nfs_uuid->lock);  in nfs_uuid_begin()
  79  spin_lock(&nfs_uuid->lock);  in nfs_uuid_end()
  128  spin_lock(&nfs_uuid->lock);  in nfs_uuid_is_local()
  156  spin_lock(&nfs_uuid->lock);  in nfs_uuid_put()
  185  &nfs_uuid->lock);  in nfs_uuid_put()
  197  spin_lock(&nfs_uuid->lock);  in nfs_uuid_put()
  250  spin_lock(&nfs_uuid->lock);  in nfs_uuid_add_file()
  [all …]
/fs/nilfs2/
alloc.c
  149  spin_lock(lock);  in nilfs_palloc_group_desc_nfrees()
  151  spin_unlock(lock);  in nilfs_palloc_group_desc_nfrees()
  170  spin_lock(lock);  in nilfs_palloc_group_desc_add_entries()
  173  spin_unlock(lock);  in nilfs_palloc_group_desc_add_entries()
  229  spin_lock(lock);  in nilfs_palloc_get_block()
  237  spin_unlock(lock);  in nilfs_palloc_get_block()
  241  spin_lock(lock);  in nilfs_palloc_get_block()
  272  spin_lock(lock);  in nilfs_palloc_delete_block()
  277  spin_unlock(lock);  in nilfs_palloc_delete_block()
  592  spinlock_t *lock;  in nilfs_palloc_prepare_alloc_entry() local
  [all …]
/fs/xfs/
xfs_mru_cache.c
  230  __releases(mru->lock) __acquires(mru->lock)  in _xfs_mru_cache_clear_reap_list()
  246  spin_unlock(&mru->lock);  in _xfs_mru_cache_clear_reap_list()
  253  spin_lock(&mru->lock);  in _xfs_mru_cache_clear_reap_list()
  275  spin_lock(&mru->lock);  in _xfs_mru_cache_reap()
  289  spin_unlock(&mru->lock);  in _xfs_mru_cache_reap()
  380  spin_lock(&mru->lock);  in xfs_mru_cache_flush()
  384  spin_lock(&mru->lock);  in xfs_mru_cache_flush()
  428  spin_lock(&mru->lock);  in xfs_mru_cache_insert()
  461  spin_lock(&mru->lock);  in xfs_mru_cache_remove()
  517  spin_lock(&mru->lock);  in xfs_mru_cache_lookup()
  [all …]
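
_xfs_mru_cache_clear_reap_list() is annotated __releases(mru->lock) __acquires(mru->lock) because it is entered with the spinlock held, drops it around the call back into the element-free function, and re-takes it before returning, so its caller sees the lock state unchanged. A generic sketch of that idiom (my_cache and handle_item() are illustrative):

static void clear_list(struct my_cache *c)
        __releases(c->lock) __acquires(c->lock)
{
        spin_unlock(&c->lock);          /* the callback may sleep or take other locks */
        handle_item(c);                 /* stand-in for the real callback */
        spin_lock(&c->lock);            /* restore the lock for the caller */
}
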