/fs/nfs/
nfs4session.c
     45  p = &tbl->slots;                                                    in nfs4_shrink_slot_table()
     53  tbl->max_slots--;                                                   in nfs4_shrink_slot_table()
    111  slot->table = tbl;                                                  in nfs4_new_slot()
    125  p = &tbl->slots;                                                    in nfs4_find_or_create_slot()
    128  *p = nfs4_new_slot(tbl, tbl->max_slots,                             in nfs4_find_or_create_slot()
    132  tbl->max_slots++;                                                   in nfs4_find_or_create_slot()
    248  __func__, tbl->used_slots[0], tbl->highest_used_slotid,             in nfs4_alloc_slot()
    250  slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1); in nfs4_alloc_slot()
    257  __func__, tbl->used_slots[0], tbl->highest_used_slotid,             in nfs4_alloc_slot()
    279  p = &tbl->slots;                                                    in nfs4_reset_slot_table()
    [all …]
|
nfs4session.h
     83  extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
     85  extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
     86  extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
     88  extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
     93  extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
     94  bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
     96  void nfs41_wake_slot_table(struct nfs4_slot_table *tbl);
    100  return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);    in nfs4_slot_tbl_draining()
    106  return !!test_bit(slotid, tbl->used_slots);                         in nfs4_test_locked_slot()
    115  extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
    [all …]
|
callback_proc.c
    423  if (nfs4_test_locked_slot(tbl, slot->slot_nr))                      in validate_seqid()
    465  struct nfs4_slot_table *tbl;                                        in referring_call_exists() (local)
    474  tbl = &session->fc_slot_table;                                      in referring_call_exists()
    504  struct nfs4_slot_table *tbl;                                        in nfs4_callback_sequence() (local)
    519  tbl = &clp->cl_session->bc_slot_table;                              in nfs4_callback_sequence()
    527  spin_lock(&tbl->slot_tbl_lock);                                     in nfs4_callback_sequence()
    540  slot = nfs4_lookup_slot(tbl, args->csa_slotid);                     in nfs4_callback_sequence()
    547  status = validate_seqid(tbl, slot, args);                           in nfs4_callback_sequence()
    550  if (!nfs4_try_to_lock_slot(tbl, slot)) {                            in nfs4_callback_sequence()
    568  &tbl->slot_tbl_lock);                                               in nfs4_callback_sequence()
    [all …]
|
nfs4client.c
    343  struct nfs4_slot_table *tbl;                                        in nfs40_init_client() (local)
    346  tbl = kzalloc(sizeof(*tbl), GFP_NOFS);                              in nfs40_init_client()
    347  if (tbl == NULL)                                                    in nfs40_init_client()
    350  ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,               in nfs40_init_client()
    353  nfs4_shutdown_slot_table(tbl);                                      in nfs40_init_client()
    354  kfree(tbl);                                                         in nfs40_init_client()
    358  clp->cl_slot_tbl = tbl;                                             in nfs40_init_client()
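The nfs40_init_client() hits above trace the slot table's lifecycle: allocate it, initialise it with nfs4_setup_slot_table(), and on failure tear it down with nfs4_shutdown_slot_table() before freeing. A minimal sketch of that pattern follows; the wait-queue name string and the error handling are illustrative, only the calls visible in the hits are taken from the source.

```c
#include <linux/slab.h>
#include <linux/nfs_fs_sb.h>
#include "nfs4session.h"

/* Sketch of the setup/teardown pattern in nfs40_init_client(). */
static int example_init_slot_table(struct nfs_client *clp)
{
	struct nfs4_slot_table *tbl;
	int ret;

	tbl = kzalloc(sizeof(*tbl), GFP_NOFS);
	if (tbl == NULL)
		return -ENOMEM;

	ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
				    "example slot table");	/* queue name is made up */
	if (ret) {
		nfs4_shutdown_slot_table(tbl);
		kfree(tbl);
		return ret;
	}

	clp->cl_slot_tbl = tbl;
	return 0;
}
```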
|
nfs4state.c
    260  spin_lock(&tbl->slot_tbl_lock);                                     in nfs4_end_drain_slot_table()
    261  nfs41_wake_slot_table(tbl);                                         in nfs4_end_drain_slot_table()
    262  spin_unlock(&tbl->slot_tbl_lock);                                   in nfs4_end_drain_slot_table()
    281  static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)         in nfs4_drain_slot_tbl() (argument)
    283  set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);              in nfs4_drain_slot_tbl()
    284  spin_lock(&tbl->slot_tbl_lock);                                     in nfs4_drain_slot_tbl()
    285  if (tbl->highest_used_slotid != NFS4_NO_SLOT) {                     in nfs4_drain_slot_tbl()
    286  reinit_completion(&tbl->complete);                                  in nfs4_drain_slot_tbl()
    287  spin_unlock(&tbl->slot_tbl_lock);                                   in nfs4_drain_slot_tbl()
    288  return wait_for_completion_interruptible(&tbl->complete);           in nfs4_drain_slot_tbl()
    [all …]
|
callback_xdr.c
    830  struct nfs4_slot_table *tbl = &session->bc_slot_table;              in nfs4_callback_free_slot() (local)
    832  spin_lock(&tbl->slot_tbl_lock);                                     in nfs4_callback_free_slot()
    837  nfs4_free_slot(tbl, slot);                                          in nfs4_callback_free_slot()
    838  spin_unlock(&tbl->slot_tbl_lock);                                   in nfs4_callback_free_slot()
|
nfs4proc.c
    790  struct nfs4_slot_table *tbl;                                        in nfs40_sequence_free_slot() (local)
    792  tbl = slot->table;                                                  in nfs40_sequence_free_slot()
    793  spin_lock(&tbl->slot_tbl_lock);                                     in nfs40_sequence_free_slot()
    795  nfs4_free_slot(tbl, slot);                                          in nfs40_sequence_free_slot()
    814  struct nfs4_slot_table *tbl;                                        in nfs41_release_slot() (local)
    819  tbl = slot->table;                                                  in nfs41_release_slot()
    820  session = tbl->session;                                             in nfs41_release_slot()
    827  spin_lock(&tbl->slot_tbl_lock);                                     in nfs41_release_slot()
    831  if (tbl->highest_used_slotid > tbl->target_highest_slotid)          in nfs41_release_slot()
    838  nfs4_free_slot(tbl, slot);                                          in nfs41_release_slot()
    [all …]
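Taken together with nfs4_alloc_slot() in nfs4session.c, these hits illustrate the locking convention: a slot is both claimed and returned under tbl->slot_tbl_lock. A rough sketch of that pairing, with the sequence/seqid bookkeeping done by the real callers omitted:

```c
#include <linux/err.h>
#include "nfs4session.h"

/* Sketch only: claim and return a slot under tbl->slot_tbl_lock. */
static struct nfs4_slot *example_claim_slot(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *slot;

	spin_lock(&tbl->slot_tbl_lock);
	slot = nfs4_alloc_slot(tbl);	/* may return an ERR_PTR when none are free */
	spin_unlock(&tbl->slot_tbl_lock);
	return slot;
}

static void example_return_slot(struct nfs4_slot_table *tbl,
				struct nfs4_slot *slot)
{
	spin_lock(&tbl->slot_tbl_lock);
	nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);
}
```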
|
/fs/bcachefs/
btree_key_cache.c
    192  struct bucket_table *tbl = rht_dereference_rcu(c->table.tbl, &c->table);  in bkey_cached_reuse() (local)
    666  struct bucket_table *tbl;                                           in bch2_btree_key_cache_scan() (local)
    675  tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);               in bch2_btree_key_cache_scan()
    684  if (unlikely(tbl->nest)) {                                          in bch2_btree_key_cache_scan()
    691  if (iter >= tbl->size)                                              in bch2_btree_key_cache_scan()
    728  if (iter >= tbl->size)                                              in bch2_btree_key_cache_scan()
    762  struct bucket_table *tbl;                                           in bch2_fs_btree_key_cache_exit() (local)
    775  tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);               in bch2_fs_btree_key_cache_exit()
    776  if (tbl) {                                                          in bch2_fs_btree_key_cache_exit()
    777  if (tbl->nest) {                                                    in bch2_fs_btree_key_cache_exit()
    [all …]
|
debug.c
    517  struct bucket_table *tbl =                                          in bch2_cached_btree_nodes_read() (local)
    518  rht_dereference_rcu(c->btree_cache.table.tbl,                       in bch2_cached_btree_nodes_read()
    520  if (i->iter < tbl->size) {                                          in bch2_cached_btree_nodes_read()
    524  rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)                  in bch2_cached_btree_nodes_read()
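The bcachefs hits above all follow the same rhashtable idiom: pin the current bucket_table with rht_dereference_rcu() inside an RCU read-side section, then walk buckets with rht_for_each_entry_rcu(). A hedged, generic sketch of that scan; struct cached_item and its member names are invented for illustration, and the nested-table (tbl->nest) and resize (tbl->future_tbl) cases handled by the bcachefs code are deliberately skipped:

```c
#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

struct cached_item {			/* hypothetical entry type */
	struct rhash_head	node;	/* linkage in the rhashtable */
	u64			key;
};

/* Sketch of the scan idiom seen in bch2_btree_key_cache_scan() and
 * bch2_cached_btree_nodes_read(): walk every bucket of the current table.
 */
static void example_scan(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct cached_item *item;
	struct rhash_head *pos;
	unsigned int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(item, pos, tbl, i, node)
			pr_debug("key %llu\n", (unsigned long long)item->key);
	rcu_read_unlock();
}
```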
|
fs.c
    268  struct bucket_table *tbl = rht_dereference_rcu(ht->ht.tbl, &ht->ht);  in bch2_inode_or_descendents_is_open() (local)
    270  hash = rht_key_hashfn(&ht->ht, tbl, &inum, bch2_vfs_inodes_by_inum_params);  in bch2_inode_or_descendents_is_open()
    271  bkt = rht_bucket(tbl, hash);                                        in bch2_inode_or_descendents_is_open()
    275  rht_for_each_entry_rcu_from(inode, he, rht_ptr_rcu(bkt), tbl, hash, hash) {  in bch2_inode_or_descendents_is_open()
    297  tbl = rht_dereference_rcu(tbl->future_tbl, &ht->ht);                in bch2_inode_or_descendents_is_open()
    298  if (unlikely(tbl))                                                  in bch2_inode_or_descendents_is_open()
   2721  if (c->vfs_inodes_by_inum_table.ht.tbl)                             in bch2_fs_vfs_exit()
   2723  if (c->vfs_inodes_table.tbl)                                        in bch2_fs_vfs_exit()
|
btree_cache.h
     83  for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl, \
|
btree_io.c
   2692  struct bucket_table *tbl;                                           in __bch2_btree_flush_all() (local)
   2699  for_each_cached_btree(b, c, tbl, i, pos)                            in __bch2_btree_flush_all()
|
io_read.c
   1515  if (c->promote_table.tbl)                                           in bch2_fs_io_read_exit()
|
/fs/
fs_parser.c
     28  __lookup_constant(const struct constant_table *tbl, const char *name)  in __lookup_constant() (argument)
     30  for ( ; tbl->name; tbl++)                                           in __lookup_constant()
     31  if (strcmp(name, tbl->name) == 0)                                   in __lookup_constant()
     32  return tbl;                                                         in __lookup_constant()
     42  int lookup_constant(const struct constant_table *tbl, const char *name, int not_found)  in lookup_constant() (argument)
     44  const struct constant_table *p = __lookup_constant(tbl, name);      in lookup_constant()
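lookup_constant() scans a constant_table until it hits an entry with a NULL name, returning not_found when nothing matches. A small usage sketch; the option names and values below are invented for illustration:

```c
#include <linux/fs_parser.h>
#include <linux/errno.h>

/* Hypothetical option values; the table must end with an empty entry,
 * since __lookup_constant() stops at the first NULL name.
 */
static const struct constant_table example_modes[] = {
	{ "relaxed",	0 },
	{ "strict",	1 },
	{ "paranoid",	2 },
	{}
};

static int example_parse_mode(const char *arg)
{
	/* Returns the matching value, or -EINVAL if arg is not in the table. */
	return lookup_constant(example_modes, arg, -EINVAL);
}
```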
|
/fs/smb/client/
winucase.c
    629  const wchar_t *tbl;                                                 in cifs_toupper() (local)
    636  tbl = toplevel[idx];                                                in cifs_toupper()
    637  if (!tbl)                                                           in cifs_toupper()
    644  out = tbl[idx];                                                     in cifs_toupper()
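cifs_toupper() uses a two-level lookup: a top-level array of pointers selects a sub-table (a NULL pointer means no mappings in that range), and the low bits of the code point index into it. A sketch of that shape; the 6-bit split, the table names, and the contents are assumptions for illustration, not the generated winucase.c tables:

```c
#include <linux/kernel.h>
#include <linux/nls.h>

static const wchar_t example_range0[64];	/* all zero: no mappings in this range */

static const wchar_t * const example_toplevel[] = {
	example_range0,		/* code points 0x0000-0x003f */
	NULL,			/* 0x0040-0x007f: NULL means identity mapping */
	/* ... one slot per 64-code-point range ... */
};

static wchar_t example_toupper(wchar_t in)
{
	unsigned int idx = in >> 6;	/* pick the 64-entry sub-table (assumed split) */
	const wchar_t *tbl;
	wchar_t out;

	if (idx >= ARRAY_SIZE(example_toplevel))
		return in;		/* outside the mapped ranges */

	tbl = example_toplevel[idx];
	if (!tbl)
		return in;		/* no mappings in this range */

	out = tbl[in & 0x3f];		/* index with the low 6 bits */
	return out ? out : in;		/* zero entry: leave unchanged */
}
```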
|
/fs/nfsd/
filecache.c
   1352  struct bucket_table *tbl;                                           in nfsd_file_cache_stats_show() (local)
   1360  tbl = rht_dereference_rcu(ht->tbl, ht);                             in nfsd_file_cache_stats_show()
   1361  buckets = tbl->size;                                                in nfsd_file_cache_stats_show()
|
nfs4state.c
   3348  find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)  in find_client_in_id_table() (argument)
   3353  list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {              in find_client_in_id_table()
   3367  struct list_head *tbl = nn->conf_id_hashtbl;                        in find_confirmed_client() (local)
   3370  return find_client_in_id_table(tbl, clid, sessions);                in find_confirmed_client()
   3376  struct list_head *tbl = nn->unconf_id_hashtbl;                      in find_unconfirmed_client() (local)
   3379  return find_client_in_id_table(tbl, clid, sessions);                in find_unconfirmed_client()
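find_client_in_id_table() treats tbl as an array of list_heads indexed by a hash of the clientid, then walks that bucket's chain. A generic sketch of that open-chaining lookup; the hash function, sizes, and struct fields here are placeholders, not the nfsd ones:

```c
#include <linux/list.h>
#include <linux/types.h>

#define EXAMPLE_ID_HASH_BITS	8
#define EXAMPLE_ID_HASH_SIZE	(1 << EXAMPLE_ID_HASH_BITS)

struct example_client {
	u32			id;
	struct list_head	id_hash;	/* chained into one bucket */
};

/* tbl is an array of EXAMPLE_ID_HASH_SIZE list_heads, each initialised
 * with INIT_LIST_HEAD() when the table is set up.
 */
static struct example_client *example_find_client(struct list_head *tbl, u32 id)
{
	unsigned int bucket = id & (EXAMPLE_ID_HASH_SIZE - 1);	/* placeholder hash */
	struct example_client *clp;

	list_for_each_entry(clp, &tbl[bucket], id_hash)
		if (clp->id == id)
			return clp;
	return NULL;
}
```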
|
/fs/ntfs3/
fslog.c
    840  u16 esize = le16_to_cpu(tbl->size);                                 in extend_rsttbl()
    841  __le32 osize = cpu_to_le32(bytes_per_rt(tbl));                      in extend_rsttbl()
    842  u32 used = le16_to_cpu(tbl->used);                                  in extend_rsttbl()
    849  memcpy(rt + 1, tbl + 1, esize * used);                              in extend_rsttbl()
    856  if (tbl->first_free) {                                              in extend_rsttbl()
    857  rt->first_free = tbl->first_free;                                   in extend_rsttbl()
    863  rt->total = tbl->total;                                             in extend_rsttbl()
    865  kfree(tbl);                                                         in extend_rsttbl()
    878  struct RESTART_TABLE *t = *tbl;                                     in alloc_rsttbl_idx()
    881  *tbl = t = extend_rsttbl(t, 16, ~0u);                               in alloc_rsttbl_idx()
    [all …]
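extend_rsttbl() grows the log's restart table by allocating a larger one, copying the used entries that follow the header, carrying over the free-list head, and freeing the old table. A simplified sketch of that grow-and-copy pattern with generic in-memory types; the real RESTART_TABLE has little-endian on-disk fields and free-list fixups that are omitted here:

```c
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Simplified analogue of extend_rsttbl(); not the on-disk layout. */
struct example_tbl {
	u16 esize;	/* size of one entry        */
	u16 used;	/* number of entries in use */
	u16 total;	/* capacity                 */
	/* entries follow the header */
};

static struct example_tbl *example_extend(struct example_tbl *tbl, u16 add)
{
	size_t bytes = sizeof(*tbl) + (size_t)tbl->esize * (tbl->total + add);
	struct example_tbl *rt;

	rt = kzalloc(bytes, GFP_NOFS);
	if (!rt)
		return NULL;		/* caller keeps the old table */

	*rt = *tbl;
	rt->total = tbl->total + add;
	/* copy the entries that follow the header, as extend_rsttbl() does */
	memcpy(rt + 1, tbl + 1, (size_t)tbl->esize * tbl->used);

	kfree(tbl);
	return rt;
}
```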
|
/fs/nfs/filelayout/
filelayout.c
    131  struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;      in filelayout_async_handle_error() (local)
    174  rpc_wake_up(&tbl->slot_tbl_waitq);                                  in filelayout_async_handle_error()
    191  rpc_wake_up(&tbl->slot_tbl_waitq);                                  in filelayout_async_handle_error()
|
/fs/nfs/flexfilelayout/
flexfilelayout.c
   1117  struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;      in ff_layout_async_handle_error_v4() (local)
   1165  rpc_wake_up(&tbl->slot_tbl_waitq);                                  in ff_layout_async_handle_error_v4()
   1190  rpc_wake_up(&tbl->slot_tbl_waitq);                                  in ff_layout_async_handle_error_v4()
|