Lines matching refs: ci (struct ceph_inode_info *, Ceph inode code in fs/ceph/inode.c)

44 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_set_ino_cb() local
47 ci->i_vino = *(struct ceph_vino *)data; in ceph_set_ino_cb()
48 inode->i_ino = ceph_vino_to_ino_t(ci->i_vino); in ceph_set_ino_cb()
82 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_get_snapdir() local
105 ci->i_rbytes = 0; in ceph_get_snapdir()
106 ci->i_btime = ceph_inode(parent)->i_btime; in ceph_get_snapdir()
111 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ in ceph_get_snapdir()
145 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, in __get_or_create_frag() argument
153 p = &ci->i_fragtree.rb_node; in __get_or_create_frag()
176 rb_insert_color(&frag->node, &ci->i_fragtree); in __get_or_create_frag()
179 ceph_vinop(&ci->netfs.inode), f); in __get_or_create_frag()
186 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f) in __ceph_find_frag() argument
188 struct rb_node *n = ci->i_fragtree.rb_node; in __ceph_find_frag()
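Both helpers above walk ci->i_fragtree, an rb-tree keyed by the encoded frag id: __ceph_find_frag() looks a frag up, and __get_or_create_frag() inserts one when the lookup misses. A minimal userspace sketch of that lookup shape, where a plain binary search tree and an integer compare stand in for the kernel rb-tree API and its frag comparison; struct frag_node and frag_tree_find() are invented names for illustration only.

#include <stddef.h>
#include <stdint.h>

struct frag_node {
        uint32_t frag;                        /* key: encoded frag id */
        struct frag_node *left, *right;
};

/* lookup shaped like __ceph_find_frag(): descend by key, return an exact hit */
static struct frag_node *frag_tree_find(struct frag_node *root, uint32_t f)
{
        struct frag_node *n = root;

        while (n) {
                if (f < n->frag)
                        n = n->left;
                else if (f > n->frag)
                        n = n->right;
                else
                        return n;             /* found */
        }
        return NULL;                          /* miss: __get_or_create_frag() would insert here */
}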
209 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v, in __ceph_choose_frag() argument
222 frag = __ceph_find_frag(ci, t); in __ceph_choose_frag()
251 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, in ceph_choose_frag() argument
255 mutex_lock(&ci->i_fragtree_mutex); in ceph_choose_frag()
256 ret = __ceph_choose_frag(ci, v, pfrag, found); in ceph_choose_frag()
257 mutex_unlock(&ci->i_fragtree_mutex); in ceph_choose_frag()
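ceph_choose_frag() is the locking wrapper for __ceph_choose_frag(): the double-underscore helper assumes i_fragtree_mutex is already held, and the public entry point takes and releases it around the call. A hedged sketch of that convention, with a pthread mutex and made-up names (frag_ctx, choose_frag_locked) standing in for the kernel types:

#include <pthread.h>
#include <stdint.h>

struct frag_ctx {
        pthread_mutex_t fragtree_mutex;       /* stands in for ci->i_fragtree_mutex */
        /* ...fragtree state protected by the mutex... */
};

/* caller must hold ctx->fragtree_mutex */
static uint32_t choose_frag_locked(struct frag_ctx *ctx, uint32_t v)
{
        (void)ctx;
        /* walk the fragtree and pick the frag covering v; elided */
        return v;
}

static uint32_t choose_frag(struct frag_ctx *ctx, uint32_t v)
{
        uint32_t ret;

        pthread_mutex_lock(&ctx->fragtree_mutex);
        ret = choose_frag_locked(ctx, v);
        pthread_mutex_unlock(&ctx->fragtree_mutex);
        return ret;
}

Keeping the tree walk in the *_locked helper lets callers that already hold the mutex, such as ceph_fill_fragtree() further down, reuse it without taking the lock twice.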
269 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_dirfrag() local
278 spin_lock(&ci->i_ceph_lock); in ceph_fill_dirfrag()
279 if (ci->i_auth_cap) in ceph_fill_dirfrag()
280 diri_auth = ci->i_auth_cap->mds; in ceph_fill_dirfrag()
281 spin_unlock(&ci->i_ceph_lock); in ceph_fill_dirfrag()
286 mutex_lock(&ci->i_fragtree_mutex); in ceph_fill_dirfrag()
289 frag = __ceph_find_frag(ci, id); in ceph_fill_dirfrag()
296 rb_erase(&frag->node, &ci->i_fragtree); in ceph_fill_dirfrag()
310 frag = __get_or_create_frag(ci, id); in ceph_fill_dirfrag()
328 mutex_unlock(&ci->i_fragtree_mutex); in ceph_fill_dirfrag()
353 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_fragtree() local
360 mutex_lock(&ci->i_fragtree_mutex); in ceph_fill_fragtree()
362 if (nsplits != ci->i_fragtree_nsplits) { in ceph_fill_fragtree()
367 if (!__ceph_find_frag(ci, id)) in ceph_fill_fragtree()
369 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) { in ceph_fill_fragtree()
370 rb_node = rb_first(&ci->i_fragtree); in ceph_fill_fragtree()
377 if (id != __ceph_choose_frag(ci, id, NULL, NULL)) in ceph_fill_fragtree()
389 rb_node = rb_first(&ci->i_fragtree); in ceph_fill_fragtree()
413 rb_erase(&frag->node, &ci->i_fragtree); in ceph_fill_fragtree()
415 ci->i_fragtree_nsplits--; in ceph_fill_fragtree()
421 frag = __get_or_create_frag(ci, id); in ceph_fill_fragtree()
426 ci->i_fragtree_nsplits++; in ceph_fill_fragtree()
437 rb_erase(&frag->node, &ci->i_fragtree); in ceph_fill_fragtree()
439 ci->i_fragtree_nsplits--; in ceph_fill_fragtree()
444 mutex_unlock(&ci->i_fragtree_mutex); in ceph_fill_fragtree()
453 struct ceph_inode_info *ci; in ceph_alloc_inode() local
456 ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS); in ceph_alloc_inode()
457 if (!ci) in ceph_alloc_inode()
460 dout("alloc_inode %p\n", &ci->netfs.inode); in ceph_alloc_inode()
463 netfs_inode_init(&ci->netfs, &ceph_netfs_ops); in ceph_alloc_inode()
465 spin_lock_init(&ci->i_ceph_lock); in ceph_alloc_inode()
467 ci->i_version = 0; in ceph_alloc_inode()
468 ci->i_inline_version = 0; in ceph_alloc_inode()
469 ci->i_time_warp_seq = 0; in ceph_alloc_inode()
470 ci->i_ceph_flags = 0; in ceph_alloc_inode()
471 atomic64_set(&ci->i_ordered_count, 1); in ceph_alloc_inode()
472 atomic64_set(&ci->i_release_count, 1); in ceph_alloc_inode()
473 atomic64_set(&ci->i_complete_seq[0], 0); in ceph_alloc_inode()
474 atomic64_set(&ci->i_complete_seq[1], 0); in ceph_alloc_inode()
475 ci->i_symlink = NULL; in ceph_alloc_inode()
477 ci->i_max_bytes = 0; in ceph_alloc_inode()
478 ci->i_max_files = 0; in ceph_alloc_inode()
480 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); in ceph_alloc_inode()
481 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout)); in ceph_alloc_inode()
482 RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL); in ceph_alloc_inode()
484 ci->i_fragtree = RB_ROOT; in ceph_alloc_inode()
485 mutex_init(&ci->i_fragtree_mutex); in ceph_alloc_inode()
487 ci->i_xattrs.blob = NULL; in ceph_alloc_inode()
488 ci->i_xattrs.prealloc_blob = NULL; in ceph_alloc_inode()
489 ci->i_xattrs.dirty = false; in ceph_alloc_inode()
490 ci->i_xattrs.index = RB_ROOT; in ceph_alloc_inode()
491 ci->i_xattrs.count = 0; in ceph_alloc_inode()
492 ci->i_xattrs.names_size = 0; in ceph_alloc_inode()
493 ci->i_xattrs.vals_size = 0; in ceph_alloc_inode()
494 ci->i_xattrs.version = 0; in ceph_alloc_inode()
495 ci->i_xattrs.index_version = 0; in ceph_alloc_inode()
497 ci->i_caps = RB_ROOT; in ceph_alloc_inode()
498 ci->i_auth_cap = NULL; in ceph_alloc_inode()
499 ci->i_dirty_caps = 0; in ceph_alloc_inode()
500 ci->i_flushing_caps = 0; in ceph_alloc_inode()
501 INIT_LIST_HEAD(&ci->i_dirty_item); in ceph_alloc_inode()
502 INIT_LIST_HEAD(&ci->i_flushing_item); in ceph_alloc_inode()
503 ci->i_prealloc_cap_flush = NULL; in ceph_alloc_inode()
504 INIT_LIST_HEAD(&ci->i_cap_flush_list); in ceph_alloc_inode()
505 init_waitqueue_head(&ci->i_cap_wq); in ceph_alloc_inode()
506 ci->i_hold_caps_max = 0; in ceph_alloc_inode()
507 INIT_LIST_HEAD(&ci->i_cap_delay_list); in ceph_alloc_inode()
508 INIT_LIST_HEAD(&ci->i_cap_snaps); in ceph_alloc_inode()
509 ci->i_head_snapc = NULL; in ceph_alloc_inode()
510 ci->i_snap_caps = 0; in ceph_alloc_inode()
512 ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ; in ceph_alloc_inode()
514 ci->i_nr_by_mode[i] = 0; in ceph_alloc_inode()
516 mutex_init(&ci->i_truncate_mutex); in ceph_alloc_inode()
517 ci->i_truncate_seq = 0; in ceph_alloc_inode()
518 ci->i_truncate_size = 0; in ceph_alloc_inode()
519 ci->i_truncate_pending = 0; in ceph_alloc_inode()
521 ci->i_max_size = 0; in ceph_alloc_inode()
522 ci->i_reported_size = 0; in ceph_alloc_inode()
523 ci->i_wanted_max_size = 0; in ceph_alloc_inode()
524 ci->i_requested_max_size = 0; in ceph_alloc_inode()
526 ci->i_pin_ref = 0; in ceph_alloc_inode()
527 ci->i_rd_ref = 0; in ceph_alloc_inode()
528 ci->i_rdcache_ref = 0; in ceph_alloc_inode()
529 ci->i_wr_ref = 0; in ceph_alloc_inode()
530 ci->i_wb_ref = 0; in ceph_alloc_inode()
531 ci->i_fx_ref = 0; in ceph_alloc_inode()
532 ci->i_wrbuffer_ref = 0; in ceph_alloc_inode()
533 ci->i_wrbuffer_ref_head = 0; in ceph_alloc_inode()
534 atomic_set(&ci->i_filelock_ref, 0); in ceph_alloc_inode()
535 atomic_set(&ci->i_shared_gen, 1); in ceph_alloc_inode()
536 ci->i_rdcache_gen = 0; in ceph_alloc_inode()
537 ci->i_rdcache_revoking = 0; in ceph_alloc_inode()
539 INIT_LIST_HEAD(&ci->i_unsafe_dirops); in ceph_alloc_inode()
540 INIT_LIST_HEAD(&ci->i_unsafe_iops); in ceph_alloc_inode()
541 spin_lock_init(&ci->i_unsafe_lock); in ceph_alloc_inode()
543 ci->i_snap_realm = NULL; in ceph_alloc_inode()
544 INIT_LIST_HEAD(&ci->i_snap_realm_item); in ceph_alloc_inode()
545 INIT_LIST_HEAD(&ci->i_snap_flush_item); in ceph_alloc_inode()
547 INIT_WORK(&ci->i_work, ceph_inode_work); in ceph_alloc_inode()
548 ci->i_work_mask = 0; in ceph_alloc_inode()
549 memset(&ci->i_btime, '\0', sizeof(ci->i_btime)); in ceph_alloc_inode()
550 return &ci->netfs.inode; in ceph_alloc_inode()
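ceph_alloc_inode() above allocates the whole ceph_inode_info from a dedicated slab cache, initialises its fields one by one, and hands the VFS only the embedded inode (&ci->netfs.inode); ceph_inode() later recovers the container from that pointer. A simplified userspace sketch of that embedding, assuming stand-in struct names and calloc() in place of alloc_inode_sb(..., GFP_NOFS):

#include <stdlib.h>
#include <stddef.h>

struct vfs_inode {                            /* stands in for struct inode */
        unsigned long i_ino;
};

struct netfs_ctx {                            /* stands in for struct netfs_inode */
        struct vfs_inode inode;
};

struct ceph_info {                            /* stands in for struct ceph_inode_info */
        struct netfs_ctx netfs;
        long long i_rbytes;
        /* ...the many other fields ceph_alloc_inode() initialises... */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct vfs_inode *alloc_inode(void)
{
        struct ceph_info *ci = calloc(1, sizeof(*ci));

        if (!ci)
                return NULL;
        ci->i_rbytes = 0;                     /* field-by-field init, as in the listing above */
        return &ci->netfs.inode;              /* the VFS only ever sees the embedded inode */
}

static struct ceph_info *to_ceph_info(struct vfs_inode *inode)
{
        /* the inverse mapping, like ceph_inode() */
        return container_of(inode, struct ceph_info, netfs.inode);
}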
555 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_free_inode() local
557 kfree(ci->i_symlink); in ceph_free_inode()
558 kmem_cache_free(ceph_inode_cachep, ci); in ceph_free_inode()
563 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_evict_inode() local
577 ceph_fscache_unregister_inode_cookie(ci); in ceph_evict_inode()
579 __ceph_remove_caps(ci); in ceph_evict_inode()
581 if (__ceph_has_quota(ci, QUOTA_GET_ANY)) in ceph_evict_inode()
588 if (ci->i_snap_realm) { in ceph_evict_inode()
591 ci->i_snap_realm); in ceph_evict_inode()
594 ceph_put_snapid_map(mdsc, ci->i_snapid_map); in ceph_evict_inode()
595 ci->i_snap_realm = NULL; in ceph_evict_inode()
599 while ((n = rb_first(&ci->i_fragtree)) != NULL) { in ceph_evict_inode()
601 rb_erase(n, &ci->i_fragtree); in ceph_evict_inode()
604 ci->i_fragtree_nsplits = 0; in ceph_evict_inode()
606 __ceph_destroy_xattrs(ci); in ceph_evict_inode()
607 if (ci->i_xattrs.blob) in ceph_evict_inode()
608 ceph_buffer_put(ci->i_xattrs.blob); in ceph_evict_inode()
609 if (ci->i_xattrs.prealloc_blob) in ceph_evict_inode()
610 ceph_buffer_put(ci->i_xattrs.prealloc_blob); in ceph_evict_inode()
612 ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns)); in ceph_evict_inode()
613 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns)); in ceph_evict_inode()
632 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_file_size() local
636 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || in ceph_fill_file_size()
637 (truncate_seq == ci->i_truncate_seq && size > isize)) { in ceph_fill_file_size()
651 ci->i_reported_size = size; in ceph_fill_file_size()
652 if (truncate_seq != ci->i_truncate_seq) { in ceph_fill_file_size()
654 ci->i_truncate_seq, truncate_seq); in ceph_fill_file_size()
655 ci->i_truncate_seq = truncate_seq; in ceph_fill_file_size()
671 __ceph_is_file_opened(ci)) { in ceph_fill_file_size()
672 ci->i_truncate_pending++; in ceph_fill_file_size()
677 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 && in ceph_fill_file_size()
678 ci->i_truncate_size != truncate_size) { in ceph_fill_file_size()
679 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size, in ceph_fill_file_size()
681 ci->i_truncate_size = truncate_size; in ceph_fill_file_size()
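ceph_fill_file_size() only accepts a reported size when ceph_seq_cmp() says truncate_seq is newer than the cached i_truncate_seq (or equal with a larger size), so stale metadata cannot roll the file state backwards. A minimal sketch of wraparound-safe sequence comparison, assuming the usual signed-difference trick rather than quoting the kernel helper:

#include <stdint.h>

/* > 0 if a is newer than b, < 0 if older, 0 if equal; correct across
 * u32 wraparound as long as the two values are less than 2^31 apart */
static int seq_cmp(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b);
}

A check such as seq_cmp(truncate_seq, cached_seq) > 0 then admits only genuinely newer truncation epochs.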
690 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_file_time() local
698 if (ci->i_version == 0 || in ceph_fill_file_time()
705 if (ci->i_version == 0 || in ceph_fill_file_time()
706 ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { in ceph_fill_file_time()
712 ci->i_time_warp_seq, (int)time_warp_seq); in ceph_fill_file_time()
716 ci->i_time_warp_seq = time_warp_seq; in ceph_fill_file_time()
717 } else if (time_warp_seq == ci->i_time_warp_seq) { in ceph_fill_file_time()
740 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { in ceph_fill_file_time()
744 ci->i_time_warp_seq = time_warp_seq; in ceph_fill_file_time()
751 inode, time_warp_seq, ci->i_time_warp_seq); in ceph_fill_file_time()
766 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fill_inode() local
785 ci->i_version); in ceph_fill_inode()
831 if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map) in ceph_fill_inode()
832 ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode)); in ceph_fill_inode()
834 spin_lock(&ci->i_ceph_lock); in ceph_fill_inode()
847 if (ci->i_version == 0 || in ceph_fill_inode()
849 le64_to_cpu(info->version) > (ci->i_version & ~1))) in ceph_fill_inode()
855 __ceph_caps_issued(ci, &issued); in ceph_fill_inode()
856 issued |= __ceph_caps_dirty(ci); in ceph_fill_inode()
866 __ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files); in ceph_fill_inode()
876 ceph_decode_timespec64(&ci->i_btime, &iinfo->btime); in ceph_fill_inode()
877 ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime); in ceph_fill_inode()
895 ci->i_files = le64_to_cpu(info->files); in ceph_fill_inode()
896 ci->i_subdirs = le64_to_cpu(info->subdirs); in ceph_fill_inode()
901 s64 old_pool = ci->i_layout.pool_id; in ceph_fill_inode()
904 ceph_file_layout_from_legacy(&ci->i_layout, &info->layout); in ceph_fill_inode()
905 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns, in ceph_fill_inode()
906 lockdep_is_held(&ci->i_ceph_lock)); in ceph_fill_inode()
907 rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns); in ceph_fill_inode()
909 if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns) in ceph_fill_inode()
910 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; in ceph_fill_inode()
920 ci->i_max_size != le64_to_cpu(info->max_size)) { in ceph_fill_inode()
921 dout("max_size %lld -> %llu\n", ci->i_max_size, in ceph_fill_inode()
923 ci->i_max_size = le64_to_cpu(info->max_size); in ceph_fill_inode()
931 ci->i_dir_layout = iinfo->dir_layout; in ceph_fill_inode()
932 ci->i_rbytes = le64_to_cpu(info->rbytes); in ceph_fill_inode()
933 ci->i_rfiles = le64_to_cpu(info->rfiles); in ceph_fill_inode()
934 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); in ceph_fill_inode()
935 ci->i_dir_pin = iinfo->dir_pin; in ceph_fill_inode()
936 ci->i_rsnaps = iinfo->rsnaps; in ceph_fill_inode()
937 ceph_decode_timespec64(&ci->i_rctime, &info->rctime); in ceph_fill_inode()
943 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) && in ceph_fill_inode()
944 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { in ceph_fill_inode()
945 if (ci->i_xattrs.blob) in ceph_fill_inode()
946 old_blob = ci->i_xattrs.blob; in ceph_fill_inode()
947 ci->i_xattrs.blob = xattr_blob; in ceph_fill_inode()
949 memcpy(ci->i_xattrs.blob->vec.iov_base, in ceph_fill_inode()
951 ci->i_xattrs.version = le64_to_cpu(info->xattr_version); in ceph_fill_inode()
958 if (le64_to_cpu(info->version) > ci->i_version) in ceph_fill_inode()
959 ci->i_version = le64_to_cpu(info->version); in ceph_fill_inode()
978 if (!ci->i_symlink) { in ceph_fill_inode()
982 spin_unlock(&ci->i_ceph_lock); in ceph_fill_inode()
998 spin_lock(&ci->i_ceph_lock); in ceph_fill_inode()
999 if (!ci->i_symlink) in ceph_fill_inode()
1000 ci->i_symlink = sym; in ceph_fill_inode()
1004 inode->i_link = ci->i_symlink; in ceph_fill_inode()
1029 ci->i_files == 0 && ci->i_subdirs == 0 && in ceph_fill_inode()
1032 !__ceph_dir_is_complete(ci)) { in ceph_fill_inode()
1035 __ceph_dir_set_complete(ci, in ceph_fill_inode()
1036 atomic64_read(&ci->i_release_count), in ceph_fill_inode()
1037 atomic64_read(&ci->i_ordered_count)); in ceph_fill_inode()
1044 ci->i_snap_caps |= info_caps; in ceph_fill_inode()
1049 iinfo->inline_version >= ci->i_inline_version) { in ceph_fill_inode()
1051 ci->i_inline_version = iinfo->inline_version; in ceph_fill_inode()
1052 if (ceph_has_inline_data(ci) && in ceph_fill_inode()
1061 __ceph_touch_fmode(ci, mdsc, cap_fmode); in ceph_fill_inode()
1064 spin_unlock(&ci->i_ceph_lock); in ceph_fill_inode()
1073 wake_up_all(&ci->i_cap_wq); in ceph_fill_inode()
1593 struct ceph_inode_info *ci = ceph_inode(dir); in fill_readdir_cache() local
1616 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && in fill_readdir_cache()
1617 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) { in fill_readdir_cache()
1632 struct ceph_inode_info *ci = ceph_inode(d_inode(parent)); in ceph_readdir_prepopulate() local
1648 last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, in ceph_readdir_prepopulate()
1683 atomic64_read(&ci->i_release_count); in ceph_readdir_prepopulate()
1685 atomic64_read(&ci->i_ordered_count); in ceph_readdir_prepopulate()
1706 u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, in ceph_readdir_prepopulate()
1741 atomic_read(&ci->i_shared_gen)) { in ceph_readdir_prepopulate()
1742 __ceph_dir_clear_ordered(ci); in ceph_readdir_prepopulate()
1825 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_inode_set_size() local
1828 spin_lock(&ci->i_ceph_lock); in ceph_inode_set_size()
1834 ret = __ceph_should_report_size(ci); in ceph_inode_set_size()
1836 spin_unlock(&ci->i_ceph_lock); in ceph_inode_set_size()
1844 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_queue_inode_work() local
1845 set_bit(work_bit, &ci->i_work_mask); in ceph_queue_inode_work()
1848 if (queue_work(fsc->inode_wq, &ci->i_work)) { in ceph_queue_inode_work()
1849 dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask); in ceph_queue_inode_work()
1852 inode, ci->i_work_mask); in ceph_queue_inode_work()
1859 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_do_invalidate_pages() local
1865 mutex_lock(&ci->i_truncate_mutex); in ceph_do_invalidate_pages()
1872 mutex_unlock(&ci->i_truncate_mutex); in ceph_do_invalidate_pages()
1876 spin_lock(&ci->i_ceph_lock); in ceph_do_invalidate_pages()
1878 ci->i_rdcache_gen, ci->i_rdcache_revoking); in ceph_do_invalidate_pages()
1879 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { in ceph_do_invalidate_pages()
1880 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) in ceph_do_invalidate_pages()
1882 spin_unlock(&ci->i_ceph_lock); in ceph_do_invalidate_pages()
1883 mutex_unlock(&ci->i_truncate_mutex); in ceph_do_invalidate_pages()
1886 orig_gen = ci->i_rdcache_gen; in ceph_do_invalidate_pages()
1887 spin_unlock(&ci->i_ceph_lock); in ceph_do_invalidate_pages()
1894 spin_lock(&ci->i_ceph_lock); in ceph_do_invalidate_pages()
1895 if (orig_gen == ci->i_rdcache_gen && in ceph_do_invalidate_pages()
1896 orig_gen == ci->i_rdcache_revoking) { in ceph_do_invalidate_pages()
1898 ci->i_rdcache_gen); in ceph_do_invalidate_pages()
1899 ci->i_rdcache_revoking--; in ceph_do_invalidate_pages()
1903 inode, orig_gen, ci->i_rdcache_gen, in ceph_do_invalidate_pages()
1904 ci->i_rdcache_revoking); in ceph_do_invalidate_pages()
1905 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) in ceph_do_invalidate_pages()
1908 spin_unlock(&ci->i_ceph_lock); in ceph_do_invalidate_pages()
1909 mutex_unlock(&ci->i_truncate_mutex); in ceph_do_invalidate_pages()
1912 ceph_check_caps(ci, 0); in ceph_do_invalidate_pages()
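ceph_do_invalidate_pages() snapshots i_rdcache_gen under i_ceph_lock, drops the lock for the slow page-cache invalidation, then retakes it and acknowledges the revoke (decrementing i_rdcache_revoking) only if the generation has not moved in the meantime. A pthread-based sketch of that snapshot-then-recheck pattern, with simplified field names standing in for the kernel ones:

#include <pthread.h>
#include <stdbool.h>

struct rdcache {
        pthread_mutex_t lock;                 /* stands in for ci->i_ceph_lock */
        unsigned int gen;                     /* i_rdcache_gen */
        unsigned int revoking;                /* i_rdcache_revoking */
};

/* returns true if the caller should re-check caps afterwards */
static bool invalidate_pages(struct rdcache *rc)
{
        unsigned int orig_gen;
        bool check = false;

        pthread_mutex_lock(&rc->lock);
        orig_gen = rc->gen;
        pthread_mutex_unlock(&rc->lock);

        /* slow work with the lock dropped: drop the cached pages */

        pthread_mutex_lock(&rc->lock);
        if (orig_gen == rc->gen && orig_gen == rc->revoking) {
                rc->revoking--;               /* invalidation counts: ack the revoke */
                check = true;
        }
        /* else: new activity bumped the generation; don't ack this pass */
        pthread_mutex_unlock(&rc->lock);

        return check;
}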
1921 struct ceph_inode_info *ci = ceph_inode(inode); in __ceph_do_pending_vmtruncate() local
1925 mutex_lock(&ci->i_truncate_mutex); in __ceph_do_pending_vmtruncate()
1927 spin_lock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1928 if (ci->i_truncate_pending == 0) { in __ceph_do_pending_vmtruncate()
1930 spin_unlock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1931 mutex_unlock(&ci->i_truncate_mutex); in __ceph_do_pending_vmtruncate()
1939 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { in __ceph_do_pending_vmtruncate()
1940 spin_unlock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1949 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref); in __ceph_do_pending_vmtruncate()
1951 to = ci->i_truncate_size; in __ceph_do_pending_vmtruncate()
1952 wrbuffer_refs = ci->i_wrbuffer_ref; in __ceph_do_pending_vmtruncate()
1954 ci->i_truncate_pending, to); in __ceph_do_pending_vmtruncate()
1955 spin_unlock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1960 spin_lock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1961 if (to == ci->i_truncate_size) { in __ceph_do_pending_vmtruncate()
1962 ci->i_truncate_pending = 0; in __ceph_do_pending_vmtruncate()
1965 spin_unlock(&ci->i_ceph_lock); in __ceph_do_pending_vmtruncate()
1969 mutex_unlock(&ci->i_truncate_mutex); in __ceph_do_pending_vmtruncate()
1972 ceph_check_caps(ci, 0); in __ceph_do_pending_vmtruncate()
1974 wake_up_all(&ci->i_cap_wq); in __ceph_do_pending_vmtruncate()
1979 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, in ceph_inode_work() local
1981 struct inode *inode = &ci->netfs.inode; in ceph_inode_work()
1983 if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) { in ceph_inode_work()
1987 if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask)) in ceph_inode_work()
1990 if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask)) in ceph_inode_work()
1993 if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask)) in ceph_inode_work()
1994 ceph_check_caps(ci, 0); in ceph_inode_work()
1996 if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask)) in ceph_inode_work()
1997 ceph_flush_snaps(ci, NULL); in ceph_inode_work()
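Taken together, ceph_queue_inode_work() and ceph_inode_work() above form a small deferred-work multiplexer: submitters set a bit in i_work_mask and queue a single work item, and the worker test-and-clears each bit to decide what to run, so repeated requests collapse into one pass. A userspace sketch of the pattern, with C11 atomics standing in for set_bit()/test_and_clear_bit()/queue_work() and invented handler names in the comments:

#include <stdatomic.h>
#include <stdbool.h>

enum { WORK_WRITEBACK, WORK_INVALIDATE_PAGES, WORK_VMTRUNCATE, WORK_CHECK_CAPS };

struct inode_work {
        atomic_ulong mask;                    /* stands in for ci->i_work_mask */
};

static void queue_inode_work(struct inode_work *w, int bit)
{
        atomic_fetch_or(&w->mask, 1UL << bit);
        /* ...then queue the single work item; an already-queued item is reused */
}

static bool test_and_clear(struct inode_work *w, int bit)
{
        unsigned long old = atomic_fetch_and(&w->mask, ~(1UL << bit));

        return old & (1UL << bit);
}

static void inode_work(struct inode_work *w)
{
        if (test_and_clear(w, WORK_WRITEBACK))
                /* writeback handler */ ;
        if (test_and_clear(w, WORK_INVALIDATE_PAGES))
                /* page invalidation handler */ ;
        if (test_and_clear(w, WORK_VMTRUNCATE))
                /* pending vmtruncate handler */ ;
        if (test_and_clear(w, WORK_CHECK_CAPS))
                /* caps check handler */ ;
}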
2014 struct ceph_inode_info *ci = ceph_inode(inode); in __ceph_setattr() local
2037 spin_lock(&ci->i_ceph_lock); in __ceph_setattr()
2038 issued = __ceph_caps_issued(ci, NULL); in __ceph_setattr()
2040 if (!ci->i_head_snapc && in __ceph_setattr()
2044 spin_unlock(&ci->i_ceph_lock); in __ceph_setattr()
2046 spin_lock(&ci->i_ceph_lock); in __ceph_setattr()
2047 issued = __ceph_caps_issued(ci, NULL); in __ceph_setattr()
2103 ci->i_time_warp_seq++; in __ceph_setattr()
2128 ci->i_reported_size = attr->ia_size; in __ceph_setattr()
2146 ci->i_time_warp_seq++; in __ceph_setattr()
2192 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, in __ceph_setattr()
2199 spin_unlock(&ci->i_ceph_lock); in __ceph_setattr()
2452 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_getattr() local
2475 if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) { in ceph_getattr()
2476 stat->btime = ci->i_btime; in ceph_getattr()
2488 stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0; in ceph_getattr()
2492 stat->size = ci->i_rbytes; in ceph_getattr()
2512 stat->size = ci->i_files + ci->i_subdirs; in ceph_getattr()
2523 stat->nlink = 1 + 1 + ci->i_subdirs; in ceph_getattr()
2534 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_inode_shutdown() local
2539 spin_lock(&ci->i_ceph_lock); in ceph_inode_shutdown()
2540 ci->i_ceph_flags |= CEPH_I_SHUTDOWN; in ceph_inode_shutdown()
2541 p = rb_first(&ci->i_caps); in ceph_inode_shutdown()
2548 spin_unlock(&ci->i_ceph_lock); in ceph_inode_shutdown()