Lines matching refs:ci
Each entry gives the line number within the source file, the matching source line, and the enclosing function; the trailing "argument" or "local" notes whether ci is a parameter or a local variable at that point.
49 struct ceph_inode_info *ci,
434 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds) in __get_cap_for_mds() argument
437 struct rb_node *n = ci->i_caps.rb_node; in __get_cap_for_mds()
451 struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds) in ceph_get_cap_for_mds() argument
455 spin_lock(&ci->i_ceph_lock); in ceph_get_cap_for_mds()
456 cap = __get_cap_for_mds(ci, mds); in ceph_get_cap_for_mds()
457 spin_unlock(&ci->i_ceph_lock); in ceph_get_cap_for_mds()
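
For context, 434-457 above are the cap-lookup pair. A minimal sketch of the rbtree walk the __get_cap_for_mds() fragments come from, reconstructed from the visible lines (this refs listing elides every line that does not mention ci, so the body below is an approximation in standard kernel rbtree style, not a verbatim copy):

        static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
        {
                struct ceph_cap *cap;
                struct rb_node *n = ci->i_caps.rb_node;  /* per-inode caps, keyed by mds */

                while (n) {
                        cap = rb_entry(n, struct ceph_cap, ci_node);
                        if (mds < cap->mds)
                                n = n->rb_left;
                        else if (mds > cap->mds)
                                n = n->rb_right;
                        else
                                return cap;              /* at most one cap per mds */
                }
                return NULL;
        }

ceph_get_cap_for_mds() (451-457) is the locked wrapper: throughout this file the double-underscore helpers assume the caller holds ci->i_ceph_lock, and the wrapper takes and drops that lock around the bare lookup.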
464 static void __insert_cap_node(struct ceph_inode_info *ci, in __insert_cap_node() argument
467 struct rb_node **p = &ci->i_caps.rb_node; in __insert_cap_node()
483 rb_insert_color(&new->ci_node, &ci->i_caps); in __insert_cap_node()
491 struct ceph_inode_info *ci) in __cap_set_timeouts() argument
494 ci->i_hold_caps_max = round_jiffies(jiffies + in __cap_set_timeouts()
496 dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode, in __cap_set_timeouts()
497 ci->i_hold_caps_max - jiffies); in __cap_set_timeouts()
509 struct ceph_inode_info *ci) in __cap_delay_requeue() argument
511 dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode, in __cap_delay_requeue()
512 ci->i_ceph_flags, ci->i_hold_caps_max); in __cap_delay_requeue()
515 if (!list_empty(&ci->i_cap_delay_list)) { in __cap_delay_requeue()
516 if (ci->i_ceph_flags & CEPH_I_FLUSH) in __cap_delay_requeue()
518 list_del_init(&ci->i_cap_delay_list); in __cap_delay_requeue()
520 __cap_set_timeouts(mdsc, ci); in __cap_delay_requeue()
521 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue()
533 struct ceph_inode_info *ci) in __cap_delay_requeue_front() argument
535 dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode); in __cap_delay_requeue_front()
537 ci->i_ceph_flags |= CEPH_I_FLUSH; in __cap_delay_requeue_front()
538 if (!list_empty(&ci->i_cap_delay_list)) in __cap_delay_requeue_front()
539 list_del_init(&ci->i_cap_delay_list); in __cap_delay_requeue_front()
540 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue_front()
550 struct ceph_inode_info *ci) in __cap_delay_cancel() argument
552 dout("__cap_delay_cancel %p\n", &ci->netfs.inode); in __cap_delay_cancel()
553 if (list_empty(&ci->i_cap_delay_list)) in __cap_delay_cancel()
556 list_del_init(&ci->i_cap_delay_list); in __cap_delay_cancel()
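
Fragments 491-556 are the cap-delay queue helpers. A hedged summary of how the three differ, in comment form (the conditions between the visible lines are elided by the listing):

        /*
         * __cap_delay_requeue():       re-arm i_hold_caps_max via
         *      __cap_set_timeouts() and move the inode to the tail of
         *      mdsc->cap_delay_list; if CEPH_I_FLUSH is already set, the
         *      existing (earlier) queue position is kept instead.
         * __cap_delay_requeue_front(): set CEPH_I_FLUSH and splice the
         *      inode to the head of the list so it is flushed first.
         * __cap_delay_cancel():        drop the inode from the delay list
         *      entirely (a no-op if it is not queued).
         */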
561 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, in __check_cap_issue() argument
564 unsigned had = __ceph_caps_issued(ci, NULL); in __check_cap_issue()
566 lockdep_assert_held(&ci->i_ceph_lock); in __check_cap_issue()
572 if (S_ISREG(ci->netfs.inode.i_mode) && in __check_cap_issue()
575 ci->i_rdcache_gen++; in __check_cap_issue()
586 atomic_inc(&ci->i_shared_gen); in __check_cap_issue()
587 if (S_ISDIR(ci->netfs.inode.i_mode)) { in __check_cap_issue()
588 dout(" marking %p NOT complete\n", &ci->netfs.inode); in __check_cap_issue()
589 __ceph_dir_clear_complete(ci); in __check_cap_issue()
594 if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) && in __check_cap_issue()
596 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns)); in __check_cap_issue()
597 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout)); in __check_cap_issue()
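
Read together, the __check_cap_issue() fragments (561-597) are generation bumps driven by newly issued or newly lost cap bits; a hedged reading in comment form:

        /*
         * - CEPH_CAP_FILE_CACHE newly issued on a regular file:
         *   ci->i_rdcache_gen++ so stale page-cache state can be detected.
         * - CEPH_CAP_FILE_SHARED changing hands: bump i_shared_gen and,
         *   for directories, clear the "complete" flag so cached dentries
         *   are revalidated.
         * - CEPH_CAP_DIR_CREATE lost on a directory: drop the cached
         *   layout (i_cached_layout) that async creates were using.
         */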
606 void change_auth_cap_ses(struct ceph_inode_info *ci, in change_auth_cap_ses() argument
609 lockdep_assert_held(&ci->i_ceph_lock); in change_auth_cap_ses()
611 if (list_empty(&ci->i_dirty_item) && list_empty(&ci->i_flushing_item)) in change_auth_cap_ses()
615 if (!list_empty(&ci->i_dirty_item)) in change_auth_cap_ses()
616 list_move(&ci->i_dirty_item, &session->s_cap_dirty); in change_auth_cap_ses()
617 if (!list_empty(&ci->i_flushing_item)) in change_auth_cap_ses()
618 list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing); in change_auth_cap_ses()
638 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_add_cap() local
644 lockdep_assert_held(&ci->i_ceph_lock); in ceph_add_cap()
651 cap = __get_cap_for_mds(ci, mds); in ceph_add_cap()
662 cap->ci = ci; in ceph_add_cap()
663 __insert_cap_node(ci, cap); in ceph_add_cap()
690 WARN_ON(cap != ci->i_auth_cap); in ceph_add_cap()
699 if (!ci->i_snap_realm || in ceph_add_cap()
701 realmino != (u64)-1 && ci->i_snap_realm->ino != realmino)) { in ceph_add_cap()
711 __func__, realmino, ci->i_vino.ino, in ceph_add_cap()
712 ci->i_snap_realm ? ci->i_snap_realm->ino : 0); in ceph_add_cap()
715 __check_cap_issue(ci, cap, issued); in ceph_add_cap()
722 actual_wanted = __ceph_caps_wanted(ci); in ceph_add_cap()
728 __cap_delay_requeue(mdsc, ci); in ceph_add_cap()
732 if (!ci->i_auth_cap || in ceph_add_cap()
733 ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) { in ceph_add_cap()
734 if (ci->i_auth_cap && in ceph_add_cap()
735 ci->i_auth_cap->session != cap->session) in ceph_add_cap()
736 change_auth_cap_ses(ci, cap->session); in ceph_add_cap()
737 ci->i_auth_cap = cap; in ceph_add_cap()
741 WARN_ON(ci->i_auth_cap == cap); in ceph_add_cap()
758 wake_up_all(&ci->i_cap_wq); in ceph_add_cap()
776 "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode, in __cap_is_valid()
789 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) in __ceph_caps_issued() argument
791 int have = ci->i_snap_caps; in __ceph_caps_issued()
797 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued()
802 &ci->netfs.inode, cap, ceph_cap_string(cap->issued)); in __ceph_caps_issued()
812 if (ci->i_auth_cap) { in __ceph_caps_issued()
813 cap = ci->i_auth_cap; in __ceph_caps_issued()
822 int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap) in __ceph_caps_issued_other() argument
824 int have = ci->i_snap_caps; in __ceph_caps_issued_other()
828 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued_other()
849 dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap, in __touch_cap()
854 &cap->ci->netfs.inode, cap, s->s_mds); in __touch_cap()
864 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) in __ceph_caps_issued_mask() argument
868 int have = ci->i_snap_caps; in __ceph_caps_issued_mask()
872 " (mask %s)\n", ceph_ino(&ci->netfs.inode), in __ceph_caps_issued_mask()
878 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued_mask()
884 " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap, in __ceph_caps_issued_mask()
896 " (mask %s)\n", ceph_ino(&ci->netfs.inode), in __ceph_caps_issued_mask()
904 for (q = rb_first(&ci->i_caps); q != p; in __ceph_caps_issued_mask()
921 int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask, in __ceph_caps_issued_mask_metric() argument
924 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); in __ceph_caps_issued_mask_metric()
927 r = __ceph_caps_issued_mask(ci, mask, touch); in __ceph_caps_issued_mask_metric()
938 int __ceph_caps_revoking_other(struct ceph_inode_info *ci, in __ceph_caps_revoking_other() argument
944 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_revoking_other()
953 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask) in ceph_caps_revoking() argument
955 struct inode *inode = &ci->netfs.inode; in ceph_caps_revoking()
958 spin_lock(&ci->i_ceph_lock); in ceph_caps_revoking()
959 ret = __ceph_caps_revoking_other(ci, NULL, mask); in ceph_caps_revoking()
960 spin_unlock(&ci->i_ceph_lock); in ceph_caps_revoking()
966 int __ceph_caps_used(struct ceph_inode_info *ci) in __ceph_caps_used() argument
969 if (ci->i_pin_ref) in __ceph_caps_used()
971 if (ci->i_rd_ref) in __ceph_caps_used()
973 if (ci->i_rdcache_ref || in __ceph_caps_used()
974 (S_ISREG(ci->netfs.inode.i_mode) && in __ceph_caps_used()
975 ci->netfs.inode.i_data.nrpages)) in __ceph_caps_used()
977 if (ci->i_wr_ref) in __ceph_caps_used()
979 if (ci->i_wb_ref || ci->i_wrbuffer_ref) in __ceph_caps_used()
981 if (ci->i_fx_ref) in __ceph_caps_used()
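
The __ceph_caps_used() fragments (966-981) map the per-inode reference counters onto in-use cap bits. A sketch of the full mapping; the constants on the right are inferred from the standard ceph cap naming, since the listing hides the lines that do not mention ci:

        int __ceph_caps_used(struct ceph_inode_info *ci)
        {
                int used = 0;

                if (ci->i_pin_ref)
                        used |= CEPH_CAP_PIN;
                if (ci->i_rd_ref)
                        used |= CEPH_CAP_FILE_RD;
                if (ci->i_rdcache_ref ||
                    (S_ISREG(ci->netfs.inode.i_mode) &&
                     ci->netfs.inode.i_data.nrpages))  /* cached pages hold CACHE */
                        used |= CEPH_CAP_FILE_CACHE;
                if (ci->i_wr_ref)
                        used |= CEPH_CAP_FILE_WR;
                if (ci->i_wb_ref || ci->i_wrbuffer_ref)
                        used |= CEPH_CAP_FILE_BUFFER;
                if (ci->i_fx_ref)
                        used |= CEPH_CAP_FILE_EXCL;
                return used;
        }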
991 int __ceph_caps_file_wanted(struct ceph_inode_info *ci) in __ceph_caps_file_wanted() argument
998 ceph_inode_to_client(&ci->netfs.inode)->mount_options; in __ceph_caps_file_wanted()
1002 if (S_ISDIR(ci->netfs.inode.i_mode)) { in __ceph_caps_file_wanted()
1006 if (ci->i_nr_by_mode[RD_SHIFT] > 0 || in __ceph_caps_file_wanted()
1007 time_after(ci->i_last_rd, used_cutoff)) in __ceph_caps_file_wanted()
1010 if (ci->i_nr_by_mode[WR_SHIFT] > 0 || in __ceph_caps_file_wanted()
1011 time_after(ci->i_last_wr, used_cutoff)) { in __ceph_caps_file_wanted()
1017 if (want || ci->i_nr_by_mode[PIN_SHIFT] > 0) in __ceph_caps_file_wanted()
1024 if (ci->i_nr_by_mode[RD_SHIFT] > 0) { in __ceph_caps_file_wanted()
1025 if (ci->i_nr_by_mode[RD_SHIFT] >= FMODE_WAIT_BIAS || in __ceph_caps_file_wanted()
1026 time_after(ci->i_last_rd, used_cutoff)) in __ceph_caps_file_wanted()
1028 } else if (time_after(ci->i_last_rd, idle_cutoff)) { in __ceph_caps_file_wanted()
1032 if (ci->i_nr_by_mode[WR_SHIFT] > 0) { in __ceph_caps_file_wanted()
1033 if (ci->i_nr_by_mode[WR_SHIFT] >= FMODE_WAIT_BIAS || in __ceph_caps_file_wanted()
1034 time_after(ci->i_last_wr, used_cutoff)) in __ceph_caps_file_wanted()
1036 } else if (time_after(ci->i_last_wr, idle_cutoff)) { in __ceph_caps_file_wanted()
1042 ci->i_nr_by_mode[LAZY_SHIFT] > 0) in __ceph_caps_file_wanted()
1052 int __ceph_caps_wanted(struct ceph_inode_info *ci) in __ceph_caps_wanted() argument
1054 int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci); in __ceph_caps_wanted()
1055 if (S_ISDIR(ci->netfs.inode.i_mode)) { in __ceph_caps_wanted()
1070 int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check) in __ceph_caps_mds_wanted() argument
1076 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_mds_wanted()
1080 if (cap == ci->i_auth_cap) in __ceph_caps_mds_wanted()
1090 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_is_any_caps() local
1093 spin_lock(&ci->i_ceph_lock); in ceph_is_any_caps()
1094 ret = __ceph_is_any_real_caps(ci); in ceph_is_any_caps()
1095 spin_unlock(&ci->i_ceph_lock); in ceph_is_any_caps()
1109 struct ceph_inode_info *ci = cap->ci; in __ceph_remove_cap() local
1114 if (!ci) { in __ceph_remove_cap()
1119 lockdep_assert_held(&ci->i_ceph_lock); in __ceph_remove_cap()
1121 dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode); in __ceph_remove_cap()
1123 mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc; in __ceph_remove_cap()
1126 rb_erase(&cap->ci_node, &ci->i_caps); in __ceph_remove_cap()
1127 if (ci->i_auth_cap == cap) in __ceph_remove_cap()
1128 ci->i_auth_cap = NULL; in __ceph_remove_cap()
1144 cap->ci = NULL; in __ceph_remove_cap()
1161 cap->cap_ino = ci->i_vino.ino; in __ceph_remove_cap()
1168 if (!__ceph_is_any_real_caps(ci)) { in __ceph_remove_cap()
1173 if (ci->i_wr_ref == 0 && ci->i_snap_realm) in __ceph_remove_cap()
1174 ceph_change_snap_realm(&ci->netfs.inode, NULL); in __ceph_remove_cap()
1176 __cap_delay_cancel(mdsc, ci); in __ceph_remove_cap()
1182 struct ceph_inode_info *ci = cap->ci; in ceph_remove_cap() local
1186 if (!ci) { in ceph_remove_cap()
1191 lockdep_assert_held(&ci->i_ceph_lock); in ceph_remove_cap()
1193 fsc = ceph_inode_to_client(&ci->netfs.inode); in ceph_remove_cap()
1194 WARN_ON_ONCE(ci->i_auth_cap == cap && in ceph_remove_cap()
1195 !list_empty(&ci->i_dirty_item) && in ceph_remove_cap()
1197 !ceph_inode_is_shutdown(&ci->netfs.inode)); in ceph_remove_cap()
1319 void __ceph_remove_caps(struct ceph_inode_info *ci) in __ceph_remove_caps() argument
1325 spin_lock(&ci->i_ceph_lock); in __ceph_remove_caps()
1326 p = rb_first(&ci->i_caps); in __ceph_remove_caps()
1332 spin_unlock(&ci->i_ceph_lock); in __ceph_remove_caps()
1347 struct ceph_inode_info *ci = cap->ci; in __prep_cap() local
1348 struct inode *inode = &ci->netfs.inode; in __prep_cap()
1351 lockdep_assert_held(&ci->i_ceph_lock); in __prep_cap()
1363 ci->i_ceph_flags &= ~CEPH_I_FLUSH; in __prep_cap()
1378 arg->follows = flushing ? ci->i_head_snapc->seq : 0; in __prep_cap()
1383 ci->i_reported_size = arg->size; in __prep_cap()
1384 arg->max_size = ci->i_wanted_max_size; in __prep_cap()
1385 if (cap == ci->i_auth_cap) { in __prep_cap()
1387 ci->i_requested_max_size = arg->max_size; in __prep_cap()
1389 ci->i_requested_max_size = 0; in __prep_cap()
1393 arg->old_xattr_buf = __ceph_build_xattrs_blob(ci); in __prep_cap()
1394 arg->xattr_version = ci->i_xattrs.version; in __prep_cap()
1395 arg->xattr_buf = ci->i_xattrs.blob; in __prep_cap()
1404 arg->btime = ci->i_btime; in __prep_cap()
1415 arg->time_warp_seq = ci->i_time_warp_seq; in __prep_cap()
1421 arg->inline_data = ci->i_inline_version != CEPH_INLINE_NONE; in __prep_cap()
1423 !list_empty(&ci->i_cap_snaps)) { in __prep_cap()
1425 list_for_each_entry_reverse(capsnap, &ci->i_cap_snaps, ci_item) { in __prep_cap()
1442 static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci) in __send_cap() argument
1445 struct inode *inode = &ci->netfs.inode; in __send_cap()
1452 spin_lock(&ci->i_ceph_lock); in __send_cap()
1453 __cap_delay_requeue(arg->session->s_mdsc, ci); in __send_cap()
1454 spin_unlock(&ci->i_ceph_lock); in __send_cap()
1462 wake_up_all(&ci->i_cap_wq); in __send_cap()
1528 static void __ceph_flush_snaps(struct ceph_inode_info *ci, in __ceph_flush_snaps() argument
1530 __releases(ci->i_ceph_lock) in __ceph_flush_snaps()
1531 __acquires(ci->i_ceph_lock) in __ceph_flush_snaps()
1533 struct inode *inode = &ci->netfs.inode; in __ceph_flush_snaps()
1541 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { in __ceph_flush_snaps()
1564 if (list_empty(&ci->i_flushing_item)) { in __ceph_flush_snaps()
1565 list_add_tail(&ci->i_flushing_item, in __ceph_flush_snaps()
1571 &ci->i_cap_flush_list); in __ceph_flush_snaps()
1578 ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS; in __ceph_flush_snaps()
1581 struct ceph_cap *cap = ci->i_auth_cap; in __ceph_flush_snaps()
1592 list_for_each_entry(iter, &ci->i_cap_flush_list, i_list) { in __ceph_flush_snaps()
1606 spin_unlock(&ci->i_ceph_lock); in __ceph_flush_snaps()
1620 spin_lock(&ci->i_ceph_lock); in __ceph_flush_snaps()
1624 void ceph_flush_snaps(struct ceph_inode_info *ci, in ceph_flush_snaps() argument
1627 struct inode *inode = &ci->netfs.inode; in ceph_flush_snaps()
1636 spin_lock(&ci->i_ceph_lock); in ceph_flush_snaps()
1637 if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) { in ceph_flush_snaps()
1641 if (!ci->i_auth_cap) { in ceph_flush_snaps()
1646 mds = ci->i_auth_cap->session->s_mds; in ceph_flush_snaps()
1653 spin_unlock(&ci->i_ceph_lock); in ceph_flush_snaps()
1661 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) in ceph_flush_snaps()
1662 __kick_flushing_caps(mdsc, session, ci, 0); in ceph_flush_snaps()
1664 __ceph_flush_snaps(ci, session); in ceph_flush_snaps()
1666 spin_unlock(&ci->i_ceph_lock); in ceph_flush_snaps()
1674 list_del_init(&ci->i_snap_flush_item); in ceph_flush_snaps()
1683 int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, in __ceph_mark_dirty_caps() argument
1687 ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc; in __ceph_mark_dirty_caps()
1688 struct inode *inode = &ci->netfs.inode; in __ceph_mark_dirty_caps()
1689 int was = ci->i_dirty_caps; in __ceph_mark_dirty_caps()
1692 lockdep_assert_held(&ci->i_ceph_lock); in __ceph_mark_dirty_caps()
1694 if (!ci->i_auth_cap) { in __ceph_mark_dirty_caps()
1701 dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode, in __ceph_mark_dirty_caps()
1704 ci->i_dirty_caps |= mask; in __ceph_mark_dirty_caps()
1706 struct ceph_mds_session *session = ci->i_auth_cap->session; in __ceph_mark_dirty_caps()
1708 WARN_ON_ONCE(ci->i_prealloc_cap_flush); in __ceph_mark_dirty_caps()
1709 swap(ci->i_prealloc_cap_flush, *pcf); in __ceph_mark_dirty_caps()
1711 if (!ci->i_head_snapc) { in __ceph_mark_dirty_caps()
1713 ci->i_head_snapc = ceph_get_snap_context( in __ceph_mark_dirty_caps()
1714 ci->i_snap_realm->cached_context); in __ceph_mark_dirty_caps()
1717 &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap); in __ceph_mark_dirty_caps()
1718 BUG_ON(!list_empty(&ci->i_dirty_item)); in __ceph_mark_dirty_caps()
1720 list_add(&ci->i_dirty_item, &session->s_cap_dirty); in __ceph_mark_dirty_caps()
1722 if (ci->i_flushing_caps == 0) { in __ceph_mark_dirty_caps()
1727 WARN_ON_ONCE(!ci->i_prealloc_cap_flush); in __ceph_mark_dirty_caps()
1729 BUG_ON(list_empty(&ci->i_dirty_item)); in __ceph_mark_dirty_caps()
1730 if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && in __ceph_mark_dirty_caps()
1733 __cap_delay_requeue(mdsc, ci); in __ceph_mark_dirty_caps()
1785 static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci, in __detach_cap_flush_from_ci() argument
1791 if (wake && cf->i_list.prev != &ci->i_cap_flush_list) { in __detach_cap_flush_from_ci()
1811 struct ceph_inode_info *ci = ceph_inode(inode); in __mark_caps_flushing() local
1815 lockdep_assert_held(&ci->i_ceph_lock); in __mark_caps_flushing()
1816 BUG_ON(ci->i_dirty_caps == 0); in __mark_caps_flushing()
1817 BUG_ON(list_empty(&ci->i_dirty_item)); in __mark_caps_flushing()
1818 BUG_ON(!ci->i_prealloc_cap_flush); in __mark_caps_flushing()
1820 flushing = ci->i_dirty_caps; in __mark_caps_flushing()
1823 ceph_cap_string(ci->i_flushing_caps), in __mark_caps_flushing()
1824 ceph_cap_string(ci->i_flushing_caps | flushing)); in __mark_caps_flushing()
1825 ci->i_flushing_caps |= flushing; in __mark_caps_flushing()
1826 ci->i_dirty_caps = 0; in __mark_caps_flushing()
1829 swap(cf, ci->i_prealloc_cap_flush); in __mark_caps_flushing()
1834 list_del_init(&ci->i_dirty_item); in __mark_caps_flushing()
1840 if (list_empty(&ci->i_flushing_item)) { in __mark_caps_flushing()
1841 list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); in __mark_caps_flushing()
1846 list_add_tail(&cf->i_list, &ci->i_cap_flush_list); in __mark_caps_flushing()
1855 __releases(ci->i_ceph_lock) in try_nonblocking_invalidate()
1856 __acquires(ci->i_ceph_lock) in try_nonblocking_invalidate()
1858 struct ceph_inode_info *ci = ceph_inode(inode); in try_nonblocking_invalidate() local
1859 u32 invalidating_gen = ci->i_rdcache_gen; in try_nonblocking_invalidate()
1861 spin_unlock(&ci->i_ceph_lock); in try_nonblocking_invalidate()
1864 spin_lock(&ci->i_ceph_lock); in try_nonblocking_invalidate()
1867 invalidating_gen == ci->i_rdcache_gen) { in try_nonblocking_invalidate()
1871 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1; in try_nonblocking_invalidate()
1878 bool __ceph_should_report_size(struct ceph_inode_info *ci) in __ceph_should_report_size() argument
1880 loff_t size = i_size_read(&ci->netfs.inode); in __ceph_should_report_size()
1882 if (ci->i_flushing_caps & CEPH_CAP_FILE_WR) in __ceph_should_report_size()
1884 if (size >= ci->i_max_size) in __ceph_should_report_size()
1887 if (ci->i_max_size > ci->i_reported_size && in __ceph_should_report_size()
1888 (size << 1) >= ci->i_max_size + ci->i_reported_size) in __ceph_should_report_size()
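
The visible tests in __ceph_should_report_size() (1880-1888) read as: skip a new report while a CEPH_CAP_FILE_WR flush is already in flight, always report once the size reaches i_max_size, and otherwise report at the midpoint: (size << 1) >= i_max_size + i_reported_size is size >= (i_max_size + i_reported_size) / 2 written without integer-division rounding loss. For example, with i_reported_size = 0 and i_max_size = 4 MiB, the midpoint test fires once the file passes 2 MiB, so the MDS can extend max_size before the writer actually hits the ceiling. (The return values themselves fall on elided lines; this is the conventional reading of these guards.)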
1902 void ceph_check_caps(struct ceph_inode_info *ci, int flags) in ceph_check_caps() argument
1904 struct inode *inode = &ci->netfs.inode; in ceph_check_caps()
1918 spin_lock(&ci->i_ceph_lock); in ceph_check_caps()
1919 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) { in ceph_check_caps()
1920 ci->i_ceph_flags |= CEPH_I_ASYNC_CHECK_CAPS; in ceph_check_caps()
1923 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
1927 if (ci->i_ceph_flags & CEPH_I_FLUSH) in ceph_check_caps()
1931 file_wanted = __ceph_caps_file_wanted(ci); in ceph_check_caps()
1934 used = __ceph_caps_used(ci); in ceph_check_caps()
1942 issued = __ceph_caps_issued(ci, &implemented); in ceph_check_caps()
1954 __ceph_dir_is_complete(ci)) { in ceph_check_caps()
1975 if (ci->i_max_size == 0) in ceph_check_caps()
1983 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps), in ceph_check_caps()
1984 ceph_cap_string(ci->i_flushing_caps), in ceph_check_caps()
1998 !(ci->i_wb_ref || ci->i_wrbuffer_ref) && /* no dirty pages... */ in ceph_check_caps()
2008 ci->i_rdcache_revoking = ci->i_rdcache_gen; in ceph_check_caps()
2014 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in ceph_check_caps()
2022 ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap)) in ceph_check_caps()
2030 if (ci->i_auth_cap && cap != ci->i_auth_cap) in ceph_check_caps()
2031 cap_used &= ~ci->i_auth_cap->issued; in ceph_check_caps()
2040 if (cap == ci->i_auth_cap && in ceph_check_caps()
2043 if (ci->i_wanted_max_size > ci->i_max_size && in ceph_check_caps()
2044 ci->i_wanted_max_size > ci->i_requested_max_size) { in ceph_check_caps()
2050 if (__ceph_should_report_size(ci)) { in ceph_check_caps()
2056 if (cap == ci->i_auth_cap) { in ceph_check_caps()
2057 if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) { in ceph_check_caps()
2061 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) { in ceph_check_caps()
2086 if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref && in ceph_check_caps()
2109 if (cap == ci->i_auth_cap && in ceph_check_caps()
2110 (ci->i_ceph_flags & in ceph_check_caps()
2112 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) in ceph_check_caps()
2113 __kick_flushing_caps(mdsc, session, ci, 0); in ceph_check_caps()
2114 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) in ceph_check_caps()
2115 __ceph_flush_snaps(ci, session); in ceph_check_caps()
2120 if (cap == ci->i_auth_cap && ci->i_dirty_caps) { in ceph_check_caps()
2121 flushing = ci->i_dirty_caps; in ceph_check_caps()
2140 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
2141 __send_cap(&arg, ci); in ceph_check_caps()
2142 spin_lock(&ci->i_ceph_lock); in ceph_check_caps()
2148 if (__ceph_is_any_real_caps(ci) && in ceph_check_caps()
2149 list_empty(&ci->i_cap_delay_list) && in ceph_check_caps()
2152 __cap_delay_requeue(mdsc, ci); in ceph_check_caps()
2155 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
2170 struct ceph_inode_info *ci = ceph_inode(inode); in try_flush_caps() local
2174 spin_lock(&ci->i_ceph_lock); in try_flush_caps()
2176 if (ci->i_dirty_caps && ci->i_auth_cap) { in try_flush_caps()
2177 struct ceph_cap *cap = ci->i_auth_cap; in try_flush_caps()
2182 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
2186 if (ci->i_ceph_flags & in try_flush_caps()
2188 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) in try_flush_caps()
2189 __kick_flushing_caps(mdsc, session, ci, 0); in try_flush_caps()
2190 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) in try_flush_caps()
2191 __ceph_flush_snaps(ci, session); in try_flush_caps()
2195 flushing = ci->i_dirty_caps; in try_flush_caps()
2200 __ceph_caps_used(ci), __ceph_caps_wanted(ci), in try_flush_caps()
2203 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
2205 __send_cap(&arg, ci); in try_flush_caps()
2207 if (!list_empty(&ci->i_cap_flush_list)) { in try_flush_caps()
2209 list_last_entry(&ci->i_cap_flush_list, in try_flush_caps()
2214 flushing = ci->i_flushing_caps; in try_flush_caps()
2215 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
2227 struct ceph_inode_info *ci = ceph_inode(inode); in caps_are_flushed() local
2230 spin_lock(&ci->i_ceph_lock); in caps_are_flushed()
2231 if (!list_empty(&ci->i_cap_flush_list)) { in caps_are_flushed()
2233 list_first_entry(&ci->i_cap_flush_list, in caps_are_flushed()
2238 spin_unlock(&ci->i_ceph_lock); in caps_are_flushed()
2248 struct ceph_inode_info *ci = ceph_inode(inode); in flush_mdlog_and_wait_inode_unsafe_requests() local
2252 spin_lock(&ci->i_unsafe_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2253 if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) { in flush_mdlog_and_wait_inode_unsafe_requests()
2254 req1 = list_last_entry(&ci->i_unsafe_dirops, in flush_mdlog_and_wait_inode_unsafe_requests()
2259 if (!list_empty(&ci->i_unsafe_iops)) { in flush_mdlog_and_wait_inode_unsafe_requests()
2260 req2 = list_last_entry(&ci->i_unsafe_iops, in flush_mdlog_and_wait_inode_unsafe_requests()
2265 spin_unlock(&ci->i_unsafe_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2289 spin_lock(&ci->i_unsafe_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2291 list_for_each_entry(req, &ci->i_unsafe_dirops, in flush_mdlog_and_wait_inode_unsafe_requests()
2303 list_for_each_entry(req, &ci->i_unsafe_iops, in flush_mdlog_and_wait_inode_unsafe_requests()
2314 spin_unlock(&ci->i_unsafe_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2317 spin_lock(&ci->i_ceph_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2318 if (ci->i_auth_cap) { in flush_mdlog_and_wait_inode_unsafe_requests()
2319 s = ci->i_auth_cap->session; in flush_mdlog_and_wait_inode_unsafe_requests()
2323 spin_unlock(&ci->i_ceph_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2363 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fsync() local
2389 err = wait_event_interruptible(ci->i_cap_wq, in ceph_fsync()
2412 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_write_inode() local
2426 err = wait_event_interruptible(ci->i_cap_wq, in ceph_write_inode()
2432 spin_lock(&ci->i_ceph_lock); in ceph_write_inode()
2433 if (__ceph_caps_dirty(ci)) in ceph_write_inode()
2434 __cap_delay_requeue_front(mdsc, ci); in ceph_write_inode()
2435 spin_unlock(&ci->i_ceph_lock); in ceph_write_inode()
2442 struct ceph_inode_info *ci, in __kick_flushing_caps() argument
2444 __releases(ci->i_ceph_lock) in __kick_flushing_caps()
2445 __acquires(ci->i_ceph_lock) in __kick_flushing_caps()
2447 struct inode *inode = &ci->netfs.inode; in __kick_flushing_caps()
2455 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) in __kick_flushing_caps()
2458 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH; in __kick_flushing_caps()
2460 list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) { in __kick_flushing_caps()
2467 list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) { in __kick_flushing_caps()
2471 cap = ci->i_auth_cap; in __kick_flushing_caps()
2488 __ceph_caps_used(ci), in __kick_flushing_caps()
2489 __ceph_caps_wanted(ci), in __kick_flushing_caps()
2492 spin_unlock(&ci->i_ceph_lock); in __kick_flushing_caps()
2493 __send_cap(&arg, ci); in __kick_flushing_caps()
2503 spin_unlock(&ci->i_ceph_lock); in __kick_flushing_caps()
2518 spin_lock(&ci->i_ceph_lock); in __kick_flushing_caps()
2525 struct ceph_inode_info *ci; in ceph_early_kick_flushing_caps() local
2535 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { in ceph_early_kick_flushing_caps()
2536 spin_lock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2537 cap = ci->i_auth_cap; in ceph_early_kick_flushing_caps()
2540 &ci->netfs.inode, cap, session->s_mds); in ceph_early_kick_flushing_caps()
2541 spin_unlock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2552 if ((cap->issued & ci->i_flushing_caps) != in ceph_early_kick_flushing_caps()
2553 ci->i_flushing_caps) { in ceph_early_kick_flushing_caps()
2560 __kick_flushing_caps(mdsc, session, ci, in ceph_early_kick_flushing_caps()
2563 ci->i_ceph_flags |= CEPH_I_KICK_FLUSH; in ceph_early_kick_flushing_caps()
2566 spin_unlock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2573 struct ceph_inode_info *ci; in ceph_kick_flushing_caps() local
2585 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { in ceph_kick_flushing_caps()
2586 spin_lock(&ci->i_ceph_lock); in ceph_kick_flushing_caps()
2587 cap = ci->i_auth_cap; in ceph_kick_flushing_caps()
2590 &ci->netfs.inode, cap, session->s_mds); in ceph_kick_flushing_caps()
2591 spin_unlock(&ci->i_ceph_lock); in ceph_kick_flushing_caps()
2594 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) { in ceph_kick_flushing_caps()
2595 __kick_flushing_caps(mdsc, session, ci, in ceph_kick_flushing_caps()
2598 spin_unlock(&ci->i_ceph_lock); in ceph_kick_flushing_caps()
2603 struct ceph_inode_info *ci) in ceph_kick_flushing_inode_caps() argument
2606 struct ceph_cap *cap = ci->i_auth_cap; in ceph_kick_flushing_inode_caps()
2608 lockdep_assert_held(&ci->i_ceph_lock); in ceph_kick_flushing_inode_caps()
2610 dout("%s %p flushing %s\n", __func__, &ci->netfs.inode, in ceph_kick_flushing_inode_caps()
2611 ceph_cap_string(ci->i_flushing_caps)); in ceph_kick_flushing_inode_caps()
2613 if (!list_empty(&ci->i_cap_flush_list)) { in ceph_kick_flushing_inode_caps()
2616 list_move_tail(&ci->i_flushing_item, in ceph_kick_flushing_inode_caps()
2621 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid); in ceph_kick_flushing_inode_caps()
2630 void ceph_take_cap_refs(struct ceph_inode_info *ci, int got, in ceph_take_cap_refs() argument
2633 lockdep_assert_held(&ci->i_ceph_lock); in ceph_take_cap_refs()
2636 ci->i_pin_ref++; in ceph_take_cap_refs()
2638 ci->i_rd_ref++; in ceph_take_cap_refs()
2640 ci->i_rdcache_ref++; in ceph_take_cap_refs()
2642 ci->i_fx_ref++; in ceph_take_cap_refs()
2644 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) { in ceph_take_cap_refs()
2646 ci->i_head_snapc = ceph_get_snap_context( in ceph_take_cap_refs()
2647 ci->i_snap_realm->cached_context); in ceph_take_cap_refs()
2649 ci->i_wr_ref++; in ceph_take_cap_refs()
2652 if (ci->i_wb_ref == 0) in ceph_take_cap_refs()
2653 ihold(&ci->netfs.inode); in ceph_take_cap_refs()
2654 ci->i_wb_ref++; in ceph_take_cap_refs()
2656 &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref); in ceph_take_cap_refs()
2682 struct ceph_inode_info *ci = ceph_inode(inode); in try_get_cap_refs() local
2692 spin_lock(&ci->i_ceph_lock); in try_get_cap_refs()
2695 (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) { in try_get_cap_refs()
2702 while (ci->i_truncate_pending) { in try_get_cap_refs()
2703 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2709 spin_lock(&ci->i_ceph_lock); in try_get_cap_refs()
2712 have = __ceph_caps_issued(ci, &implemented); in try_get_cap_refs()
2715 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { in try_get_cap_refs()
2717 inode, endoff, ci->i_max_size); in try_get_cap_refs()
2718 if (endoff > ci->i_requested_max_size) in try_get_cap_refs()
2719 ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN; in try_get_cap_refs()
2726 if (__ceph_have_pending_cap_snap(ci)) { in try_get_cap_refs()
2750 !ci->i_head_snapc && in try_get_cap_refs()
2762 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2773 ceph_take_cap_refs(ci, *got, true); in try_get_cap_refs()
2779 if (ci->i_auth_cap && in try_get_cap_refs()
2781 struct ceph_mds_session *s = ci->i_auth_cap->session; in try_get_cap_refs()
2788 inode, ceph_cap_string(need), ci->i_auth_cap->mds); in try_get_cap_refs()
2798 mds_wanted = __ceph_caps_mds_wanted(ci, false); in try_get_cap_refs()
2812 __ceph_touch_fmode(ci, mdsc, flags); in try_get_cap_refs()
2814 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2835 struct ceph_inode_info *ci = ceph_inode(inode); in check_max_size() local
2839 spin_lock(&ci->i_ceph_lock); in check_max_size()
2840 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) { in check_max_size()
2843 ci->i_wanted_max_size = endoff; in check_max_size()
2846 if (ci->i_auth_cap && in check_max_size()
2847 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) && in check_max_size()
2848 ci->i_wanted_max_size > ci->i_max_size && in check_max_size()
2849 ci->i_wanted_max_size > ci->i_requested_max_size) in check_max_size()
2851 spin_unlock(&ci->i_ceph_lock); in check_max_size()
2853 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY); in check_max_size()
2901 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_get_caps() local
2938 ceph_get_fmode(ci, flags, FMODE_WAIT_BIAS); in ceph_get_caps()
2939 add_wait_queue(&ci->i_cap_wq, &wait); in ceph_get_caps()
2951 remove_wait_queue(&ci->i_cap_wq, &wait); in ceph_get_caps()
2952 ceph_put_fmode(ci, flags, FMODE_WAIT_BIAS); in ceph_get_caps()
2965 ceph_put_cap_refs(ci, _got); in ceph_get_caps()
2988 if (S_ISREG(ci->netfs.inode.i_mode) && in ceph_get_caps()
2989 ceph_has_inline_data(ci) && in ceph_get_caps()
3005 ceph_put_cap_refs(ci, _got); in ceph_get_caps()
3029 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) in ceph_get_cap_refs() argument
3031 spin_lock(&ci->i_ceph_lock); in ceph_get_cap_refs()
3032 ceph_take_cap_refs(ci, caps, false); in ceph_get_cap_refs()
3033 spin_unlock(&ci->i_ceph_lock); in ceph_get_cap_refs()
3041 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci, in ceph_try_drop_cap_snap() argument
3050 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps)) in ceph_try_drop_cap_snap()
3051 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; in ceph_try_drop_cap_snap()
3075 static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had, in __ceph_put_cap_refs() argument
3078 struct inode *inode = &ci->netfs.inode; in __ceph_put_cap_refs()
3082 spin_lock(&ci->i_ceph_lock); in __ceph_put_cap_refs()
3084 --ci->i_pin_ref; in __ceph_put_cap_refs()
3086 if (--ci->i_rd_ref == 0) in __ceph_put_cap_refs()
3089 if (--ci->i_rdcache_ref == 0) in __ceph_put_cap_refs()
3092 if (--ci->i_fx_ref == 0) in __ceph_put_cap_refs()
3095 if (--ci->i_wb_ref == 0) { in __ceph_put_cap_refs()
3102 inode, ci->i_wb_ref+1, ci->i_wb_ref); in __ceph_put_cap_refs()
3105 if (--ci->i_wr_ref == 0) { in __ceph_put_cap_refs()
3108 if (ci->i_wrbuffer_ref_head == 0 && in __ceph_put_cap_refs()
3109 ci->i_dirty_caps == 0 && in __ceph_put_cap_refs()
3110 ci->i_flushing_caps == 0) { in __ceph_put_cap_refs()
3111 BUG_ON(!ci->i_head_snapc); in __ceph_put_cap_refs()
3112 ceph_put_snap_context(ci->i_head_snapc); in __ceph_put_cap_refs()
3113 ci->i_head_snapc = NULL; in __ceph_put_cap_refs()
3116 if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm) in __ceph_put_cap_refs()
3120 if (check_flushsnaps && __ceph_have_pending_cap_snap(ci)) { in __ceph_put_cap_refs()
3122 list_last_entry(&ci->i_cap_snaps, in __ceph_put_cap_refs()
3127 if (ceph_try_drop_cap_snap(ci, capsnap)) in __ceph_put_cap_refs()
3130 else if (__ceph_finish_cap_snap(ci, capsnap)) in __ceph_put_cap_refs()
3134 spin_unlock(&ci->i_ceph_lock); in __ceph_put_cap_refs()
3142 ceph_check_caps(ci, 0); in __ceph_put_cap_refs()
3144 ceph_flush_snaps(ci, NULL); in __ceph_put_cap_refs()
3156 wake_up_all(&ci->i_cap_wq); in __ceph_put_cap_refs()
3161 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) in ceph_put_cap_refs() argument
3163 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_SYNC); in ceph_put_cap_refs()
3166 void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had) in ceph_put_cap_refs_async() argument
3168 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC); in ceph_put_cap_refs_async()
3171 void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had) in ceph_put_cap_refs_no_check_caps() argument
3173 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_NO_CHECK); in ceph_put_cap_refs_no_check_caps()
3183 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, in ceph_put_wrbuffer_cap_refs() argument
3186 struct inode *inode = &ci->netfs.inode; in ceph_put_wrbuffer_cap_refs()
3193 spin_lock(&ci->i_ceph_lock); in ceph_put_wrbuffer_cap_refs()
3194 ci->i_wrbuffer_ref -= nr; in ceph_put_wrbuffer_cap_refs()
3195 if (ci->i_wrbuffer_ref == 0) { in ceph_put_wrbuffer_cap_refs()
3200 if (ci->i_head_snapc == snapc) { in ceph_put_wrbuffer_cap_refs()
3201 ci->i_wrbuffer_ref_head -= nr; in ceph_put_wrbuffer_cap_refs()
3202 if (ci->i_wrbuffer_ref_head == 0 && in ceph_put_wrbuffer_cap_refs()
3203 ci->i_wr_ref == 0 && in ceph_put_wrbuffer_cap_refs()
3204 ci->i_dirty_caps == 0 && in ceph_put_wrbuffer_cap_refs()
3205 ci->i_flushing_caps == 0) { in ceph_put_wrbuffer_cap_refs()
3206 BUG_ON(!ci->i_head_snapc); in ceph_put_wrbuffer_cap_refs()
3207 ceph_put_snap_context(ci->i_head_snapc); in ceph_put_wrbuffer_cap_refs()
3208 ci->i_head_snapc = NULL; in ceph_put_wrbuffer_cap_refs()
3212 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr, in ceph_put_wrbuffer_cap_refs()
3213 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, in ceph_put_wrbuffer_cap_refs()
3216 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) { in ceph_put_wrbuffer_cap_refs()
3228 WARN_ON_ONCE(ci->i_auth_cap); in ceph_put_wrbuffer_cap_refs()
3236 if (ceph_try_drop_cap_snap(ci, capsnap)) { in ceph_put_wrbuffer_cap_refs()
3239 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; in ceph_put_wrbuffer_cap_refs()
3247 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, in ceph_put_wrbuffer_cap_refs()
3248 ci->i_wrbuffer_ref, capsnap->dirty_pages, in ceph_put_wrbuffer_cap_refs()
3254 spin_unlock(&ci->i_ceph_lock); in ceph_put_wrbuffer_cap_refs()
3257 ceph_check_caps(ci, 0); in ceph_put_wrbuffer_cap_refs()
3259 ceph_flush_snaps(ci, NULL); in ceph_put_wrbuffer_cap_refs()
3262 wake_up_all(&ci->i_cap_wq); in ceph_put_wrbuffer_cap_refs()
3328 __releases(ci->i_ceph_lock) in handle_cap_grant()
3331 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_grant() local
3360 !(ci->i_wrbuffer_ref || ci->i_wb_ref)) { in handle_cap_grant()
3364 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { in handle_cap_grant()
3366 ci->i_rdcache_revoking = ci->i_rdcache_gen; in handle_cap_grant()
3383 WARN_ON(cap != ci->i_auth_cap); in handle_cap_grant()
3393 __check_cap_issue(ci, cap, newcaps); in handle_cap_grant()
3408 ci->i_btime = extra_info->btime; in handle_cap_grant()
3426 if (version > ci->i_xattrs.version) { in handle_cap_grant()
3429 if (ci->i_xattrs.blob) in handle_cap_grant()
3430 ceph_buffer_put(ci->i_xattrs.blob); in handle_cap_grant()
3431 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf); in handle_cap_grant()
3432 ci->i_xattrs.version = version; in handle_cap_grant()
3450 ci->i_files = extra_info->nfiles; in handle_cap_grant()
3451 ci->i_subdirs = extra_info->nsubdirs; in handle_cap_grant()
3456 s64 old_pool = ci->i_layout.pool_id; in handle_cap_grant()
3459 ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout); in handle_cap_grant()
3460 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns, in handle_cap_grant()
3461 lockdep_is_held(&ci->i_ceph_lock)); in handle_cap_grant()
3462 rcu_assign_pointer(ci->i_layout.pool_ns, extra_info->pool_ns); in handle_cap_grant()
3464 if (ci->i_layout.pool_id != old_pool || in handle_cap_grant()
3466 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; in handle_cap_grant()
3477 if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) { in handle_cap_grant()
3478 if (max_size != ci->i_max_size) { in handle_cap_grant()
3480 ci->i_max_size, max_size); in handle_cap_grant()
3481 ci->i_max_size = max_size; in handle_cap_grant()
3482 if (max_size >= ci->i_wanted_max_size) { in handle_cap_grant()
3483 ci->i_wanted_max_size = 0; /* reset */ in handle_cap_grant()
3484 ci->i_requested_max_size = 0; in handle_cap_grant()
3491 wanted = __ceph_caps_wanted(ci); in handle_cap_grant()
3492 used = __ceph_caps_used(ci); in handle_cap_grant()
3493 dirty = __ceph_caps_dirty(ci); in handle_cap_grant()
3529 else if (cap == ci->i_auth_cap) in handle_cap_grant()
3545 if (cap == ci->i_auth_cap && in handle_cap_grant()
3546 __ceph_caps_revoking_other(ci, cap, newcaps)) in handle_cap_grant()
3558 extra_info->inline_version >= ci->i_inline_version) { in handle_cap_grant()
3559 ci->i_inline_version = extra_info->inline_version; in handle_cap_grant()
3560 if (ci->i_inline_version != CEPH_INLINE_NONE && in handle_cap_grant()
3566 if (ci->i_auth_cap == cap) { in handle_cap_grant()
3570 if (ci->i_requested_max_size > max_size || in handle_cap_grant()
3573 ci->i_requested_max_size = 0; in handle_cap_grant()
3577 ceph_kick_flushing_inode_caps(session, ci); in handle_cap_grant()
3581 spin_unlock(&ci->i_ceph_lock); in handle_cap_grant()
3602 wake_up_all(&ci->i_cap_wq); in handle_cap_grant()
3606 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL); in handle_cap_grant()
3608 ceph_check_caps(ci, CHECK_CAPS_NOINVAL); in handle_cap_grant()
3619 __releases(ci->i_ceph_lock) in handle_cap_flush_ack()
3621 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_flush_ack() local
3632 list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) { in handle_cap_flush_ack()
3646 wake_ci |= __detach_cap_flush_from_ci(ci, cf); in handle_cap_flush_ack()
3662 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps), in handle_cap_flush_ack()
3663 ceph_cap_string(ci->i_flushing_caps & ~cleaned)); in handle_cap_flush_ack()
3668 ci->i_flushing_caps &= ~cleaned; in handle_cap_flush_ack()
3675 if (ci->i_flushing_caps == 0) { in handle_cap_flush_ack()
3676 if (list_empty(&ci->i_cap_flush_list)) { in handle_cap_flush_ack()
3677 list_del_init(&ci->i_flushing_item); in handle_cap_flush_ack()
3689 if (ci->i_dirty_caps == 0) { in handle_cap_flush_ack()
3691 BUG_ON(!list_empty(&ci->i_dirty_item)); in handle_cap_flush_ack()
3693 if (ci->i_wr_ref == 0 && in handle_cap_flush_ack()
3694 ci->i_wrbuffer_ref_head == 0) { in handle_cap_flush_ack()
3695 BUG_ON(!ci->i_head_snapc); in handle_cap_flush_ack()
3696 ceph_put_snap_context(ci->i_head_snapc); in handle_cap_flush_ack()
3697 ci->i_head_snapc = NULL; in handle_cap_flush_ack()
3700 BUG_ON(list_empty(&ci->i_dirty_item)); in handle_cap_flush_ack()
3706 spin_unlock(&ci->i_ceph_lock); in handle_cap_flush_ack()
3717 wake_up_all(&ci->i_cap_wq); in handle_cap_flush_ack()
3727 struct ceph_inode_info *ci = ceph_inode(inode); in __ceph_remove_capsnap() local
3731 lockdep_assert_held(&ci->i_ceph_lock); in __ceph_remove_capsnap()
3733 dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci); in __ceph_remove_capsnap()
3736 ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush); in __ceph_remove_capsnap()
3741 if (list_empty(&ci->i_cap_flush_list)) in __ceph_remove_capsnap()
3742 list_del_init(&ci->i_flushing_item); in __ceph_remove_capsnap()
3753 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_remove_capsnap() local
3755 lockdep_assert_held(&ci->i_ceph_lock); in ceph_remove_capsnap()
3771 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_flushsnap_ack() local
3779 inode, ci, session->s_mds, follows); in handle_cap_flushsnap_ack()
3781 spin_lock(&ci->i_ceph_lock); in handle_cap_flushsnap_ack()
3782 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) { in handle_cap_flushsnap_ack()
3799 spin_unlock(&ci->i_ceph_lock); in handle_cap_flushsnap_ack()
3805 wake_up_all(&ci->i_cap_wq); in handle_cap_flushsnap_ack()
3821 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_trunc() local
3828 int dirty = __ceph_caps_dirty(ci); in handle_cap_trunc()
3832 lockdep_assert_held(&ci->i_ceph_lock); in handle_cap_trunc()
3858 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_export() local
3876 inode, ci, mds, mseq, target); in handle_cap_export()
3879 spin_lock(&ci->i_ceph_lock); in handle_cap_export()
3880 cap = __get_cap_for_mds(ci, mds); in handle_cap_export()
3904 tcap = __get_cap_for_mds(ci, target); in handle_cap_export()
3915 if (cap == ci->i_auth_cap) { in handle_cap_export()
3916 ci->i_auth_cap = tcap; in handle_cap_export()
3917 change_auth_cap_ses(ci, tcap->session); in handle_cap_export()
3924 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0; in handle_cap_export()
3929 if (!list_empty(&ci->i_cap_flush_list) && in handle_cap_export()
3930 ci->i_auth_cap == tcap) { in handle_cap_export()
3932 list_move_tail(&ci->i_flushing_item, in handle_cap_export()
3941 spin_unlock(&ci->i_ceph_lock); in handle_cap_export()
3967 spin_unlock(&ci->i_ceph_lock); in handle_cap_export()
3989 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_import() local
4011 inode, ci, mds, mseq, peer); in handle_cap_import()
4013 cap = __get_cap_for_mds(ci, mds); in handle_cap_import()
4016 spin_unlock(&ci->i_ceph_lock); in handle_cap_import()
4018 spin_lock(&ci->i_ceph_lock); in handle_cap_import()
4029 __ceph_caps_issued(ci, &issued); in handle_cap_import()
4030 issued |= __ceph_caps_dirty(ci); in handle_cap_import()
4035 ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL; in handle_cap_import()
4068 struct ceph_inode_info *ci; in ceph_handle_caps() local
4203 ci = ceph_inode(inode); in ceph_handle_caps()
4231 spin_lock(&ci->i_ceph_lock); in ceph_handle_caps()
4242 spin_lock(&ci->i_ceph_lock); in ceph_handle_caps()
4248 spin_unlock(&ci->i_ceph_lock); in ceph_handle_caps()
4256 __ceph_caps_issued(ci, &extra_info.issued); in ceph_handle_caps()
4257 extra_info.issued |= __ceph_caps_dirty(ci); in ceph_handle_caps()
4269 spin_unlock(&ci->i_ceph_lock); in ceph_handle_caps()
4275 spin_unlock(&ci->i_ceph_lock); in ceph_handle_caps()
4318 struct ceph_inode_info *ci; in ceph_check_delayed_caps() local
4327 ci = list_first_entry(&mdsc->cap_delay_list, in ceph_check_delayed_caps()
4330 if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) { in ceph_check_delayed_caps()
4332 delay = ci->i_hold_caps_max; in ceph_check_delayed_caps()
4335 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && in ceph_check_delayed_caps()
4336 time_before(jiffies, ci->i_hold_caps_max)) in ceph_check_delayed_caps()
4338 list_del_init(&ci->i_cap_delay_list); in ceph_check_delayed_caps()
4340 inode = igrab(&ci->netfs.inode); in ceph_check_delayed_caps()
4344 ceph_check_caps(ci, 0); in ceph_check_delayed_caps()
4360 struct ceph_inode_info *ci; in flush_dirty_session_caps() local
4366 ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info, in flush_dirty_session_caps()
4368 inode = &ci->netfs.inode; in flush_dirty_session_caps()
4373 ceph_check_caps(ci, CHECK_CAPS_FLUSH); in flush_dirty_session_caps()
4386 void __ceph_touch_fmode(struct ceph_inode_info *ci, in __ceph_touch_fmode() argument
4391 ci->i_last_rd = now; in __ceph_touch_fmode()
4393 ci->i_last_wr = now; in __ceph_touch_fmode()
4396 __ceph_is_any_real_caps(ci) && in __ceph_touch_fmode()
4397 list_empty(&ci->i_cap_delay_list)) in __ceph_touch_fmode()
4398 __cap_delay_requeue(mdsc, ci); in __ceph_touch_fmode()
4401 void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count) in ceph_get_fmode() argument
4403 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); in ceph_get_fmode()
4411 spin_lock(&ci->i_ceph_lock); in ceph_get_fmode()
4418 if (i && ci->i_nr_by_mode[i]) in ceph_get_fmode()
4422 ci->i_nr_by_mode[i] += count; in ceph_get_fmode()
4427 spin_unlock(&ci->i_ceph_lock); in ceph_get_fmode()
4435 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count) in ceph_put_fmode() argument
4437 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); in ceph_put_fmode()
4445 spin_lock(&ci->i_ceph_lock); in ceph_put_fmode()
4448 BUG_ON(ci->i_nr_by_mode[i] < count); in ceph_put_fmode()
4449 ci->i_nr_by_mode[i] -= count; in ceph_put_fmode()
4457 if (i && ci->i_nr_by_mode[i]) in ceph_put_fmode()
4463 spin_unlock(&ci->i_ceph_lock); in ceph_put_fmode()
4474 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_drop_caps_for_unlink() local
4477 spin_lock(&ci->i_ceph_lock); in ceph_drop_caps_for_unlink()
4479 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); in ceph_drop_caps_for_unlink()
4481 if (__ceph_caps_dirty(ci)) { in ceph_drop_caps_for_unlink()
4484 __cap_delay_requeue_front(mdsc, ci); in ceph_drop_caps_for_unlink()
4487 spin_unlock(&ci->i_ceph_lock); in ceph_drop_caps_for_unlink()
4502 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_encode_inode_release() local
4508 spin_lock(&ci->i_ceph_lock); in ceph_encode_inode_release()
4509 used = __ceph_caps_used(ci); in ceph_encode_inode_release()
4510 dirty = __ceph_caps_dirty(ci); in ceph_encode_inode_release()
4519 cap = __get_cap_for_mds(ci, mds); in ceph_encode_inode_release()
4535 int wanted = __ceph_caps_wanted(ci); in ceph_encode_inode_release()
4546 if (cap == ci->i_auth_cap && in ceph_encode_inode_release()
4548 ci->i_requested_max_size = 0; in ceph_encode_inode_release()
4571 spin_unlock(&ci->i_ceph_lock); in ceph_encode_inode_release()
4619 struct ceph_inode_info *ci = ceph_inode(inode); in remove_capsnaps() local
4623 lockdep_assert_held(&ci->i_ceph_lock); in remove_capsnaps()
4625 dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode); in remove_capsnaps()
4627 while (!list_empty(&ci->i_cap_snaps)) { in remove_capsnaps()
4628 capsnap = list_first_entry(&ci->i_cap_snaps, in remove_capsnaps()
4635 wake_up_all(&ci->i_cap_wq); in remove_capsnaps()
4644 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_purge_inode_cap() local
4649 lockdep_assert_held(&ci->i_ceph_lock); in ceph_purge_inode_cap()
4652 cap, ci, &ci->netfs.inode); in ceph_purge_inode_cap()
4654 is_auth = (cap == ci->i_auth_cap); in ceph_purge_inode_cap()
4662 if (ci->i_wrbuffer_ref > 0) in ceph_purge_inode_cap()
4669 while (!list_empty(&ci->i_cap_flush_list)) { in ceph_purge_inode_cap()
4670 cf = list_first_entry(&ci->i_cap_flush_list, in ceph_purge_inode_cap()
4678 if (!list_empty(&ci->i_dirty_item)) { in ceph_purge_inode_cap()
4681 ceph_cap_string(ci->i_dirty_caps), in ceph_purge_inode_cap()
4683 ci->i_dirty_caps = 0; in ceph_purge_inode_cap()
4684 list_del_init(&ci->i_dirty_item); in ceph_purge_inode_cap()
4687 if (!list_empty(&ci->i_flushing_item)) { in ceph_purge_inode_cap()
4690 ceph_cap_string(ci->i_flushing_caps), in ceph_purge_inode_cap()
4692 ci->i_flushing_caps = 0; in ceph_purge_inode_cap()
4693 list_del_init(&ci->i_flushing_item); in ceph_purge_inode_cap()
4702 if (ci->i_wrbuffer_ref_head == 0 && in ceph_purge_inode_cap()
4703 ci->i_wr_ref == 0 && in ceph_purge_inode_cap()
4704 ci->i_dirty_caps == 0 && in ceph_purge_inode_cap()
4705 ci->i_flushing_caps == 0) { in ceph_purge_inode_cap()
4706 ceph_put_snap_context(ci->i_head_snapc); in ceph_purge_inode_cap()
4707 ci->i_head_snapc = NULL; in ceph_purge_inode_cap()
4711 if (atomic_read(&ci->i_filelock_ref) > 0) { in ceph_purge_inode_cap()
4713 ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK; in ceph_purge_inode_cap()
4718 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { in ceph_purge_inode_cap()
4719 cf = ci->i_prealloc_cap_flush; in ceph_purge_inode_cap()
4720 ci->i_prealloc_cap_flush = NULL; in ceph_purge_inode_cap()
4725 if (!list_empty(&ci->i_cap_snaps)) in ceph_purge_inode_cap()