References to mp (the struct xfs_mount pointer) in fs/xfs/xfs_icache.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks mp as a parameter of that function, "local" marks it as a local variable.
47 static int xfs_icwalk(struct xfs_mount *mp,
72 struct xfs_mount *mp, in xfs_inode_alloc() argument
81 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL); in xfs_inode_alloc()
83 if (inode_init_always(mp->m_super, VFS_I(ip))) { in xfs_inode_alloc()
93 XFS_STATS_INC(mp, vn_active); in xfs_inode_alloc()
99 ip->i_mount = mp; in xfs_inode_alloc()
107 ip->i_diflags2 = mp->m_ino_geo.new_diflags2; in xfs_inode_alloc()
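The xfs_inode_alloc() references above follow a common VFS pattern: carve the inode out of a filesystem-owned slab (alloc_inode_sb() also ties the object to the superblock's LRU for memcg-aware accounting), then initialize the embedded VFS inode before the XFS-specific fields. A condensed sketch of that shape; sketch_inode_alloc is a hypothetical name and error paths are abbreviated, this is not the verbatim source:

        static struct xfs_inode *
        sketch_inode_alloc(struct xfs_mount *mp, xfs_ino_t ino)
        {
                struct xfs_inode *ip;

                /* __GFP_NOFAIL: the callers cannot tolerate allocation failure */
                ip = alloc_inode_sb(mp->m_super, xfs_inode_cache,
                                GFP_KERNEL | __GFP_NOFAIL);

                if (inode_init_always(mp->m_super, VFS_I(ip))) {
                        /* VFS init failed: return the object to the slab */
                        kmem_cache_free(xfs_inode_cache, ip);
                        return NULL;
                }

                XFS_STATS_INC(mp, vn_active);
                ip->i_mount = mp;       /* back-pointer used by everything below */
                ip->i_ino = ino;
                ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
                return ip;
        }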
190 struct xfs_mount *mp) in xfs_reclaim_work_queue() argument
194 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { in xfs_reclaim_work_queue()
195 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, in xfs_reclaim_work_queue()
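xfs_reclaim_work_queue() (lines 190-195) only re-arms the background reclaim timer while at least one AG still carries XFS_ICI_RECLAIM_TAG, so an idle filesystem stops waking up. The gate looks roughly like this; the timeout derivation from the xfs_syncd_centisecs tunable is recalled from surrounding code and should be treated as an assumption:

        rcu_read_lock();
        if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG))
                queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
                                msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
        rcu_read_unlock();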
209 struct xfs_mount *mp = pag->pag_mount; in xfs_blockgc_queue() local
211 if (!xfs_is_blockgc_enabled(mp)) in xfs_blockgc_queue()
229 struct xfs_mount *mp = pag->pag_mount; in xfs_perag_set_inode_tag() local
244 spin_lock(&mp->m_perag_lock); in xfs_perag_set_inode_tag()
245 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag); in xfs_perag_set_inode_tag()
246 spin_unlock(&mp->m_perag_lock); in xfs_perag_set_inode_tag()
251 xfs_reclaim_work_queue(mp); in xfs_perag_set_inode_tag()
268 struct xfs_mount *mp = pag->pag_mount; in xfs_perag_clear_inode_tag() local
288 spin_lock(&mp->m_perag_lock); in xfs_perag_clear_inode_tag()
289 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag); in xfs_perag_clear_inode_tag()
290 spin_unlock(&mp->m_perag_lock); in xfs_perag_clear_inode_tag()
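xfs_perag_set_inode_tag() and xfs_perag_clear_inode_tag() implement two-level tagging: the per-inode tag lives in the AG-local pag_ici_root radix tree, and only the first set (or last clear) in an AG touches the per-mount m_perag_tree under m_perag_lock, which lets walkers skip whole AGs. A condensed sketch of the set side, with locking asserts and tracepoints dropped (the sketch_ prefix marks it as illustrative):

        static void
        sketch_perag_set_inode_tag(struct xfs_perag *pag, xfs_agino_t agino,
                        unsigned int tag)
        {
                struct xfs_mount *mp = pag->pag_mount;
                bool was_tagged;

                was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
                radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

                if (was_tagged)
                        return;         /* the AG is already marked upstream */

                /* first tagged inode in this AG: mark the AG itself */
                spin_lock(&mp->m_perag_lock);
                radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
                spin_unlock(&mp->m_perag_lock);

                /* kick the matching background worker */
                if (tag == XFS_ICI_RECLAIM_TAG)
                        xfs_reclaim_work_queue(mp);
                else if (tag == XFS_ICI_BLOCKGC_TAG)
                        xfs_blockgc_queue(pag);
        }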
305 struct xfs_mount *mp, in xfs_reinit_inode() argument
317 error = inode_init_always(mp->m_super, inode); in xfs_reinit_inode()
339 struct xfs_mount *mp = ip->i_mount; in xfs_iget_recycle() local
360 error = xfs_reinit_inode(mp, inode); in xfs_iget_recycle()
388 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_iget_recycle()
440 struct xfs_mount *mp) in xfs_inodegc_queue_all() argument
446 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_queue_all()
448 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); in xfs_inodegc_queue_all()
464 struct xfs_mount *mp = ip->i_mount; in xfs_iget_cache_hit() local
545 XFS_STATS_INC(mp, xs_ig_found); in xfs_iget_cache_hit()
551 XFS_STATS_INC(mp, xs_ig_frecycle); in xfs_iget_cache_hit()
565 if (xfs_is_inodegc_enabled(mp)) in xfs_iget_cache_hit()
566 xfs_inodegc_queue_all(mp); in xfs_iget_cache_hit()
572 struct xfs_mount *mp, in xfs_iget_cache_miss() argument
582 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); in xfs_iget_cache_miss()
585 ip = xfs_inode_alloc(mp, ino); in xfs_iget_cache_miss()
603 if (xfs_has_v3inodes(mp) && in xfs_iget_cache_miss()
604 (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) { in xfs_iget_cache_miss()
609 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp); in xfs_iget_cache_miss()
675 XFS_STATS_INC(mp, xs_ig_dup); in xfs_iget_cache_miss()
710 struct xfs_mount *mp, in xfs_iget() argument
725 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) in xfs_iget()
728 XFS_STATS_INC(mp, xs_ig_attempts); in xfs_iget()
731 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); in xfs_iget()
732 agino = XFS_INO_TO_AGINO(mp, ino); in xfs_iget()
749 XFS_STATS_INC(mp, xs_ig_missed); in xfs_iget()
751 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, in xfs_iget()
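xfs_iget() stitches the cache-hit and cache-miss references together: validate the inode number against the AG count, pin the perag, look up the in-core inode under RCU, and dispatch. A trimmed sketch of the core; the XFS_IGET_INCORE short-circuit and the EAGAIN retry loop are elided:

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
        agino = XFS_INO_TO_AGINO(mp, ino);

        rcu_read_lock();
        ip = radix_tree_lookup(&pag->pag_ici_root, agino);
        if (ip) {
                /* hit: revalidate/recycle; drops the RCU lock itself */
                error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
        } else {
                rcu_read_unlock();
                XFS_STATS_INC(mp, xs_ig_missed);
                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                flags, lock_flags);
        }
        xfs_perag_put(pag);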
799 struct xfs_mount *mp, in xfs_icache_inode_is_allocated() argument
807 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip); in xfs_icache_inode_is_allocated()
967 struct xfs_mount *mp) in xfs_want_reclaim_sick() argument
969 return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) || in xfs_want_reclaim_sick()
970 xfs_is_shutdown(mp); in xfs_want_reclaim_sick()
975 struct xfs_mount *mp) in xfs_reclaim_inodes() argument
981 if (xfs_want_reclaim_sick(mp)) in xfs_reclaim_inodes()
984 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { in xfs_reclaim_inodes()
985 xfs_ail_push_all_sync(mp->m_ail); in xfs_reclaim_inodes()
986 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); in xfs_reclaim_inodes()
999 struct xfs_mount *mp, in xfs_reclaim_inodes_nr() argument
1007 if (xfs_want_reclaim_sick(mp)) in xfs_reclaim_inodes_nr()
1011 xfs_reclaim_work_queue(mp); in xfs_reclaim_inodes_nr()
1012 xfs_ail_push_all(mp->m_ail); in xfs_reclaim_inodes_nr()
1014 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); in xfs_reclaim_inodes_nr()
1024 struct xfs_mount *mp) in xfs_reclaim_inodes_count() argument
1030 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { in xfs_reclaim_inodes_count()
1122 struct xfs_mount *mp = container_of(to_delayed_work(work), in xfs_reclaim_worker() local
1125 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL); in xfs_reclaim_worker()
1126 xfs_reclaim_work_queue(mp); in xfs_reclaim_worker()
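xfs_reclaim_inodes() is the synchronous variant of the loop the worker above runs: push the AIL so dirty inodes can be written back, then sweep every AG that still carries the reclaim tag. A trimmed sketch (the sketch_ name is illustrative):

        void sketch_reclaim_inodes(struct xfs_mount *mp)
        {
                struct xfs_icwalk icw = { .icw_flags = 0 };

                /* sick inodes are only reclaimed near unmount/shutdown */
                if (xfs_want_reclaim_sick(mp))
                        icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

                while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
                        xfs_ail_push_all_sync(mp->m_ail);  /* flush metadata first */
                        xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
                }
        }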
1177 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_set_iflag() local
1192 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_set_iflag()
1195 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_set_iflag()
1215 struct xfs_mount *mp = ip->i_mount; in xfs_blockgc_clear_iflag() local
1229 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_blockgc_clear_iflag()
1232 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_blockgc_clear_iflag()
1358 struct xfs_mount *mp) in xfs_blockgc_stop() argument
1363 if (!xfs_clear_blockgc_enabled(mp)) in xfs_blockgc_stop()
1366 for_each_perag(mp, agno, pag) in xfs_blockgc_stop()
1368 trace_xfs_blockgc_stop(mp, __return_address); in xfs_blockgc_stop()
1374 struct xfs_mount *mp) in xfs_blockgc_start() argument
1379 if (xfs_set_blockgc_enabled(mp)) in xfs_blockgc_start()
1382 trace_xfs_blockgc_start(mp, __return_address); in xfs_blockgc_start()
1383 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_start()
1459 struct xfs_mount *mp = pag->pag_mount; in xfs_blockgc_worker() local
1462 trace_xfs_blockgc_worker(mp, __return_address); in xfs_blockgc_worker()
1466 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d", in xfs_blockgc_worker()
1477 struct xfs_mount *mp, in xfs_blockgc_free_space() argument
1482 trace_xfs_blockgc_free_space(mp, icw, _RET_IP_); in xfs_blockgc_free_space()
1484 error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw); in xfs_blockgc_free_space()
1488 xfs_inodegc_flush(mp); in xfs_blockgc_free_space()
1498 struct xfs_mount *mp) in xfs_blockgc_flush_all() argument
1503 trace_xfs_blockgc_flush_all(mp, __return_address); in xfs_blockgc_flush_all()
1510 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_flush_all()
1514 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) in xfs_blockgc_flush_all()
1517 xfs_inodegc_flush(mp); in xfs_blockgc_flush_all()
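xfs_blockgc_flush_all() is why for_each_perag_tag appears twice at lines 1510 and 1514: the first pass pulls every tagged AG's delayed worker forward to run immediately, the second waits for each of them, and inodegc is flushed last because blockgc can queue inactivations. Roughly:

        /* pass 1: make every queued blockgc worker run now */
        for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
                mod_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 0);

        /* pass 2: wait for them all to finish */
        for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
                flush_delayed_work(&pag->pag_blockgc_work);

        xfs_inodegc_flush(mp);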
1532 struct xfs_mount *mp, in xfs_blockgc_free_dquots() argument
1550 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) { in xfs_blockgc_free_dquots()
1551 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id); in xfs_blockgc_free_dquots()
1556 if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) { in xfs_blockgc_free_dquots()
1557 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id); in xfs_blockgc_free_dquots()
1562 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) { in xfs_blockgc_free_dquots()
1571 return xfs_blockgc_free_space(mp, &icw); in xfs_blockgc_free_dquots()
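xfs_blockgc_free_dquots() converts low-space dquots into an icwalk filter: each enforced quota type under its low-space threshold contributes an id and a flag, and one unioned scan then covers them all. The shape of the filter construction; the group and project cases follow the uid case, and iwalk_flags is assumed to be the caller-supplied flags argument:

        struct xfs_icwalk icw = {0};
        bool do_work = false;

        /* union: an inode matching any of the ids below qualifies */
        icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

        if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
                icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
                icw.icw_flags |= XFS_ICWALK_FLAG_UID;
                do_work = true;
        }
        /* ... same pattern for gdqp/icw_gid and pdqp/icw_prid ... */

        if (!do_work)
                return 0;
        return xfs_blockgc_free_space(mp, &icw);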
1651 struct xfs_mount *mp = pag->pag_mount; in xfs_icwalk_ag() local
1704 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) in xfs_icwalk_ag()
1706 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); in xfs_icwalk_ag()
1707 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) in xfs_icwalk_ag()
1756 struct xfs_mount *mp, in xfs_icwalk() argument
1765 for_each_perag_tag(mp, agno, pag, goal) { in xfs_icwalk()
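xfs_icwalk() at line 1756 is the funnel all of the walks above pass through: it visits only AGs whose m_perag_tree tag matches the walk goal and delegates the per-inode batching to xfs_icwalk_ag(). Condensed sketch; the for_each_perag_tag macro takes a perag reference per iteration, hence the explicit put on early exit:

        static int
        sketch_icwalk(struct xfs_mount *mp, enum xfs_icwalk_goal goal,
                        struct xfs_icwalk *icw)
        {
                struct xfs_perag *pag;
                xfs_agnumber_t agno;
                int error, last_error = 0;

                for_each_perag_tag(mp, agno, pag, goal) {
                        error = xfs_icwalk_ag(pag, goal, icw);
                        if (error) {
                                last_error = error;
                                if (error == -EFSCORRUPTED) {
                                        xfs_perag_put(pag); /* drop the loop's ref */
                                        break;
                                }
                        }
                }
                return last_error;
        }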
1810 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_set_reclaimable() local
1813 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) { in xfs_inodegc_set_reclaimable()
1819 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); in xfs_inodegc_set_reclaimable()
1826 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), in xfs_inodegc_set_reclaimable()
1888 struct xfs_mount *mp) in xfs_inodegc_push() argument
1890 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_push()
1892 trace_xfs_inodegc_push(mp, __return_address); in xfs_inodegc_push()
1893 xfs_inodegc_queue_all(mp); in xfs_inodegc_push()
1902 struct xfs_mount *mp) in xfs_inodegc_flush() argument
1904 xfs_inodegc_push(mp); in xfs_inodegc_flush()
1905 trace_xfs_inodegc_flush(mp, __return_address); in xfs_inodegc_flush()
1906 flush_workqueue(mp->m_inodegc_wq); in xfs_inodegc_flush()
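xfs_inodegc_push() and xfs_inodegc_flush() are two strengths of the same request: push only kicks the per-cpu workers, and is a no-op while inodegc is disabled (e.g. during freeze), while flush pushes and then waits for the whole workqueue. In outline (sketch_ names are illustrative):

        void sketch_inodegc_push(struct xfs_mount *mp)
        {
                if (!xfs_is_inodegc_enabled(mp))
                        return;                 /* disabled: leave queues alone */
                xfs_inodegc_queue_all(mp);      /* kick every per-cpu worker */
        }

        void sketch_inodegc_flush(struct xfs_mount *mp)
        {
                sketch_inodegc_push(mp);
                flush_workqueue(mp->m_inodegc_wq);      /* wait for the workers */
        }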
1915 struct xfs_mount *mp) in xfs_inodegc_stop() argument
1917 if (!xfs_clear_inodegc_enabled(mp)) in xfs_inodegc_stop()
1920 xfs_inodegc_queue_all(mp); in xfs_inodegc_stop()
1921 drain_workqueue(mp->m_inodegc_wq); in xfs_inodegc_stop()
1923 trace_xfs_inodegc_stop(mp, __return_address); in xfs_inodegc_stop()
1932 struct xfs_mount *mp) in xfs_inodegc_start() argument
1934 if (xfs_set_inodegc_enabled(mp)) in xfs_inodegc_start()
1937 trace_xfs_inodegc_start(mp, __return_address); in xfs_inodegc_start()
1938 xfs_inodegc_queue_all(mp); in xfs_inodegc_start()
1946 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_rt_file() local
1951 if (__percpu_counter_compare(&mp->m_frextents, in xfs_inodegc_want_queue_rt_file()
1952 mp->m_low_rtexts[XFS_LOWSP_5_PCNT], in xfs_inodegc_want_queue_rt_file()
1974 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_want_queue_work() local
1976 if (items > mp->m_ino_geo.inodes_per_cluster) in xfs_inodegc_want_queue_work()
1979 if (__percpu_counter_compare(&mp->m_fdblocks, in xfs_inodegc_want_queue_work()
1980 mp->m_low_space[XFS_LOWSP_5_PCNT], in xfs_inodegc_want_queue_work()
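Both want_queue predicates lean on batched per-cpu counters: __percpu_counter_compare() does a cheap approximate comparison first and only sums the per-cpu deltas when the count is within the batch slop of the threshold. The free-blocks check has this shape, where XFS_FDBLOCKS_BATCH is assumed to be the batch size used for m_fdblocks updates:

        /* negative result: fdblocks is below the 5% low-space mark */
        if (__percpu_counter_compare(&mp->m_fdblocks,
                                mp->m_low_space[XFS_LOWSP_5_PCNT],
                                XFS_FDBLOCKS_BATCH) < 0)
                return true;    /* low space: inactivate eagerly to free blocks */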
2041 struct xfs_mount *mp = ip->i_mount; in xfs_inodegc_queue() local
2052 gc = get_cpu_ptr(mp->m_inodegc); in xfs_inodegc_queue()
2062 if (!xfs_is_inodegc_enabled(mp)) { in xfs_inodegc_queue()
2070 trace_xfs_inodegc_queue(mp, __return_address); in xfs_inodegc_queue()
2071 mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay); in xfs_inodegc_queue()
2075 trace_xfs_inodegc_throttle(mp, __return_address); in xfs_inodegc_queue()
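xfs_inodegc_queue() at line 2041 is the per-cpu fan-in. The inode goes onto this CPU's lockless list while the CPU is pinned by get_cpu_ptr(), so the delayed work scheduled right after runs locally against a cache-hot list. Trimmed to the queueing steps; the queue_delay selection and the flush-based throttle are elided:

        gc = get_cpu_ptr(mp->m_inodegc);        /* pin this CPU's queue */
        llist_add(&ip->i_gclist, &gc->list);
        WRITE_ONCE(gc->items, READ_ONCE(gc->items) + 1);

        if (xfs_is_inodegc_enabled(mp))
                /* queue_delay: 1 jiffy normally, 0 to expedite */
                mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
        put_cpu_ptr(gc);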
2085 struct xfs_mount *mp, in xfs_inodegc_cpu_dead() argument
2092 dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu); in xfs_inodegc_cpu_dead()
2108 gc = get_cpu_ptr(mp->m_inodegc); in xfs_inodegc_cpu_dead()
2113 if (xfs_is_inodegc_enabled(mp)) { in xfs_inodegc_cpu_dead()
2114 trace_xfs_inodegc_queue(mp, __return_address); in xfs_inodegc_cpu_dead()
2115 mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0); in xfs_inodegc_cpu_dead()
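xfs_inodegc_cpu_dead() migrates work off an offlined CPU: cancel the dead CPU's worker, detach its entire lockless list, and splice it onto the current CPU's queue with one llist_add_batch() call. The splice looks roughly like:

        if (llist_empty(&dead_gc->list))
                return;

        first = dead_gc->list.first;            /* detach the whole list */
        last = first;
        while (last->next)
                last = last->next;
        dead_gc->list.first = NULL;

        gc = get_cpu_ptr(mp->m_inodegc);
        llist_add_batch(first, last, &gc->list);        /* splice in one shot */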
2134 struct xfs_mount *mp = ip->i_mount; in xfs_inode_mark_reclaimable() local
2137 XFS_STATS_INC(mp, vn_reclaim); in xfs_inode_mark_reclaimable()
2173 struct xfs_mount *mp = container_of(shrink, struct xfs_mount, in xfs_inodegc_shrinker_count() local
2178 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_shrinker_count()
2182 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_shrinker_count()
2195 struct xfs_mount *mp = container_of(shrink, struct xfs_mount, in xfs_inodegc_shrinker_scan() local
2201 if (!xfs_is_inodegc_enabled(mp)) in xfs_inodegc_shrinker_scan()
2204 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address); in xfs_inodegc_shrinker_scan()
2207 gc = per_cpu_ptr(mp->m_inodegc, cpu); in xfs_inodegc_shrinker_scan()
2212 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); in xfs_inodegc_shrinker_scan()
2230 struct xfs_mount *mp) in xfs_inodegc_register_shrinker() argument
2232 struct shrinker *shrink = &mp->m_inodegc_shrinker; in xfs_inodegc_register_shrinker()
2240 return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id); in xfs_inodegc_register_shrinker()
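Finally, the shrinker hooks wire inodegc into memory reclaim: count reports work whenever any per-cpu list has entries (so reclaim will call scan), and scan records a shrinker hit and expedites every non-empty worker, since inactivation can free both memory and disk blocks. The heart of the scan pass, where no_items is the local tracking whether anything was kicked:

        for_each_online_cpu(cpu) {
                gc = per_cpu_ptr(mp->m_inodegc, cpu);
                if (!llist_empty(&gc->list)) {
                        unsigned int h = READ_ONCE(gc->shrinker_hits);

                        WRITE_ONCE(gc->shrinker_hits, h + 1);   /* bias queueing */
                        mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
                        no_items = false;
                }
        }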