Cross-references for i_gl under /fs/gfs2/
inode.c
    136  &ip->i_gl);  in gfs2_inode_lookup()
    192  glock_set_object(ip->i_gl, ip);  in gfs2_inode_lookup()
    229  if (ip->i_gl) {  in gfs2_inode_lookup()
    230  gfs2_glock_put(ip->i_gl);  in gfs2_inode_lookup()
    231  ip->i_gl = NULL;  in gfs2_inode_lookup()
    445  struct gfs2_glock *gl = ip->i_gl;  in gfs2_final_release_pages()
    851  glock_set_object(ip->i_gl, ip);  in gfs2_create_inode()
    914  if (ip->i_gl) {  in gfs2_create_inode()
    915  gfs2_glock_put(ip->i_gl);  in gfs2_create_inode()
    916  ip->i_gl = NULL;  in gfs2_create_inode()
    [all …]
|
super.c
    136  struct gfs2_glock *j_gl = ip->i_gl;  in gfs2_make_fs_rw()
    251  gfs2_trans_add_meta(m_ip->i_gl, m_bh);  in update_statfs()
    491  if (unlikely(!ip->i_gl))  in gfs2_dirty_inode()
    518  gfs2_trans_add_meta(ip->i_gl, bh);  in gfs2_dirty_inode()
    1223  glock_needs_demote(ip->i_gl),  in gfs2_upgrade_iopen_glock()
    1227  if (glock_needs_demote(ip->i_gl))  in gfs2_upgrade_iopen_glock()
    1331  if (!ret && ip->i_gl)  in evict_unlinked_inode()
    1358  gfs2_ail_flush(ip->i_gl, 0);  in evict_linked_inode()
    1455  if (ip->i_gl) {  in gfs2_evict_inode()
    1456  glock_clear_object(ip->i_gl, ip);  in gfs2_evict_inode()
    [all …]
|
xattr.c
    282  gfs2_trans_add_meta(ip->i_gl, bh);  in ea_dealloc_unstuffed()
    508  gfs2_trans_add_meta(ip->i_gl, bh[x]);  in gfs2_iter_unstuffed()
    650  *bhp = gfs2_meta_new(ip->i_gl, block);  in ea_alloc_blk()
    651  gfs2_trans_add_meta(ip->i_gl, *bhp);  in ea_alloc_blk()
    712  bh = gfs2_meta_new(ip->i_gl, block);  in ea_write()
    713  gfs2_trans_add_meta(ip->i_gl, bh);  in ea_write()
    885  gfs2_trans_add_meta(ip->i_gl, bh);  in ea_set_simple_noalloc()
    1005  gfs2_trans_add_meta(ip->i_gl, indbh);  in ea_set_block()
    1013  indbh = gfs2_meta_new(ip->i_gl, blk);  in ea_set_block()
    1336  gfs2_trans_add_meta(ip->i_gl, indbh);  in ea_dealloc_indirect()
    [all …]
|
util.c
    61  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |  in check_journal_clean()
    123  struct gfs2_glock *i_gl;  in signal_our_withdraw()  (local)
    134  i_gl = ip->i_gl;  in signal_our_withdraw()
    189  wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);  in signal_our_withdraw()
    209  if (i_gl->gl_ops->go_unlocked) {  in signal_our_withdraw()
    210  set_bit(GLF_UNLOCKED, &i_gl->gl_flags);  in signal_our_withdraw()
    211  wait_on_bit(&i_gl->gl_flags, GLF_UNLOCKED, TASK_UNINTERRUPTIBLE);  in signal_our_withdraw()
    448  gfs2_dump_glock(NULL, ip->i_gl, 1);  in gfs2_consist_inode_i()
|
file.c
    169  gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);  in gfs2_fileattr_get()
    245  gfs2_log_flush(sdp, ip->i_gl,  in do_gfs2_set_flags()
    265  gfs2_trans_add_meta(ip->i_gl, bh);  in do_gfs2_set_flags()
    433  gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);  in gfs2_page_mkwrite()
    458  set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);  in gfs2_page_mkwrite()
    560  gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);  in gfs2_fault()
    770  gfs2_ail_flush(ip->i_gl, 1);  in gfs2_fsync()
    840  gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);  in gfs2_file_direct_read()
    903  gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);  in gfs2_file_direct_write()
    983  gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);  in gfs2_file_read_iter()
    [all …]
|
acl.c
    70  if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {  in gfs2_get_acl()
    71  int ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,  in gfs2_get_acl()
    129  if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {  in gfs2_set_acl()
    130  ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);  in gfs2_set_acl()
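The acl.c call sites follow a common GFS2 idiom: take the inode glock ip->i_gl only if the current process does not already hold it, and drop it again only if it was acquired here. The sketch below shows that pattern in isolation; example_acl_work() is a hypothetical placeholder, the holder flags and error handling are simplified, and the code is not copied from acl.c.

/* Sketch of the "lock i_gl unless already held" idiom seen in the acl.c
 * hits above.  example_acl_work() is a hypothetical stand-in for whatever
 * needs the glock.
 */
#include "incore.h"
#include "glock.h"

static int example_acl_work(struct gfs2_inode *ip)
{
	return 0;		/* placeholder for work done under the glock */
}

static int example_with_inode_glock(struct gfs2_inode *ip)
{
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_mark_uninitialized(&gh);
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		/* Not already a holder: acquire the glock in shared mode. */
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
	}

	ret = example_acl_work(ip);

	/* Only drop the glock if it was acquired above. */
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
	return ret;
}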
|
dir.c
    95  bh = gfs2_meta_new(ip->i_gl, block);  in gfs2_dir_get_new_buffer()
    96  gfs2_trans_add_meta(ip->i_gl, bh);  in gfs2_dir_get_new_buffer()
    130  gfs2_trans_add_meta(ip->i_gl, dibh);  in gfs2_dir_write_stuffed()
    211  gfs2_trans_add_meta(ip->i_gl, bh);  in gfs2_dir_write_data()
    233  gfs2_trans_add_meta(ip->i_gl, dibh);  in gfs2_dir_write_data()
    682  gfs2_trans_add_meta(dip->i_gl, bh);  in dirent_del()
    721  gfs2_trans_add_meta(ip->i_gl, bh);  in do_init_dirent()
    878  bh = gfs2_meta_new(ip->i_gl, bn);  in new_leaf()
    883  gfs2_trans_add_meta(ip->i_gl, bh);  in new_leaf()
    1484  struct gfs2_glock *gl = ip->i_gl;  in gfs2_dir_readahead()
    [all …]
|
dentry.c
    60  had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);  in gfs2_drevalidate()
    62  error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);  in gfs2_drevalidate()
|
bmap.c
    81  gfs2_trans_add_data(ip->i_gl, bh);  in gfs2_unstuffer_folio()
    127  gfs2_trans_add_meta(ip->i_gl, dibh);  in __gfs2_unstuff_inode()
    680  gfs2_trans_add_meta(ip->i_gl, dibh);  in __gfs2_iomap_alloc()
    752  gfs2_indirect_init(mp, ip->i_gl, i,  in __gfs2_iomap_alloc()
    1391  gfs2_trans_add_meta(ip->i_gl, dibh);  in trunc_start()
    1563  gfs2_trans_add_meta(ip->i_gl, bh);  in sweep_bh_for_rgrps()
    1597  gfs2_trans_add_meta(ip->i_gl, dibh);  in sweep_bh_for_rgrps()
    1964  gfs2_trans_add_meta(ip->i_gl, dibh);  in punch_hole()
    2009  gfs2_trans_add_meta(ip->i_gl, dibh);  in trunc_end()
    2108  gfs2_trans_add_meta(ip->i_gl, dibh);  in do_grow()
    [all …]
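Most of the bmap.c (and xattr.c) references pass ip->i_gl to gfs2_trans_add_meta(), which journals a metadata buffer against that glock inside an open transaction. A minimal sketch of that surrounding sequence, assuming the inode glock is already held exclusively; the reservation size and helper name are illustrative, not taken from any caller above.

/* Sketch only: the transaction bracket around gfs2_trans_add_meta(ip->i_gl, ...)
 * as used throughout bmap.c and xattr.c.  Reservation sizes are illustrative.
 */
#include <linux/buffer_head.h>
#include "incore.h"
#include "meta_io.h"
#include "trans.h"

static int example_touch_dinode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	/* Reserve journal space before dirtying any metadata. */
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	/* Add the dinode buffer to the transaction under the inode glock;
	 * once added it may be modified and is written back via the journal. */
	gfs2_trans_add_meta(ip->i_gl, dibh);
	/* ... update dibh->b_data here ... */
	brelse(dibh);
out:
	gfs2_trans_end(sdp);
	return error;
}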
|
aops.c
    115  gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio));  in __gfs2_jdata_write_folio()
    136  if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))  in gfs2_jdata_writeback()
    374  gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |  in gfs2_jdata_writepages()
    572  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);  in gfs2_bmap()
|
lops.c
    781  struct gfs2_glock *gl = ip->i_gl;  in buf_lo_scan_elements()
    836  gfs2_inode_metasync(ip->i_gl);  in buf_lo_after_scan()
    842  gfs2_inode_metasync(ip->i_gl);  in buf_lo_after_scan()
    1005  struct gfs2_glock *gl = ip->i_gl;  in databuf_lo_scan_elements()
    1056  gfs2_inode_metasync(ip->i_gl);  in databuf_lo_after_scan()
    1063  gfs2_inode_metasync(ip->i_gl);  in databuf_lo_after_scan()
|
meta_io.c
    449  if (!ip->i_gl)  in gfs2_journal_wipe()
    455  bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);  in gfs2_journal_wipe()
    490  struct gfs2_glock *gl = ip->i_gl;  in gfs2_meta_buffer()
|
quota.c
    422  error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,  in bh_get()
    680  gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);  in do_qc()
    768  gfs2_trans_add_data(ip->i_gl, bh);  in gfs2_write_buf_to_page()
    923  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);  in do_sync()
    981  gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,  in do_sync()
    1050  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);  in do_glock()
    1427  bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);  in gfs2_quota_init()
    1739  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);  in gfs2_set_dqblk()
|
recovery.c
    36  struct gfs2_glock *gl = ip->i_gl;  in gfs2_replay_read_block()
    348  gfs2_inode_metasync(ip->i_gl);  in update_statfs_inode()
    442  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,  in gfs2_recover_func()
|
trace_gfs2.h
    463  __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
    499  __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
    531  __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
|
ops_fstype.c
    547  error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);  in gfs2_jindex_hold()
    661  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_NOPID,  in init_statfs()
    756  sdp->sd_jinode_gl = ip->i_gl;  in init_journal()
    757  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,  in init_journal()
    929  error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_NOPID,  in init_per_node()
|
export.c
    112  error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);  in gfs2_get_name()
|
incore.h
    390  struct gfs2_glock *i_gl;  (member)
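The single incore.h hit is the declaration itself: i_gl is the per-inode glock pointer inside struct gfs2_inode, which every file above reaches through ip->i_gl. An abbreviated sketch of the structure for orientation; the neighbouring fields are recalled from incore.h and trimmed, not copied from line 390.

/* Abbreviated from fs/gfs2/incore.h; only a few fields are shown and
 * their exact order and comments are approximate.
 */
struct gfs2_inode {
	struct inode i_inode;		/* embedded VFS inode */
	u64 i_no_addr;			/* dinode block address */
	u64 i_no_formal_ino;
	unsigned long i_flags;		/* GIF_... flags */
	struct gfs2_glock *i_gl;	/* the inode glock referenced above */
	struct gfs2_holder i_iopen_gh;	/* holder for the "iopen" glock */
	/* ... allocation, quota and resource-group state omitted ... */
};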
|
glops.c
    603  struct gfs2_glock *j_gl = ip->i_gl;  in freeze_go_xmote_bh()
|
glock.c
    675  if (gl == m_ip->i_gl)  in is_system_glock()
    1011  gfs2_glock_poke(ip->i_gl);  in gfs2_try_evict()
|
rgrp.c
    1037  struct gfs2_glock *gl = ip->i_gl;  in gfs2_rindex_update()
    2456  gfs2_trans_add_meta(ip->i_gl, dibh);  in gfs2_alloc_blocks()
|