Lines matching refs:inode — identifier cross-reference listing for fs/f2fs/file.c. Each entry is the source line number, the matching code, and the enclosing function; "argument" and "local" mark lines where inode is a parameter or a local variable of that function.

38 static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size)  in f2fs_zero_post_eof_page()  argument
40 loff_t old_size = i_size_read(inode); in f2fs_zero_post_eof_page()
46 truncate_pagecache(inode, old_size); in f2fs_zero_post_eof_page()
51 struct inode *inode = file_inode(vmf->vma->vm_file); in f2fs_filemap_fault() local
57 f2fs_update_iostat(F2FS_I_SB(inode), inode, in f2fs_filemap_fault()
60 trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret); in f2fs_filemap_fault()
68 struct inode *inode = file_inode(vmf->vma->vm_file); in f2fs_vm_page_mkwrite() local
69 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_vm_page_mkwrite()
71 bool need_alloc = !f2fs_is_pinned_file(inode); in f2fs_vm_page_mkwrite()
75 if (unlikely(IS_IMMUTABLE(inode))) in f2fs_vm_page_mkwrite()
78 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { in f2fs_vm_page_mkwrite()
93 err = f2fs_convert_inline_inode(inode); in f2fs_vm_page_mkwrite()
98 if (f2fs_compressed_file(inode)) { in f2fs_vm_page_mkwrite()
99 int ret = f2fs_is_compressed_cluster(inode, folio->index); in f2fs_vm_page_mkwrite()
113 sb_start_pagefault(inode->i_sb); in f2fs_vm_page_mkwrite()
115 f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); in f2fs_vm_page_mkwrite()
117 filemap_invalidate_lock(inode->i_mapping); in f2fs_vm_page_mkwrite()
118 f2fs_zero_post_eof_page(inode, (folio->index + 1) << PAGE_SHIFT); in f2fs_vm_page_mkwrite()
119 filemap_invalidate_unlock(inode->i_mapping); in f2fs_vm_page_mkwrite()
122 filemap_invalidate_lock_shared(inode->i_mapping); in f2fs_vm_page_mkwrite()
125 if (unlikely(folio->mapping != inode->i_mapping || in f2fs_vm_page_mkwrite()
126 folio_pos(folio) > i_size_read(inode) || in f2fs_vm_page_mkwrite()
133 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_vm_page_mkwrite()
140 if (f2fs_is_pinned_file(inode) && in f2fs_vm_page_mkwrite()
153 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); in f2fs_vm_page_mkwrite()
163 i_size_read(inode)) { in f2fs_vm_page_mkwrite()
166 offset = i_size_read(inode) & ~PAGE_MASK; in f2fs_vm_page_mkwrite()
171 f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE); in f2fs_vm_page_mkwrite()
175 filemap_invalidate_unlock_shared(inode->i_mapping); in f2fs_vm_page_mkwrite()
177 sb_end_pagefault(inode->i_sb); in f2fs_vm_page_mkwrite()
181 trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, ret); in f2fs_vm_page_mkwrite()
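
Note: the two handlers above back shared writable mappings: the first touch of a page goes through f2fs_filemap_fault(), and the first store into a clean page takes the ->page_mkwrite path, which reserves a block (unless the file is pinned) and zeroes any page cache past EOF first. A minimal userspace sketch that exercises both; "testfile" is a placeholder on an f2fs mount and error handling is omitted:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);
        ftruncate(fd, 4096);                  /* make one page of file data */
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        memcpy(p, "hello", 5);                /* read fault, then write-protect
                                                 fault -> f2fs_vm_page_mkwrite() */
        msync(p, 4096, MS_SYNC);              /* write the dirtied page back */
        munmap(p, 4096);
        close(fd);
        return 0;
    }
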
191 static int get_parent_ino(struct inode *inode, nid_t *pino) in get_parent_ino() argument
199 dentry = d_find_alias(inode); in get_parent_ino()
208 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) in need_do_checkpoint() argument
210 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in need_do_checkpoint()
213 if (!S_ISREG(inode->i_mode)) in need_do_checkpoint()
215 else if (f2fs_compressed_file(inode)) in need_do_checkpoint()
217 else if (inode->i_nlink != 1) in need_do_checkpoint()
221 else if (file_wrong_pino(inode)) in need_do_checkpoint()
225 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) in need_do_checkpoint()
232 f2fs_need_dentry_mark(sbi, inode->i_ino) && in need_do_checkpoint()
233 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, in need_do_checkpoint()
236 else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, in need_do_checkpoint()
255 static void try_to_fix_pino(struct inode *inode) in try_to_fix_pino() argument
257 struct f2fs_inode_info *fi = F2FS_I(inode); in try_to_fix_pino()
261 if (file_wrong_pino(inode) && inode->i_nlink == 1 && in try_to_fix_pino()
262 get_parent_ino(inode, &pino)) { in try_to_fix_pino()
263 f2fs_i_pino_write(inode, pino); in try_to_fix_pino()
264 file_got_pino(inode); in try_to_fix_pino()
272 struct inode *inode = file->f_mapping->host; in f2fs_do_sync_file() local
273 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_do_sync_file()
274 nid_t ino = inode->i_ino; in f2fs_do_sync_file()
283 if (unlikely(f2fs_readonly(inode->i_sb))) in f2fs_do_sync_file()
286 trace_f2fs_sync_file_enter(inode); in f2fs_do_sync_file()
288 if (S_ISDIR(inode->i_mode)) in f2fs_do_sync_file()
292 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks) in f2fs_do_sync_file()
293 set_inode_flag(inode, FI_NEED_IPU); in f2fs_do_sync_file()
295 clear_inode_flag(inode, FI_NEED_IPU); in f2fs_do_sync_file()
298 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); in f2fs_do_sync_file()
303 if (!f2fs_skip_inode_update(inode, datasync)) { in f2fs_do_sync_file()
304 f2fs_write_inode(inode, NULL); in f2fs_do_sync_file()
311 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) && in f2fs_do_sync_file()
318 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) || in f2fs_do_sync_file()
340 f2fs_down_read(&F2FS_I(inode)->i_sem); in f2fs_do_sync_file()
341 cp_reason = need_do_checkpoint(inode); in f2fs_do_sync_file()
342 f2fs_up_read(&F2FS_I(inode)->i_sem); in f2fs_do_sync_file()
346 ret = f2fs_sync_fs(inode->i_sb, 1); in f2fs_do_sync_file()
352 try_to_fix_pino(inode); in f2fs_do_sync_file()
353 clear_inode_flag(inode, FI_APPEND_WRITE); in f2fs_do_sync_file()
354 clear_inode_flag(inode, FI_UPDATE_WRITE); in f2fs_do_sync_file()
359 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id); in f2fs_do_sync_file()
371 f2fs_mark_inode_dirty_sync(inode, true); in f2fs_do_sync_file()
372 f2fs_write_inode(inode, NULL); in f2fs_do_sync_file()
392 clear_inode_flag(inode, FI_APPEND_WRITE); in f2fs_do_sync_file()
395 ret = f2fs_issue_flush(sbi, inode->i_ino); in f2fs_do_sync_file()
398 clear_inode_flag(inode, FI_UPDATE_WRITE); in f2fs_do_sync_file()
403 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); in f2fs_do_sync_file()
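
Note: f2fs_do_sync_file() is the backend for both fsync(2) and fdatasync(2); the datasync flag and the dirty-page count (vs. min_fsync_blocks) decide between in-place updates (FI_NEED_IPU), syncing node pages, and a full checkpoint (need_do_checkpoint()). From userspace the distinction is just which syscall is issued:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_WRONLY | O_CREAT | O_APPEND, 0644);
        write(fd, "journal record\n", 15);
        fdatasync(fd);   /* datasync path: data plus minimal node info */
        fsync(fd);       /* may also sync the inode and, if needed, checkpoint */
        close(fd);
        return 0;
    }
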
418 struct inode *inode = mapping->host; in __found_offset() local
421 if (f2fs_compressed_file(inode)) { in __found_offset()
422 block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_folio, in __found_offset()
423 ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size)); in __found_offset()
450 struct inode *inode = file->f_mapping->host; in f2fs_seek_block() local
451 loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode)); in f2fs_seek_block()
458 inode_lock_shared(inode); in f2fs_seek_block()
460 isize = i_size_read(inode); in f2fs_seek_block()
465 if (f2fs_has_inline_data(inode)) { in f2fs_seek_block()
478 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_seek_block()
492 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_seek_block()
503 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode), in f2fs_seek_block()
523 inode_unlock_shared(inode); in f2fs_seek_block()
526 inode_unlock_shared(inode); in f2fs_seek_block()
532 struct inode *inode = file->f_mapping->host; in f2fs_llseek() local
533 loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode)); in f2fs_llseek()
540 maxbytes, i_size_read(inode)); in f2fs_llseek()
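
Note: f2fs_seek_block() implements the SEEK_DATA/SEEK_HOLE whences of llseek by walking dnode blocks and testing block addresses for validity, capped at maxbytes. A runnable sketch, assuming a sparse placeholder "testfile":

    #define _GNU_SOURCE            /* SEEK_DATA / SEEK_HOLE */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_RDONLY);
        off_t data = lseek(fd, 0, SEEK_DATA);     /* first mapped offset */
        off_t hole = lseek(fd, data, SEEK_HOLE);  /* end of that extent */
        printf("data at %lld, next hole at %lld\n",
               (long long)data, (long long)hole);
        close(fd);
        return 0;
    }
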
554 struct inode *inode = file_inode(file); in f2fs_file_mmap_prepare() local
556 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) in f2fs_file_mmap_prepare()
559 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_file_mmap_prepare()
565 f2fs_down_read(&F2FS_I(inode)->i_sem); in f2fs_file_mmap_prepare()
566 set_inode_flag(inode, FI_MMAP_FILE); in f2fs_file_mmap_prepare()
567 f2fs_up_read(&F2FS_I(inode)->i_sem); in f2fs_file_mmap_prepare()
572 static int finish_preallocate_blocks(struct inode *inode) in finish_preallocate_blocks() argument
577 f2fs_down_read(&F2FS_I(inode)->i_sem); in finish_preallocate_blocks()
578 opened = is_inode_flag_set(inode, FI_OPENED_FILE); in finish_preallocate_blocks()
579 f2fs_up_read(&F2FS_I(inode)->i_sem); in finish_preallocate_blocks()
583 inode_lock(inode); in finish_preallocate_blocks()
584 if (is_inode_flag_set(inode, FI_OPENED_FILE)) in finish_preallocate_blocks()
587 if (!file_should_truncate(inode)) in finish_preallocate_blocks()
590 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in finish_preallocate_blocks()
591 filemap_invalidate_lock(inode->i_mapping); in finish_preallocate_blocks()
593 truncate_setsize(inode, i_size_read(inode)); in finish_preallocate_blocks()
594 ret = f2fs_truncate(inode); in finish_preallocate_blocks()
596 filemap_invalidate_unlock(inode->i_mapping); in finish_preallocate_blocks()
597 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in finish_preallocate_blocks()
601 file_dont_truncate(inode); in finish_preallocate_blocks()
603 f2fs_down_write(&F2FS_I(inode)->i_sem); in finish_preallocate_blocks()
604 set_inode_flag(inode, FI_OPENED_FILE); in finish_preallocate_blocks()
605 f2fs_up_write(&F2FS_I(inode)->i_sem); in finish_preallocate_blocks()
607 inode_unlock(inode); in finish_preallocate_blocks()
611 static int f2fs_file_open(struct inode *inode, struct file *filp) in f2fs_file_open() argument
613 int err = fscrypt_file_open(inode, filp); in f2fs_file_open()
618 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_file_open()
621 err = fsverity_file_open(inode, filp); in f2fs_file_open()
628 err = dquot_file_open(inode, filp); in f2fs_file_open()
632 err = finish_preallocate_blocks(inode); in f2fs_file_open()
634 atomic_inc(&F2FS_I(inode)->open_count); in f2fs_file_open()
640 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_truncate_data_blocks_range()
645 int cluster_size = F2FS_I(dn->inode)->i_cluster_size; in f2fs_truncate_data_blocks_range()
646 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks); in f2fs_truncate_data_blocks_range()
650 addr = get_dnode_addr(dn->inode, dn->node_folio) + ofs; in f2fs_truncate_data_blocks_range()
657 if (f2fs_compressed_file(dn->inode) && in f2fs_truncate_data_blocks_range()
660 f2fs_i_compr_blocks_update(dn->inode, in f2fs_truncate_data_blocks_range()
706 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false); in f2fs_truncate_data_blocks_range()
715 dn->inode) + ofs; in f2fs_truncate_data_blocks_range()
718 dec_valid_block_count(sbi, dn->inode, nr_free); in f2fs_truncate_data_blocks_range()
723 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid, in f2fs_truncate_data_blocks_range()
727 static int truncate_partial_data_page(struct inode *inode, u64 from, in truncate_partial_data_page() argument
732 struct address_space *mapping = inode->i_mapping; in truncate_partial_data_page()
748 folio = f2fs_get_lock_data_folio(inode, index, true); in truncate_partial_data_page()
756 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode)); in truncate_partial_data_page()
763 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) in f2fs_do_truncate_blocks() argument
765 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_do_truncate_blocks()
772 trace_f2fs_truncate_blocks_enter(inode, from); in f2fs_do_truncate_blocks()
774 if (IS_DEVICE_ALIASING(inode) && from) { in f2fs_do_truncate_blocks()
781 if (free_from >= max_file_blocks(inode)) in f2fs_do_truncate_blocks()
787 ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); in f2fs_do_truncate_blocks()
793 if (IS_DEVICE_ALIASING(inode)) { in f2fs_do_truncate_blocks()
794 struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ]; in f2fs_do_truncate_blocks()
799 dec_valid_block_count(sbi, inode, ei.len); in f2fs_do_truncate_blocks()
806 if (f2fs_has_inline_data(inode)) { in f2fs_do_truncate_blocks()
807 f2fs_truncate_inline_inode(inode, ifolio, from); in f2fs_do_truncate_blocks()
813 set_new_dnode(&dn, inode, ifolio, NULL, 0); in f2fs_do_truncate_blocks()
821 count = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_do_truncate_blocks()
833 err = f2fs_truncate_inode_blocks(inode, free_from); in f2fs_do_truncate_blocks()
840 err = truncate_partial_data_page(inode, from, truncate_page); in f2fs_do_truncate_blocks()
842 trace_f2fs_truncate_blocks_exit(inode, err); in f2fs_do_truncate_blocks()
846 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) in f2fs_truncate_blocks() argument
856 if (f2fs_compressed_file(inode)) in f2fs_truncate_blocks()
858 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT); in f2fs_truncate_blocks()
861 err = f2fs_do_truncate_blocks(inode, free_from, lock); in f2fs_truncate_blocks()
870 if (f2fs_compressed_file(inode) && !free_from in f2fs_truncate_blocks()
871 && is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) in f2fs_truncate_blocks()
872 clear_inode_flag(inode, FI_COMPRESS_RELEASED); in f2fs_truncate_blocks()
875 err = f2fs_truncate_partial_cluster(inode, from, lock); in f2fs_truncate_blocks()
884 int f2fs_truncate(struct inode *inode) in f2fs_truncate() argument
888 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) in f2fs_truncate()
891 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || in f2fs_truncate()
892 S_ISLNK(inode->i_mode))) in f2fs_truncate()
895 trace_f2fs_truncate(inode); in f2fs_truncate()
897 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) in f2fs_truncate()
900 err = f2fs_dquot_initialize(inode); in f2fs_truncate()
905 if (!f2fs_may_inline_data(inode)) { in f2fs_truncate()
906 err = f2fs_convert_inline_inode(inode); in f2fs_truncate()
911 err = f2fs_truncate_blocks(inode, i_size_read(inode), true); in f2fs_truncate()
915 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); in f2fs_truncate()
916 f2fs_mark_inode_dirty_sync(inode, false); in f2fs_truncate()
920 static bool f2fs_force_buffered_io(struct inode *inode, int rw) in f2fs_force_buffered_io() argument
922 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_force_buffered_io()
924 if (!fscrypt_dio_supported(inode)) in f2fs_force_buffered_io()
926 if (fsverity_active(inode)) in f2fs_force_buffered_io()
928 if (f2fs_compressed_file(inode)) in f2fs_force_buffered_io()
934 if (f2fs_has_inline_data(inode) && rw == READ) in f2fs_force_buffered_io()
945 !f2fs_is_pinned_file(inode)) in f2fs_force_buffered_io()
956 struct inode *inode = d_inode(path->dentry); in f2fs_getattr() local
957 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_getattr()
961 if (f2fs_has_extra_attr(inode) && in f2fs_getattr()
962 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) && in f2fs_getattr()
977 if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { in f2fs_getattr()
978 unsigned int bsize = i_blocksize(inode); in f2fs_getattr()
981 if (!f2fs_force_buffered_io(inode, WRITE)) { in f2fs_getattr()
992 if (IS_ENCRYPTED(inode)) in f2fs_getattr()
998 if (IS_VERITY(inode)) in f2fs_getattr()
1008 generic_fillattr(idmap, request_mask, inode, stat); in f2fs_getattr()
1011 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || in f2fs_getattr()
1012 f2fs_has_inline_dentry(inode)) in f2fs_getattr()
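
Note: the STATX_DIOALIGN branch above reports direct-I/O alignment only when f2fs_force_buffered_io() would not force buffered I/O (fscrypt without DIO support, verity, compression, etc.). It is queried via statx(2); STATX_DIOALIGN needs kernel 6.1+ and matching headers, and "testfile" is a placeholder:

    #include <fcntl.h>        /* AT_FDCWD */
    #include <stdio.h>
    #include <sys/stat.h>     /* statx(); STATX_DIOALIGN with recent headers */

    int main(void)
    {
        struct statx stx;
        if (statx(AT_FDCWD, "testfile", 0, STATX_DIOALIGN, &stx) == 0 &&
            (stx.stx_mask & STATX_DIOALIGN))
            printf("DIO: memory align %u, offset align %u\n",
                   stx.stx_dio_mem_align, stx.stx_dio_offset_align);
        return 0;
    }
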
1020 struct inode *inode, const struct iattr *attr) in __setattr_copy() argument
1024 i_uid_update(idmap, attr, inode); in __setattr_copy()
1025 i_gid_update(idmap, attr, inode); in __setattr_copy()
1027 inode_set_atime_to_ts(inode, attr->ia_atime); in __setattr_copy()
1029 inode_set_mtime_to_ts(inode, attr->ia_mtime); in __setattr_copy()
1031 inode_set_ctime_to_ts(inode, attr->ia_ctime); in __setattr_copy()
1035 if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode))) in __setattr_copy()
1037 set_acl_inode(inode, mode); in __setattr_copy()
1047 struct inode *inode = d_inode(dentry); in f2fs_setattr() local
1048 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_setattr()
1049 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_setattr()
1067 if (unlikely(IS_IMMUTABLE(inode))) in f2fs_setattr()
1070 if (unlikely(IS_APPEND(inode) && in f2fs_setattr()
1076 if (!f2fs_is_compress_backend_ready(inode) || in f2fs_setattr()
1077 IS_DEVICE_ALIASING(inode)) in f2fs_setattr()
1079 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) && in f2fs_setattr()
1089 if (f2fs_is_pinned_file(inode) && in f2fs_setattr()
1090 attr->ia_size <= i_size_read(inode) && in f2fs_setattr()
1096 if (is_quota_modification(idmap, inode, attr)) { in f2fs_setattr()
1097 err = f2fs_dquot_initialize(inode); in f2fs_setattr()
1101 if (i_uid_needs_update(idmap, attr, inode) || in f2fs_setattr()
1102 i_gid_needs_update(idmap, attr, inode)) { in f2fs_setattr()
1104 err = dquot_transfer(idmap, inode, attr); in f2fs_setattr()
1114 i_uid_update(idmap, attr, inode); in f2fs_setattr()
1115 i_gid_update(idmap, attr, inode); in f2fs_setattr()
1116 f2fs_mark_inode_dirty_sync(inode, true); in f2fs_setattr()
1121 loff_t old_size = i_size_read(inode); in f2fs_setattr()
1123 if (attr->ia_size > MAX_INLINE_DATA(inode)) { in f2fs_setattr()
1128 err = f2fs_convert_inline_inode(inode); in f2fs_setattr()
1138 inode_dio_wait(inode); in f2fs_setattr()
1141 filemap_invalidate_lock(inode->i_mapping); in f2fs_setattr()
1144 f2fs_zero_post_eof_page(inode, attr->ia_size); in f2fs_setattr()
1145 truncate_setsize(inode, attr->ia_size); in f2fs_setattr()
1148 err = f2fs_truncate(inode); in f2fs_setattr()
1153 filemap_invalidate_unlock(inode->i_mapping); in f2fs_setattr()
1159 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); in f2fs_setattr()
1160 fi->last_disk_size = i_size_read(inode); in f2fs_setattr()
1164 __setattr_copy(idmap, inode, attr); in f2fs_setattr()
1167 err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode)); in f2fs_setattr()
1169 if (is_inode_flag_set(inode, FI_ACL_MODE)) { in f2fs_setattr()
1171 inode->i_mode = fi->i_acl_mode; in f2fs_setattr()
1172 clear_inode_flag(inode, FI_ACL_MODE); in f2fs_setattr()
1177 f2fs_mark_inode_dirty_sync(inode, true); in f2fs_setattr()
1196 static int fill_zero(struct inode *inode, pgoff_t index, in fill_zero() argument
1199 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in fill_zero()
1208 folio = f2fs_get_new_data_folio(inode, NULL, index, false); in fill_zero()
1221 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) in f2fs_truncate_hole() argument
1229 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_truncate_hole()
1240 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_truncate_hole()
1243 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset); in f2fs_truncate_hole()
1253 static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len) in f2fs_punch_hole() argument
1259 ret = f2fs_convert_inline_inode(inode); in f2fs_punch_hole()
1263 filemap_invalidate_lock(inode->i_mapping); in f2fs_punch_hole()
1264 f2fs_zero_post_eof_page(inode, offset + len); in f2fs_punch_hole()
1265 filemap_invalidate_unlock(inode->i_mapping); in f2fs_punch_hole()
1274 ret = fill_zero(inode, pg_start, off_start, in f2fs_punch_hole()
1280 ret = fill_zero(inode, pg_start++, off_start, in f2fs_punch_hole()
1286 ret = fill_zero(inode, pg_end, 0, off_end); in f2fs_punch_hole()
1293 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_punch_hole()
1300 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_punch_hole()
1301 filemap_invalidate_lock(inode->i_mapping); in f2fs_punch_hole()
1303 truncate_pagecache_range(inode, blk_start, blk_end - 1); in f2fs_punch_hole()
1306 ret = f2fs_truncate_hole(inode, pg_start, pg_end); in f2fs_punch_hole()
1309 filemap_invalidate_unlock(inode->i_mapping); in f2fs_punch_hole()
1310 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_punch_hole()
1317 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr, in __read_out_blkaddrs() argument
1320 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __read_out_blkaddrs()
1325 set_new_dnode(&dn, inode, NULL, NULL, 0); in __read_out_blkaddrs()
1332 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - in __read_out_blkaddrs()
1339 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_folio, inode) - in __read_out_blkaddrs()
1372 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr, in __roll_back_blkaddrs() argument
1375 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __roll_back_blkaddrs()
1383 set_new_dnode(&dn, inode, NULL, NULL, 0); in __roll_back_blkaddrs()
1386 dec_valid_block_count(sbi, inode, 1); in __roll_back_blkaddrs()
1396 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, in __clone_blkaddrs()
1484 static int __exchange_data_block(struct inode *src_inode, in __exchange_data_block()
1485 struct inode *dst_inode, pgoff_t src, pgoff_t dst, in __exchange_data_block()
1536 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) in f2fs_do_collapse() argument
1538 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_do_collapse()
1539 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); in f2fs_do_collapse()
1547 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_do_collapse()
1548 filemap_invalidate_lock(inode->i_mapping); in f2fs_do_collapse()
1550 f2fs_zero_post_eof_page(inode, offset + len); in f2fs_do_collapse()
1553 f2fs_drop_extent_tree(inode); in f2fs_do_collapse()
1554 truncate_pagecache(inode, offset); in f2fs_do_collapse()
1555 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true); in f2fs_do_collapse()
1558 filemap_invalidate_unlock(inode->i_mapping); in f2fs_do_collapse()
1559 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_do_collapse()
1563 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) in f2fs_collapse_range() argument
1568 if (offset + len >= i_size_read(inode)) in f2fs_collapse_range()
1575 ret = f2fs_convert_inline_inode(inode); in f2fs_collapse_range()
1580 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); in f2fs_collapse_range()
1584 ret = f2fs_do_collapse(inode, offset, len); in f2fs_collapse_range()
1589 filemap_invalidate_lock(inode->i_mapping); in f2fs_collapse_range()
1590 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); in f2fs_collapse_range()
1591 truncate_pagecache(inode, offset); in f2fs_collapse_range()
1593 new_size = i_size_read(inode) - len; in f2fs_collapse_range()
1594 ret = f2fs_truncate_blocks(inode, new_size, true); in f2fs_collapse_range()
1595 filemap_invalidate_unlock(inode->i_mapping); in f2fs_collapse_range()
1597 f2fs_i_size_write(inode, new_size); in f2fs_collapse_range()
1604 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_do_zero_range()
1651 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, in f2fs_zero_range() argument
1654 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_zero_range()
1655 struct address_space *mapping = inode->i_mapping; in f2fs_zero_range()
1657 loff_t new_size = i_size_read(inode); in f2fs_zero_range()
1661 ret = inode_newsize_ok(inode, (len + offset)); in f2fs_zero_range()
1665 ret = f2fs_convert_inline_inode(inode); in f2fs_zero_range()
1674 f2fs_zero_post_eof_page(inode, offset + len); in f2fs_zero_range()
1684 ret = fill_zero(inode, pg_start, off_start, in f2fs_zero_range()
1692 ret = fill_zero(inode, pg_start++, off_start, in f2fs_zero_range()
1706 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_zero_range()
1709 truncate_pagecache_range(inode, in f2fs_zero_range()
1715 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_zero_range()
1720 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_zero_range()
1724 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_zero_range()
1732 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_zero_range()
1745 ret = fill_zero(inode, pg_end, 0, off_end); in f2fs_zero_range()
1754 if (new_size > i_size_read(inode)) { in f2fs_zero_range()
1756 file_set_keep_isize(inode); in f2fs_zero_range()
1758 f2fs_i_size_write(inode, new_size); in f2fs_zero_range()
1763 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) in f2fs_insert_range() argument
1765 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_insert_range()
1766 struct address_space *mapping = inode->i_mapping; in f2fs_insert_range()
1771 new_size = i_size_read(inode) + len; in f2fs_insert_range()
1772 ret = inode_newsize_ok(inode, new_size); in f2fs_insert_range()
1776 if (offset >= i_size_read(inode)) in f2fs_insert_range()
1783 ret = f2fs_convert_inline_inode(inode); in f2fs_insert_range()
1790 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true); in f2fs_insert_range()
1803 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); in f2fs_insert_range()
1806 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_insert_range()
1809 f2fs_zero_post_eof_page(inode, offset + len); in f2fs_insert_range()
1810 truncate_pagecache(inode, offset); in f2fs_insert_range()
1819 f2fs_drop_extent_tree(inode); in f2fs_insert_range()
1821 ret = __exchange_data_block(inode, inode, idx, in f2fs_insert_range()
1826 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_insert_range()
1833 truncate_pagecache(inode, offset); in f2fs_insert_range()
1837 f2fs_i_size_write(inode, new_size); in f2fs_insert_range()
1841 static int f2fs_expand_inode_data(struct inode *inode, loff_t offset, in f2fs_expand_inode_data() argument
1844 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_expand_inode_data()
1859 err = inode_newsize_ok(inode, (len + offset)); in f2fs_expand_inode_data()
1863 err = f2fs_convert_inline_inode(inode); in f2fs_expand_inode_data()
1867 filemap_invalidate_lock(inode->i_mapping); in f2fs_expand_inode_data()
1868 f2fs_zero_post_eof_page(inode, offset + len); in f2fs_expand_inode_data()
1869 filemap_invalidate_unlock(inode->i_mapping); in f2fs_expand_inode_data()
1885 if (f2fs_is_pinned_file(inode)) { in f2fs_expand_inode_data()
1900 inode->i_ino, pg_start, pg_end); in f2fs_expand_inode_data()
1923 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO); in f2fs_expand_inode_data()
1924 file_dont_truncate(inode); in f2fs_expand_inode_data()
1936 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO); in f2fs_expand_inode_data()
1955 if (new_size > i_size_read(inode)) { in f2fs_expand_inode_data()
1957 file_set_keep_isize(inode); in f2fs_expand_inode_data()
1959 f2fs_i_size_write(inode, new_size); in f2fs_expand_inode_data()
1968 struct inode *inode = file_inode(file); in f2fs_fallocate() local
1971 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) in f2fs_fallocate()
1973 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode))) in f2fs_fallocate()
1975 if (!f2fs_is_compress_backend_ready(inode) || IS_DEVICE_ALIASING(inode)) in f2fs_fallocate()
1979 if (!S_ISREG(inode->i_mode)) in f2fs_fallocate()
1982 if (IS_ENCRYPTED(inode) && in f2fs_fallocate()
1991 inode_lock(inode); in f2fs_fallocate()
1997 if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) && in f2fs_fallocate()
2012 inode_dio_wait(inode); in f2fs_fallocate()
2015 if (offset >= inode->i_size) in f2fs_fallocate()
2018 ret = f2fs_punch_hole(inode, offset, len); in f2fs_fallocate()
2020 ret = f2fs_collapse_range(inode, offset, len); in f2fs_fallocate()
2022 ret = f2fs_zero_range(inode, offset, len, mode); in f2fs_fallocate()
2024 ret = f2fs_insert_range(inode, offset, len); in f2fs_fallocate()
2026 ret = f2fs_expand_inode_data(inode, offset, len, mode); in f2fs_fallocate()
2030 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); in f2fs_fallocate()
2031 f2fs_mark_inode_dirty_sync(inode, false); in f2fs_fallocate()
2032 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); in f2fs_fallocate()
2036 inode_unlock(inode); in f2fs_fallocate()
2038 trace_f2fs_fallocate(inode, mode, offset, len, ret); in f2fs_fallocate()
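
Note: f2fs_fallocate() dispatches on the mode bits to the five helpers listed above. The userspace mapping, roughly (the range ops want block-aligned offsets; error handling omitted):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>   /* FALLOC_FL_* */
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_RDWR);
        fallocate(fd, 0, 0, 1 << 20);                      /* f2fs_expand_inode_data() */
        fallocate(fd, FALLOC_FL_PUNCH_HOLE |
                      FALLOC_FL_KEEP_SIZE, 4096, 4096);    /* f2fs_punch_hole() */
        fallocate(fd, FALLOC_FL_ZERO_RANGE, 8192, 4096);   /* f2fs_zero_range() */
        fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096);  /* f2fs_collapse_range() */
        fallocate(fd, FALLOC_FL_INSERT_RANGE, 0, 4096);    /* f2fs_insert_range() */
        close(fd);
        return 0;
    }
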
2042 static int f2fs_release_file(struct inode *inode, struct file *filp) in f2fs_release_file() argument
2044 if (atomic_dec_and_test(&F2FS_I(inode)->open_count)) in f2fs_release_file()
2045 f2fs_remove_donate_inode(inode); in f2fs_release_file()
2052 atomic_read(&inode->i_writecount) != 1) in f2fs_release_file()
2055 inode_lock(inode); in f2fs_release_file()
2056 f2fs_abort_atomic_write(inode, true); in f2fs_release_file()
2057 inode_unlock(inode); in f2fs_release_file()
2064 struct inode *inode = file_inode(file); in f2fs_file_flush() local
2072 if (F2FS_I(inode)->atomic_write_task == current && in f2fs_file_flush()
2074 inode_lock(inode); in f2fs_file_flush()
2075 f2fs_abort_atomic_write(inode, true); in f2fs_file_flush()
2076 inode_unlock(inode); in f2fs_file_flush()
2082 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) in f2fs_setflags_common() argument
2084 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_setflags_common()
2091 if (IS_NOQUOTA(inode)) in f2fs_setflags_common()
2095 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) in f2fs_setflags_common()
2097 if (!f2fs_empty_dir(inode)) in f2fs_setflags_common()
2102 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) in f2fs_setflags_common()
2110 if (!f2fs_disable_compressed_file(inode)) in f2fs_setflags_common()
2114 int err = f2fs_convert_inline_inode(inode); in f2fs_setflags_common()
2119 if (!f2fs_may_compress(inode) || in f2fs_setflags_common()
2120 (S_ISREG(inode->i_mode) && in f2fs_setflags_common()
2121 F2FS_HAS_BLOCKS(inode))) { in f2fs_setflags_common()
2125 err = set_compress_context(inode); in f2fs_setflags_common()
2134 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) && in f2fs_setflags_common()
2138 set_inode_flag(inode, FI_PROJ_INHERIT); in f2fs_setflags_common()
2140 clear_inode_flag(inode, FI_PROJ_INHERIT); in f2fs_setflags_common()
2142 inode_set_ctime_current(inode); in f2fs_setflags_common()
2143 f2fs_set_inode_flags(inode); in f2fs_setflags_common()
2144 f2fs_mark_inode_dirty_sync(inode, true); in f2fs_setflags_common()
2234 struct inode *inode = file_inode(filp); in f2fs_ioc_getversion() local
2236 return put_user(inode->i_generation, (int __user *)arg); in f2fs_ioc_getversion()
2241 struct inode *inode = file_inode(filp); in f2fs_ioc_start_atomic_write() local
2243 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_ioc_start_atomic_write()
2244 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_start_atomic_write()
2251 if (!inode_owner_or_capable(idmap, inode)) in f2fs_ioc_start_atomic_write()
2254 if (!S_ISREG(inode->i_mode)) in f2fs_ioc_start_atomic_write()
2264 inode_lock(inode); in f2fs_ioc_start_atomic_write()
2266 if (!f2fs_disable_compressed_file(inode) || in f2fs_ioc_start_atomic_write()
2267 f2fs_is_pinned_file(inode)) { in f2fs_ioc_start_atomic_write()
2272 if (f2fs_is_atomic_file(inode)) in f2fs_ioc_start_atomic_write()
2275 ret = f2fs_convert_inline_inode(inode); in f2fs_ioc_start_atomic_write()
2286 if (get_dirty_pages(inode)) in f2fs_ioc_start_atomic_write()
2288 inode->i_ino, get_dirty_pages(inode)); in f2fs_ioc_start_atomic_write()
2289 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); in f2fs_ioc_start_atomic_write()
2297 struct inode *dir = d_inode(dentry->d_parent); in f2fs_ioc_start_atomic_write()
2307 F2FS_I(fi->cow_inode)->atomic_inode = inode; in f2fs_ioc_start_atomic_write()
2319 f2fs_write_inode(inode, NULL); in f2fs_ioc_start_atomic_write()
2321 stat_inc_atomic_inode(inode); in f2fs_ioc_start_atomic_write()
2323 set_inode_flag(inode, FI_ATOMIC_FILE); in f2fs_ioc_start_atomic_write()
2325 isize = i_size_read(inode); in f2fs_ioc_start_atomic_write()
2328 set_inode_flag(inode, FI_ATOMIC_REPLACE); in f2fs_ioc_start_atomic_write()
2329 truncate_inode_pages_final(inode->i_mapping); in f2fs_ioc_start_atomic_write()
2330 f2fs_i_size_write(inode, 0); in f2fs_ioc_start_atomic_write()
2343 stat_update_max_atomic_write(inode); in f2fs_ioc_start_atomic_write()
2346 inode_unlock(inode); in f2fs_ioc_start_atomic_write()
2353 struct inode *inode = file_inode(filp); in f2fs_ioc_commit_atomic_write() local
2360 if (!inode_owner_or_capable(idmap, inode)) in f2fs_ioc_commit_atomic_write()
2367 f2fs_balance_fs(F2FS_I_SB(inode), true); in f2fs_ioc_commit_atomic_write()
2369 inode_lock(inode); in f2fs_ioc_commit_atomic_write()
2371 if (f2fs_is_atomic_file(inode)) { in f2fs_ioc_commit_atomic_write()
2372 ret = f2fs_commit_atomic_write(inode); in f2fs_ioc_commit_atomic_write()
2376 f2fs_abort_atomic_write(inode, ret); in f2fs_ioc_commit_atomic_write()
2381 inode_unlock(inode); in f2fs_ioc_commit_atomic_write()
2388 struct inode *inode = file_inode(filp); in f2fs_ioc_abort_atomic_write() local
2395 if (!inode_owner_or_capable(idmap, inode)) in f2fs_ioc_abort_atomic_write()
2402 inode_lock(inode); in f2fs_ioc_abort_atomic_write()
2404 f2fs_abort_atomic_write(inode, true); in f2fs_ioc_abort_atomic_write()
2406 inode_unlock(inode); in f2fs_ioc_abort_atomic_write()
2409 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); in f2fs_ioc_abort_atomic_write()
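
Note: the three atomic-write ioctls above give one file all-or-nothing update semantics; after start, writes land in a COW inode (fi->cow_inode) until commit replays them. A sketch using the F2FS_IOC_* macros from the uapi <linux/f2fs.h> of recent kernels; "db.file" is a placeholder regular file:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/f2fs.h>   /* F2FS_IOC_*_ATOMIC_WRITE */

    int main(void)
    {
        int fd = open("db.file", O_RDWR);
        if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) == 0) {
            write(fd, "all-or-nothing update", 21);
            if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) != 0)
                ioctl(fd, F2FS_IOC_ABORT_ATOMIC_WRITE);
        }
        close(fd);
        return 0;
    }
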
2487 struct inode *inode = file_inode(filp); in f2fs_ioc_shutdown() local
2488 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_shutdown()
2521 static int f2fs_keep_noreuse_range(struct inode *inode, in f2fs_keep_noreuse_range() argument
2524 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_keep_noreuse_range()
2525 u64 max_bytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode)); in f2fs_keep_noreuse_range()
2529 if (!S_ISREG(inode->i_mode)) in f2fs_keep_noreuse_range()
2539 inode_lock(inode); in f2fs_keep_noreuse_range()
2540 if (f2fs_is_atomic_file(inode)) { in f2fs_keep_noreuse_range()
2541 inode_unlock(inode); in f2fs_keep_noreuse_range()
2548 if (!list_empty(&F2FS_I(inode)->gdonate_list)) { in f2fs_keep_noreuse_range()
2549 list_del_init(&F2FS_I(inode)->gdonate_list); in f2fs_keep_noreuse_range()
2551 if (is_inode_flag_set(inode, FI_DONATE_FINISHED)) in f2fs_keep_noreuse_range()
2554 set_inode_flag(inode, FI_DONATE_FINISHED); in f2fs_keep_noreuse_range()
2558 if (list_empty(&F2FS_I(inode)->gdonate_list)) { in f2fs_keep_noreuse_range()
2559 list_add_tail(&F2FS_I(inode)->gdonate_list, in f2fs_keep_noreuse_range()
2563 list_move_tail(&F2FS_I(inode)->gdonate_list, in f2fs_keep_noreuse_range()
2566 F2FS_I(inode)->donate_start = start; in f2fs_keep_noreuse_range()
2567 F2FS_I(inode)->donate_end = end - 1; in f2fs_keep_noreuse_range()
2568 clear_inode_flag(inode, FI_DONATE_FINISHED); in f2fs_keep_noreuse_range()
2571 inode_unlock(inode); in f2fs_keep_noreuse_range()
2578 struct inode *inode = file_inode(filp); in f2fs_ioc_fitrim() local
2579 struct super_block *sb = inode->i_sb; in f2fs_ioc_fitrim()
2607 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); in f2fs_ioc_fitrim()
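
Note: f2fs_ioc_fitrim() services the generic FITRIM ioctl, discarding free space. Any descriptor on the filesystem works; CAP_SYS_ADMIN is required, and "/mnt/f2fs" is a placeholder mount point:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>     /* FITRIM, struct fstrim_range */

    int main(void)
    {
        struct fstrim_range r = { .start = 0, .len = (__u64)-1, .minlen = 0 };
        int fd = open("/mnt/f2fs", O_RDONLY);
        if (ioctl(fd, FITRIM, &r) == 0)
            printf("trimmed %llu bytes\n", (unsigned long long)r.len);
        close(fd);
        return 0;
    }
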
2623 struct inode *inode = file_inode(filp); in f2fs_ioc_set_encryption_policy() local
2626 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode))) in f2fs_ioc_set_encryption_policy()
2630 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); in f2fs_ioc_set_encryption_policy()
2643 struct inode *inode = file_inode(filp); in f2fs_ioc_get_encryption_pwsalt() local
2644 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_get_encryption_pwsalt()
2734 struct inode *inode = file_inode(filp); in f2fs_ioc_gc() local
2735 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_gc()
2838 struct inode *inode = file_inode(filp); in f2fs_ioc_write_checkpoint() local
2839 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_write_checkpoint()
2867 struct inode *inode = file_inode(filp); in f2fs_defragment_range() local
2880 inode_lock(inode); in f2fs_defragment_range()
2884 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE)); in f2fs_defragment_range()
2886 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) || in f2fs_defragment_range()
2887 f2fs_is_atomic_file(inode)) { in f2fs_defragment_range()
2893 set_inode_flag(inode, FI_OPU_WRITE); in f2fs_defragment_range()
2894 if (f2fs_should_update_inplace(inode, NULL)) { in f2fs_defragment_range()
2900 err = filemap_write_and_wait_range(inode->i_mapping, in f2fs_defragment_range()
2910 if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) { in f2fs_defragment_range()
2925 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT); in f2fs_defragment_range()
2972 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT); in f2fs_defragment_range()
2981 set_inode_flag(inode, FI_SKIP_WRITES); in f2fs_defragment_range()
2988 folio = f2fs_get_lock_data_folio(inode, idx, true); in f2fs_defragment_range()
3010 clear_inode_flag(inode, FI_SKIP_WRITES); in f2fs_defragment_range()
3012 err = filemap_fdatawrite(inode->i_mapping); in f2fs_defragment_range()
3017 clear_inode_flag(inode, FI_SKIP_WRITES); in f2fs_defragment_range()
3019 clear_inode_flag(inode, FI_OPU_WRITE); in f2fs_defragment_range()
3021 inode_unlock(inode); in f2fs_defragment_range()
3029 struct inode *inode = file_inode(filp); in f2fs_ioc_defragment() local
3030 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_defragment()
3037 if (!S_ISREG(inode->i_mode)) in f2fs_ioc_defragment()
3052 max_file_blocks(inode))) in f2fs_ioc_defragment()
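
Note: f2fs_defragment_range() rewrites fragmented extents out of place (FI_OPU_WRITE) by reading and redirtying them so writeback relocates the blocks. It is driven by F2FS_IOC_DEFRAGMENT, which takes a byte range and requires CAP_SYS_ADMIN; "fragmented.file" is a placeholder:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/f2fs.h>   /* F2FS_IOC_DEFRAGMENT, struct f2fs_defragment */

    int main(void)
    {
        struct f2fs_defragment df = { .start = 0, .len = 16 << 20 };
        int fd = open("fragmented.file", O_RDWR);
        ioctl(fd, F2FS_IOC_DEFRAGMENT, &df);   /* root only */
        close(fd);
        return 0;
    }
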
3077 struct inode *src = file_inode(file_in); in f2fs_move_file_range()
3078 struct inode *dst = file_inode(file_out); in f2fs_move_file_range()
3250 struct inode *inode = file_inode(filp); in f2fs_ioc_flush_device() local
3251 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_flush_device()
3321 struct inode *inode = file_inode(filp); in f2fs_ioc_get_features() local
3322 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature); in f2fs_ioc_get_features()
3331 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) in f2fs_transfer_project_quota() argument
3334 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_transfer_project_quota()
3342 err = __dquot_transfer(inode, transfer_to); in f2fs_transfer_project_quota()
3349 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid) in f2fs_ioc_setproject() argument
3351 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_ioc_setproject()
3352 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_setproject()
3364 if (!f2fs_has_extra_attr(inode)) in f2fs_ioc_setproject()
3374 if (IS_NOQUOTA(inode)) in f2fs_ioc_setproject()
3380 err = f2fs_dquot_initialize(inode); in f2fs_ioc_setproject()
3385 err = f2fs_transfer_project_quota(inode, kprojid); in f2fs_ioc_setproject()
3390 inode_set_ctime_current(inode); in f2fs_ioc_setproject()
3391 f2fs_mark_inode_dirty_sync(inode, true); in f2fs_ioc_setproject()
3397 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) in f2fs_transfer_project_quota() argument
3402 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid) in f2fs_ioc_setproject() argument
3412 struct inode *inode = d_inode(dentry); in f2fs_fileattr_get() local
3413 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_fileattr_get()
3416 if (IS_ENCRYPTED(inode)) in f2fs_fileattr_get()
3418 if (IS_VERITY(inode)) in f2fs_fileattr_get()
3420 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) in f2fs_fileattr_get()
3422 if (is_inode_flag_set(inode, FI_PIN_FILE)) in f2fs_fileattr_get()
3427 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) in f2fs_fileattr_get()
3436 struct inode *inode = d_inode(dentry); in f2fs_fileattr_set() local
3441 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) in f2fs_fileattr_set()
3443 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode))) in f2fs_fileattr_set()
3452 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) in f2fs_fileattr_set()
3455 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask)); in f2fs_fileattr_set()
3457 err = f2fs_ioc_setproject(inode, fa->fsx_projid); in f2fs_fileattr_set()
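
Note: f2fs_fileattr_set() is reached through the generic FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS paths; the fsx_projid field ends up in f2fs_ioc_setproject(), which transfers project quota. A sketch, assuming project quota is enabled on the filesystem (FS_XFLAG_PROJINHERIT additionally applies to directories):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>     /* FS_IOC_FSGETXATTR/FSSETXATTR, struct fsxattr */

    int main(void)
    {
        struct fsxattr fsx;
        int fd = open("testfile", O_RDONLY);
        ioctl(fd, FS_IOC_FSGETXATTR, &fsx);
        fsx.fsx_projid = 42;                 /* routed to f2fs_ioc_setproject() */
        ioctl(fd, FS_IOC_FSSETXATTR, &fsx);
        close(fd);
        return 0;
    }
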
3462 int f2fs_pin_file_control(struct inode *inode, bool inc) in f2fs_pin_file_control() argument
3464 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_pin_file_control()
3465 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_pin_file_control()
3467 if (IS_DEVICE_ALIASING(inode)) in f2fs_pin_file_control()
3472 __func__, inode->i_ino, fi->i_gc_failures); in f2fs_pin_file_control()
3473 clear_inode_flag(inode, FI_PIN_FILE); in f2fs_pin_file_control()
3479 f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1); in f2fs_pin_file_control()
3486 struct inode *inode = file_inode(filp); in f2fs_ioc_set_pin_file() local
3487 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_set_pin_file()
3494 if (!S_ISREG(inode->i_mode)) in f2fs_ioc_set_pin_file()
3500 if (!pin && IS_DEVICE_ALIASING(inode)) in f2fs_ioc_set_pin_file()
3507 inode_lock(inode); in f2fs_ioc_set_pin_file()
3509 if (f2fs_is_atomic_file(inode)) { in f2fs_ioc_set_pin_file()
3515 clear_inode_flag(inode, FI_PIN_FILE); in f2fs_ioc_set_pin_file()
3516 f2fs_i_gc_failures_write(inode, 0); in f2fs_ioc_set_pin_file()
3518 } else if (f2fs_is_pinned_file(inode)) { in f2fs_ioc_set_pin_file()
3522 if (F2FS_HAS_BLOCKS(inode)) { in f2fs_ioc_set_pin_file()
3529 f2fs_should_update_outplace(inode, NULL)) { in f2fs_ioc_set_pin_file()
3534 if (f2fs_pin_file_control(inode, false)) { in f2fs_ioc_set_pin_file()
3539 ret = f2fs_convert_inline_inode(inode); in f2fs_ioc_set_pin_file()
3543 if (!f2fs_disable_compressed_file(inode)) { in f2fs_ioc_set_pin_file()
3548 set_inode_flag(inode, FI_PIN_FILE); in f2fs_ioc_set_pin_file()
3549 ret = F2FS_I(inode)->i_gc_failures; in f2fs_ioc_set_pin_file()
3553 inode_unlock(inode); in f2fs_ioc_set_pin_file()
3560 struct inode *inode = file_inode(filp); in f2fs_ioc_get_pin_file() local
3563 if (is_inode_flag_set(inode, FI_PIN_FILE)) in f2fs_ioc_get_pin_file()
3564 pin = F2FS_I(inode)->i_gc_failures; in f2fs_ioc_get_pin_file()
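
Note: pinning fixes a file's block addresses in place; GC failures against a pinned file are counted in i_gc_failures, and f2fs_pin_file_control() eventually unpins after too many. Sketch with the pin-file ioctls ("pinned.file" is a placeholder regular file):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/f2fs.h>   /* F2FS_IOC_{SET,GET}_PIN_FILE */

    int main(void)
    {
        __u32 pin = 1, gc_failures = 0;
        int fd = open("pinned.file", O_RDWR);
        ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
        ioctl(fd, F2FS_IOC_GET_PIN_FILE, &gc_failures);
        printf("gc failures on pinned file: %u\n", gc_failures);
        close(fd);
        return 0;
    }
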
3576 struct inode *inode = file_inode(filp); in f2fs_ioc_io_prio() local
3582 if (!S_ISREG(inode->i_mode) || level >= F2FS_IOPRIO_MAX) in f2fs_ioc_io_prio()
3585 inode_lock(inode); in f2fs_ioc_io_prio()
3586 F2FS_I(inode)->ioprio_hint = level; in f2fs_ioc_io_prio()
3587 inode_unlock(inode); in f2fs_ioc_io_prio()
3591 int f2fs_precache_extents(struct inode *inode) in f2fs_precache_extents() argument
3593 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_precache_extents()
3599 if (is_inode_flag_set(inode, FI_NO_EXTENT)) in f2fs_precache_extents()
3608 end = F2FS_BLK_ALIGN(i_size_read(inode)); in f2fs_precache_extents()
3614 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE); in f2fs_precache_extents()
3650 struct inode *inode = file_inode(filp); in f2fs_ioc_enable_verity() local
3652 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); in f2fs_ioc_enable_verity()
3654 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) { in f2fs_ioc_enable_verity()
3655 f2fs_warn(F2FS_I_SB(inode), in f2fs_ioc_enable_verity()
3657 inode->i_ino); in f2fs_ioc_enable_verity()
3682 struct inode *inode = file_inode(filp); in f2fs_ioc_getfslabel() local
3683 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_getfslabel()
3708 struct inode *inode = file_inode(filp); in f2fs_ioc_setfslabel() local
3709 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_setfslabel()
3742 static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks) in f2fs_get_compress_blocks() argument
3744 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) in f2fs_get_compress_blocks()
3747 if (!f2fs_compressed_file(inode)) in f2fs_get_compress_blocks()
3750 *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks); in f2fs_get_compress_blocks()
3757 struct inode *inode = file_inode(filp); in f2fs_ioc_get_compress_blocks() local
3761 ret = f2fs_get_compress_blocks(inode, &blocks); in f2fs_ioc_get_compress_blocks()
3770 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in release_compress_blocks()
3772 int cluster_size = F2FS_I(dn->inode)->i_cluster_size; in release_compress_blocks()
3777 blkaddr = data_blkaddr(dn->inode, dn->node_folio, in release_compress_blocks()
3809 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false); in release_compress_blocks()
3810 dec_valid_block_count(sbi, dn->inode, in release_compress_blocks()
3823 struct inode *inode = file_inode(filp); in f2fs_release_compress_blocks() local
3824 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_release_compress_blocks()
3825 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_release_compress_blocks()
3843 inode_lock(inode); in f2fs_release_compress_blocks()
3845 writecount = atomic_read(&inode->i_writecount); in f2fs_release_compress_blocks()
3852 if (!f2fs_compressed_file(inode) || in f2fs_release_compress_blocks()
3853 is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { in f2fs_release_compress_blocks()
3858 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); in f2fs_release_compress_blocks()
3867 set_inode_flag(inode, FI_COMPRESS_RELEASED); in f2fs_release_compress_blocks()
3868 inode_set_ctime_current(inode); in f2fs_release_compress_blocks()
3869 f2fs_mark_inode_dirty_sync(inode, true); in f2fs_release_compress_blocks()
3872 filemap_invalidate_lock(inode->i_mapping); in f2fs_release_compress_blocks()
3874 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); in f2fs_release_compress_blocks()
3882 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_release_compress_blocks()
3895 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_release_compress_blocks()
3912 filemap_invalidate_unlock(inode->i_mapping); in f2fs_release_compress_blocks()
3917 inode_unlock(inode); in f2fs_release_compress_blocks()
3929 __func__, inode->i_ino, inode->i_blocks, in f2fs_release_compress_blocks()
3940 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in reserve_compress_blocks()
3941 int cluster_size = F2FS_I(dn->inode)->i_cluster_size; in reserve_compress_blocks()
3946 blkaddr = data_blkaddr(dn->inode, dn->node_folio, in reserve_compress_blocks()
3963 blkaddr = data_blkaddr(dn->inode, dn->node_folio, in reserve_compress_blocks()
3997 ret = inc_valid_block_count(sbi, dn->inode, in reserve_compress_blocks()
4007 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true); in reserve_compress_blocks()
4019 struct inode *inode = file_inode(filp); in f2fs_reserve_compress_blocks() local
4020 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_reserve_compress_blocks()
4021 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_reserve_compress_blocks()
4038 inode_lock(inode); in f2fs_reserve_compress_blocks()
4040 if (!f2fs_compressed_file(inode) || in f2fs_reserve_compress_blocks()
4041 !is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { in f2fs_reserve_compress_blocks()
4050 filemap_invalidate_lock(inode->i_mapping); in f2fs_reserve_compress_blocks()
4052 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); in f2fs_reserve_compress_blocks()
4060 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_reserve_compress_blocks()
4073 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_reserve_compress_blocks()
4089 filemap_invalidate_unlock(inode->i_mapping); in f2fs_reserve_compress_blocks()
4093 clear_inode_flag(inode, FI_COMPRESS_RELEASED); in f2fs_reserve_compress_blocks()
4094 inode_set_ctime_current(inode); in f2fs_reserve_compress_blocks()
4095 f2fs_mark_inode_dirty_sync(inode, true); in f2fs_reserve_compress_blocks()
4100 inode_unlock(inode); in f2fs_reserve_compress_blocks()
4111 __func__, inode->i_ino, inode->i_blocks, in f2fs_reserve_compress_blocks()
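
Note: releasing hands a compressed file's saved blocks back to the free pool and sets FI_COMPRESS_RELEASED, after which the file rejects writes until the blocks are reserved again. The matching ioctls all pass a __u64 block count; release/reserve require CAP_SYS_ADMIN, and "compressed.file" is a placeholder:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/f2fs.h>   /* F2FS_IOC_*_COMPRESS_BLOCKS */

    int main(void)
    {
        __u64 blocks = 0;
        int fd = open("compressed.file", O_RDONLY);
        ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks);
        printf("blocks saved by compression: %llu\n",
               (unsigned long long)blocks);
        ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks);
        ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks);
        close(fd);
        return 0;
    }
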
4119 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode, in f2fs_secure_erase() argument
4136 if (IS_ENCRYPTED(inode)) in f2fs_secure_erase()
4137 ret = fscrypt_zeroout_range(inode, off, block, len); in f2fs_secure_erase()
4148 struct inode *inode = file_inode(filp); in f2fs_sec_trim_file() local
4149 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_sec_trim_file()
4150 struct address_space *mapping = inode->i_mapping; in f2fs_sec_trim_file()
4167 !S_ISREG(inode->i_mode)) in f2fs_sec_trim_file()
4173 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi))) in f2fs_sec_trim_file()
4179 inode_lock(inode); in f2fs_sec_trim_file()
4181 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) || in f2fs_sec_trim_file()
4182 range.start >= inode->i_size) { in f2fs_sec_trim_file()
4190 if (inode->i_size - range.start > range.len) { in f2fs_sec_trim_file()
4194 sbi->sb->s_maxbytes : inode->i_size; in f2fs_sec_trim_file()
4207 ret = f2fs_convert_inline_inode(inode); in f2fs_sec_trim_file()
4211 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_sec_trim_file()
4227 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_sec_trim_file()
4237 end_offset = ADDRS_PER_PAGE(dn.node_folio, inode); in f2fs_sec_trim_file()
4267 inode, prev_index, prev_block, in f2fs_sec_trim_file()
4296 ret = f2fs_secure_erase(prev_bdev, inode, prev_index, in f2fs_sec_trim_file()
4301 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_sec_trim_file()
4303 inode_unlock(inode); in f2fs_sec_trim_file()
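
Note: f2fs_sec_trim_file() discards and/or zeroes the on-disk blocks backing a range for secure deletion (fscrypt_zeroout_range() on encrypted files). It is driven by F2FS_IOC_SEC_TRIM_FILE; as the i_size check above suggests, the range must reach EOF, and len == -1 expresses that. "secret.file" is a placeholder:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/f2fs.h>   /* F2FS_IOC_SEC_TRIM_FILE, struct f2fs_sectrim_range */

    int main(void)
    {
        struct f2fs_sectrim_range sr = {
            .start = 0,
            .len   = (__u64)-1,                /* to EOF */
            .flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
        };
        int fd = open("secret.file", O_RDWR);
        ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &sr);
        close(fd);
        return 0;
    }
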
4311 struct inode *inode = file_inode(filp); in f2fs_ioc_get_compress_option() local
4314 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) in f2fs_ioc_get_compress_option()
4317 inode_lock_shared(inode); in f2fs_ioc_get_compress_option()
4319 if (!f2fs_compressed_file(inode)) { in f2fs_ioc_get_compress_option()
4320 inode_unlock_shared(inode); in f2fs_ioc_get_compress_option()
4324 option.algorithm = F2FS_I(inode)->i_compress_algorithm; in f2fs_ioc_get_compress_option()
4325 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size; in f2fs_ioc_get_compress_option()
4327 inode_unlock_shared(inode); in f2fs_ioc_get_compress_option()
4338 struct inode *inode = file_inode(filp); in f2fs_ioc_set_compress_option() local
4339 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_ioc_set_compress_option()
4340 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_set_compress_option()
4362 inode_lock(inode); in f2fs_ioc_set_compress_option()
4364 f2fs_down_write(&F2FS_I(inode)->i_sem); in f2fs_ioc_set_compress_option()
4365 if (!f2fs_compressed_file(inode)) { in f2fs_ioc_set_compress_option()
4370 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) { in f2fs_ioc_set_compress_option()
4375 if (F2FS_HAS_BLOCKS(inode)) { in f2fs_ioc_set_compress_option()
4392 f2fs_mark_inode_dirty_sync(inode, true); in f2fs_ioc_set_compress_option()
4394 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_ioc_set_compress_option()
4399 inode_unlock(inode); in f2fs_ioc_set_compress_option()
4405 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) in redirty_blocks() argument
4407 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx); in redirty_blocks()
4408 struct address_space *mapping = inode->i_mapping; in redirty_blocks()
4429 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(folio)); in redirty_blocks()
4445 struct inode *inode = file_inode(filp); in f2fs_ioc_decompress_file() local
4446 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_decompress_file()
4447 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_ioc_decompress_file()
4463 inode_lock(inode); in f2fs_ioc_decompress_file()
4465 if (!f2fs_is_compress_backend_ready(inode)) { in f2fs_ioc_decompress_file()
4470 if (!f2fs_compressed_file(inode) || in f2fs_ioc_decompress_file()
4471 is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { in f2fs_ioc_decompress_file()
4476 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); in f2fs_ioc_decompress_file()
4483 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); in f2fs_ioc_decompress_file()
4489 if (!f2fs_is_compressed_cluster(inode, page_idx)) in f2fs_ioc_decompress_file()
4492 ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); in f2fs_ioc_decompress_file()
4496 if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) { in f2fs_ioc_decompress_file()
4497 ret = filemap_fdatawrite(inode->i_mapping); in f2fs_ioc_decompress_file()
4510 ret = filemap_write_and_wait_range(inode->i_mapping, 0, in f2fs_ioc_decompress_file()
4518 inode_unlock(inode); in f2fs_ioc_decompress_file()
4526 struct inode *inode = file_inode(filp); in f2fs_ioc_compress_file() local
4527 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_ioc_compress_file()
4528 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_ioc_compress_file()
4544 inode_lock(inode); in f2fs_ioc_compress_file()
4546 if (!f2fs_is_compress_backend_ready(inode)) { in f2fs_ioc_compress_file()
4551 if (!f2fs_compressed_file(inode) || in f2fs_ioc_compress_file()
4552 is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { in f2fs_ioc_compress_file()
4557 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); in f2fs_ioc_compress_file()
4561 set_inode_flag(inode, FI_ENABLE_COMPRESS); in f2fs_ioc_compress_file()
4563 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); in f2fs_ioc_compress_file()
4569 if (f2fs_is_sparse_cluster(inode, page_idx)) in f2fs_ioc_compress_file()
4572 ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); in f2fs_ioc_compress_file()
4576 if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) { in f2fs_ioc_compress_file()
4577 ret = filemap_fdatawrite(inode->i_mapping); in f2fs_ioc_compress_file()
4590 ret = filemap_write_and_wait_range(inode->i_mapping, 0, in f2fs_ioc_compress_file()
4593 clear_inode_flag(inode, FI_ENABLE_COMPRESS); in f2fs_ioc_compress_file()
4600 inode_unlock(inode); in f2fs_ioc_compress_file()
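
Note: F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE walk the file cluster by cluster, redirty the pages (redirty_blocks()), and let writeback rewrite them in the requested form, flushing once a segment's worth of pages is dirty. They apply to files created with compression and a compress_mode=user mount; "compressed.file" is a placeholder:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/f2fs.h>   /* F2FS_IOC_{DE,}COMPRESS_FILE */

    int main(void)
    {
        int fd = open("compressed.file", O_RDWR);
        ioctl(fd, F2FS_IOC_DECOMPRESS_FILE);   /* rewrite clusters as plain data */
        ioctl(fd, F2FS_IOC_COMPRESS_FILE);     /* rewrite clusters compressed */
        close(fd);
        return 0;
    }
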
4715 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb, in f2fs_should_use_dio() argument
4723 if (f2fs_force_buffered_io(inode, iov_iter_rw(iter))) in f2fs_should_use_dio()
4738 if (!IS_ALIGNED(align, i_blocksize(inode)) && in f2fs_should_use_dio()
4739 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev))) in f2fs_should_use_dio()
4764 struct inode *inode = file_inode(file); in f2fs_dio_read_iter() local
4765 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_dio_read_iter()
4766 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_dio_read_iter()
4775 trace_f2fs_direct_IO_enter(inode, iocb, count, READ); in f2fs_dio_read_iter()
4787 if (f2fs_is_atomic_file(inode)) { in f2fs_dio_read_iter()
4813 trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret); in f2fs_dio_read_iter()
4820 struct inode *inode = file_inode(file); in f2fs_trace_rw_file_path() local
4823 buf = f2fs_getname(F2FS_I_SB(inode)); in f2fs_trace_rw_file_path()
4830 trace_f2fs_datawrite_start(inode, pos, count, in f2fs_trace_rw_file_path()
4833 trace_f2fs_dataread_start(inode, pos, count, in f2fs_trace_rw_file_path()
4841 struct inode *inode = file_inode(iocb->ki_filp); in f2fs_file_read_iter() local
4846 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_file_read_iter()
4853 dio = f2fs_should_use_dio(inode, iocb, to); in f2fs_file_read_iter()
4856 if (f2fs_lfs_mode(F2FS_I_SB(inode)) && in f2fs_file_read_iter()
4857 get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE) && in f2fs_file_read_iter()
4858 (!f2fs_is_pinned_file(inode) || !dio)) in f2fs_file_read_iter()
4859 inode_dio_wait(inode); in f2fs_file_read_iter()
4866 f2fs_update_iostat(F2FS_I_SB(inode), inode, in f2fs_file_read_iter()
4869 trace_f2fs_dataread_end(inode, pos, ret); in f2fs_file_read_iter()
4877 struct inode *inode = file_inode(in); in f2fs_file_splice_read() local
4881 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_file_splice_read()
4889 f2fs_update_iostat(F2FS_I_SB(inode), inode, in f2fs_file_splice_read()
4892 trace_f2fs_dataread_end(inode, pos, ret); in f2fs_file_splice_read()
4899 struct inode *inode = file_inode(file); in f2fs_write_checks() local
4903 if (IS_IMMUTABLE(inode)) in f2fs_write_checks()
4906 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) in f2fs_write_checks()
4917 filemap_invalidate_lock(inode->i_mapping); in f2fs_write_checks()
4918 f2fs_zero_post_eof_page(inode, iocb->ki_pos + iov_iter_count(from)); in f2fs_write_checks()
4919 filemap_invalidate_unlock(inode->i_mapping); in f2fs_write_checks()
4933 struct inode *inode = file_inode(iocb->ki_filp); in f2fs_preallocate_blocks() local
4934 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_preallocate_blocks()
4948 if (dio && i_size_read(inode) && in f2fs_preallocate_blocks()
4949 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode)))) in f2fs_preallocate_blocks()
4960 if (f2fs_has_inline_data(inode)) { in f2fs_preallocate_blocks()
4962 if (pos + count <= MAX_INLINE_DATA(inode)) in f2fs_preallocate_blocks()
4964 ret = f2fs_convert_inline_inode(inode); in f2fs_preallocate_blocks()
4977 if (!IS_DEVICE_ALIASING(inode)) in f2fs_preallocate_blocks()
4981 inode->i_write_hint); in f2fs_preallocate_blocks()
4988 ret = f2fs_map_blocks(inode, &map, flag); in f2fs_preallocate_blocks()
4993 set_inode_flag(inode, FI_PREALLOCATED_ALL); in f2fs_preallocate_blocks()
5001 struct inode *inode = file_inode(file); in f2fs_buffered_write_iter() local
5010 f2fs_update_iostat(F2FS_I_SB(inode), inode, in f2fs_buffered_write_iter()
5032 struct inode *inode = iter->inode; in f2fs_dio_write_submit_io() local
5033 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_dio_write_submit_io()
5034 enum log_type type = f2fs_rw_hint_to_seg_type(sbi, inode->i_write_hint); in f2fs_dio_write_submit_io()
5063 struct inode *inode = file_inode(file); in f2fs_dio_write_iter() local
5064 struct f2fs_inode_info *fi = F2FS_I(inode); in f2fs_dio_write_iter()
5065 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_dio_write_iter()
5073 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE); in f2fs_dio_write_iter()
5077 if (f2fs_has_inline_data(inode) || in f2fs_dio_write_iter()
5078 !f2fs_overwrite_io(inode, pos, count)) { in f2fs_dio_write_iter()
5093 ret = f2fs_convert_inline_inode(inode); in f2fs_dio_write_iter()
5109 if (pos + count > inode->i_size) in f2fs_dio_write_iter()
5129 if (pos + ret > inode->i_size) in f2fs_dio_write_iter()
5130 f2fs_i_size_write(inode, pos + ret); in f2fs_dio_write_iter()
5132 set_inode_flag(inode, FI_UPDATE_WRITE); in f2fs_dio_write_iter()
5145 f2fs_write_failed(inode, iocb->ki_pos); in f2fs_dio_write_iter()
5167 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret); in f2fs_dio_write_iter()
5173 struct inode *inode = file_inode(iocb->ki_filp); in f2fs_file_write_iter() local
5184 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { in f2fs_file_write_iter()
5189 if (!f2fs_is_compress_backend_ready(inode)) { in f2fs_file_write_iter()
5195 if (!inode_trylock(inode)) { in f2fs_file_write_iter()
5200 inode_lock(inode); in f2fs_file_write_iter()
5203 if (f2fs_is_pinned_file(inode) && in f2fs_file_write_iter()
5204 !f2fs_overwrite_io(inode, pos, count)) { in f2fs_file_write_iter()
5214 dio = f2fs_should_use_dio(inode, iocb, from); in f2fs_file_write_iter()
5217 if (dio && f2fs_is_atomic_file(inode)) { in f2fs_file_write_iter()
5237 trace_f2fs_datawrite_end(inode, orig_pos, ret); in f2fs_file_write_iter()
5241 if (preallocated && i_size_read(inode) < target_size) { in f2fs_file_write_iter()
5242 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_file_write_iter()
5243 filemap_invalidate_lock(inode->i_mapping); in f2fs_file_write_iter()
5244 if (!f2fs_truncate(inode)) in f2fs_file_write_iter()
5245 file_dont_truncate(inode); in f2fs_file_write_iter()
5246 filemap_invalidate_unlock(inode->i_mapping); in f2fs_file_write_iter()
5247 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); in f2fs_file_write_iter()
5249 file_dont_truncate(inode); in f2fs_file_write_iter()
5252 clear_inode_flag(inode, FI_PREALLOCATED_ALL); in f2fs_file_write_iter()
5254 inode_unlock(inode); in f2fs_file_write_iter()
5256 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret); in f2fs_file_write_iter()
5277 struct inode *inode = file_inode(filp); in f2fs_file_fadvise() local
5281 if (S_ISFIFO(inode->i_mode)) in f2fs_file_fadvise()
5290 F2FS_I_SB(inode)->seq_file_ra_mul; in f2fs_file_fadvise()
5297 f2fs_precache_extents(inode); in f2fs_file_fadvise()
5305 (test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) && in f2fs_file_fadvise()
5306 f2fs_compressed_file(inode))) in f2fs_file_fadvise()
5307 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino); in f2fs_file_fadvise()
5309 err = f2fs_keep_noreuse_range(inode, offset, len); in f2fs_file_fadvise()
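
Note: f2fs_file_fadvise() hooks posix_fadvise(2): SEQUENTIAL scales readahead by seq_file_ra_mul, WILLNEED precaches extents (f2fs_precache_extents()), DONTNEED can drop cached compressed pages, and on recent kernels NOREUSE feeds f2fs_keep_noreuse_range(). A sketch; "bigfile" is a placeholder:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("bigfile", O_RDONLY);
        posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);    /* widen readahead */
        posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_NOREUSE); /* donate-able range */
        posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);      /* drop cached pages */
        close(fd);
        return 0;
    }
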