
Searched refs:e (Results 1 – 25 of 96) sorted by relevance

/fs/
binfmt_misc.c
94 Node *e; in search_binfmt_handler() local
116 if ((*s++ ^ e->magic[j]) & e->mask[j]) in search_binfmt_handler()
147 if (e) in get_binfmt_handler()
454 DUMP_PREFIX_NONE, e->mask, p - e->mask); in create_entry()
467 BINPRM_BUF_SIZE - e->size < e->offset) in create_entry()
473 DUMP_PREFIX_NONE, e->magic, e->size); in create_entry()
481 DUMP_PREFIX_NONE, e->mask, e->size); in create_entry()
485 masked[i] = e->magic[i] & e->mask[i]; in create_entry()
509 if (!e->magic[0] || strchr(e->magic, '/')) in create_entry()
608 dp = bin2hex(dp, e->magic, e->size); in entry_status()
[all …]
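
The binfmt_misc hits above centre on one pattern: comparing an entry's magic bytes against the start of a binary under a per-byte mask (line 116), after checking that offset + size still fits in the sniff buffer (line 467). Below is a minimal user-space sketch of that XOR-and-mask match; struct entry and magic_matches() are hypothetical stand-ins, not the kernel's Node type.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a binfmt_misc entry: a magic pattern, a
 * mask of "care" bits, an offset into the file and a length. */
struct entry {
    const unsigned char *magic;
    const unsigned char *mask;
    size_t offset;
    size_t size;
};

/* Same idea as the match at line 116: a byte matches when the XOR of
 * the candidate byte and the magic byte is zero everywhere the mask
 * has a 1 bit. */
static int magic_matches(const struct entry *e,
                         const unsigned char *buf, size_t len)
{
    if (e->offset + e->size > len)      /* bounds check, cf. line 467 */
        return 0;

    const unsigned char *s = buf + e->offset;
    for (size_t j = 0; j < e->size; j++)
        if ((s[j] ^ e->magic[j]) & e->mask[j])
            return 0;
    return 1;
}

int main(void)
{
    /* An ELF header starts with 0x7f 'E' 'L' 'F'. */
    static const unsigned char magic[] = { 0x7f, 'E', 'L', 'F' };
    static const unsigned char mask[]  = { 0xff, 0xff, 0xff, 0xff };
    struct entry e = { magic, mask, 0, sizeof(magic) };

    unsigned char hdr[] = { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0 };
    printf("match: %d\n", magic_matches(&e, hdr, sizeof(hdr)));
    return 0;
}
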
/fs/bcachefs/
replicas.c
27 BUG_ON(!e->nr_devs); in verify_replicas_entry()
29 e->nr_required >= e->nr_devs); in verify_replicas_entry()
32 BUG_ON(e->devs[i] >= e->devs[i + 1]); in verify_replicas_entry()
38 bubble_sort(e->devs, e->nr_devs, u8_cmp); in bch2_replicas_entry_sort()
63 prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs); in bch2_replicas_entry_to_text()
177 e->nr_devs = 0; in bch2_bkey_to_replicas()
208 e->nr_devs = 0; in bch2_devlist_to_replicas()
209 e->nr_required = 1; in bch2_devlist_to_replicas()
483 unsafe_memcpy(&k.replicas, e, replicas_entry_bytes(e), in bch2_replicas_gc2()
540 memcpy(dst, e, replicas_entry_bytes(e)); in __bch2_sb_replicas_to_cpu_replicas()
[all …]
time_stats.c
49 if (unlikely(!e->step)) { in quantiles_update()
50 e->m = v; in quantiles_update()
52 } else if (e->m > v) { in quantiles_update()
53 e->m = e->m >= e->step in quantiles_update()
54 ? e->m - e->step in quantiles_update()
56 } else if (e->m < v) { in quantiles_update()
57 e->m = e->m + e->step > e->m in quantiles_update()
58 ? e->m + e->step in quantiles_update()
62 if ((e->m > v ? e->m - v : v - e->m) < e->step) in quantiles_update()
63 e->step = max_t(unsigned, e->step / 2, 1); in quantiles_update()
[all …]
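
The time_stats.c hits sketch a step-based streaming quantile estimator: the running estimate m is nudged toward each sample v by an adaptive step, the decrement is clamped at zero, the increment is overflow-checked, and the step is halved (never below 1) once the estimate lands within one step of the sample (lines 62-63). The user-space sketch below mirrors only what the excerpt shows; the initial step value and the saturation value on overflow are elided there and are assumptions here.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical reduction of the quantile entry seen above: a running
 * estimate m and an adaptive step. */
struct quantile_entry {
    uint64_t m;
    uint64_t step;
};

static uint64_t absdiff(uint64_t a, uint64_t b)
{
    return a > b ? a - b : b - a;
}

static void quantile_update(struct quantile_entry *e, uint64_t v)
{
    if (e->step == 0) {                 /* first sample: seed the estimate */
        e->m = v;
        e->step = 1;                    /* assumption: initial step value */
    } else if (e->m > v) {              /* step down, clamped at zero */
        e->m = e->m >= e->step ? e->m - e->step : 0;
    } else if (e->m < v) {              /* step up, guarding against overflow */
        e->m = e->m + e->step > e->m ? e->m + e->step
                                     : UINT64_MAX; /* assumption: saturate */
    }

    /* When the estimate is within one step of the sample, tighten the
     * step but never let it drop below 1 (lines 62-63). */
    if (absdiff(e->m, v) < e->step)
        e->step = e->step / 2 ? e->step / 2 : 1;
}

int main(void)
{
    struct quantile_entry e = { 0, 0 };
    uint64_t samples[] = { 100, 90, 110, 95, 105 };
    for (unsigned i = 0; i < 5; i++) {
        quantile_update(&e, samples[i]);
        printf("v=%llu m=%llu step=%llu\n",
               (unsigned long long)samples[i],
               (unsigned long long)e.m,
               (unsigned long long)e.step);
    }
    return 0;
}
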
sb-errors.c
23 return bch2_sb_field_nr_entries(e); in bch2_sb_field_errors_nr_entries()
39 if (!BCH_SB_ERROR_ENTRY_NR(&e->entries[i])) { in bch2_sb_errors_validate()
47 BCH_SB_ERROR_ENTRY_ID(&e->entries[i]) >= in bch2_sb_errors_validate()
92 bch_sb_errors_cpu *e = &c->fsck_error_counts; in bch2_fs_errors_to_text() local
93 darray_for_each(*e, i) { in bch2_fs_errors_to_text()
114 for (i = 0; i < e->nr; i++) { in bch2_sb_error_count()
115 if (err == e->data[i].id) { in bch2_sb_error_count()
116 e->data[i].nr++; in bch2_sb_error_count()
120 if (err < e->data[i].id) in bch2_sb_error_count()
124 if (darray_make_room(e, 1)) in bch2_sb_error_count()
[all …]
journal_seq_blacklist.c
57 struct journal_seq_blacklist_entry *e = in bch2_journal_seq_blacklist_add() local
60 if (end < le64_to_cpu(e->start)) in bch2_journal_seq_blacklist_add()
63 if (start > le64_to_cpu(e->end)) { in bch2_journal_seq_blacklist_add()
73 start = min(start, le64_to_cpu(e->start)); in bch2_journal_seq_blacklist_add()
74 end = max(end, le64_to_cpu(e->end)); in bch2_journal_seq_blacklist_add()
185 if (le64_to_cpu(e->start) >= in bch2_sb_journal_seq_blacklist_validate()
186 le64_to_cpu(e->end)) { in bch2_sb_journal_seq_blacklist_validate()
188 i, le64_to_cpu(e->start), le64_to_cpu(e->end)); in bch2_sb_journal_seq_blacklist_validate()
193 le64_to_cpu(e[0].end) > in bch2_sb_journal_seq_blacklist_validate()
194 le64_to_cpu(e[1].start)) { in bch2_sb_journal_seq_blacklist_validate()
[all …]
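
The journal_seq_blacklist.c lines follow the usual recipe for merging a new [start, end] range into a list of existing ranges: an entry that ends before the new start or begins after the new end is left alone, and an overlapping entry widens the new range (start = min, end = max, lines 73-74). A minimal sketch of that decision, with a hypothetical plain-integer range struct in place of the little-endian on-disk entries:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical range entry; the kernel stores these as __le64 fields. */
struct range {
    uint64_t start;
    uint64_t end;
};

/* Decide how a new [start, end] relates to an existing entry, the way
 * the loop above does. Returns 1 if the new range was widened to
 * absorb the entry, 0 if the two are disjoint. */
static int maybe_absorb(const struct range *e, uint64_t *start, uint64_t *end)
{
    if (*end < e->start)        /* new range ends before e: disjoint */
        return 0;
    if (*start > e->end)        /* new range starts after e: disjoint */
        return 0;

    /* Overlap: widen the new range to cover the existing entry. */
    if (e->start < *start)
        *start = e->start;
    if (e->end > *end)
        *end = e->end;
    return 1;
}

int main(void)
{
    struct range e = { 10, 20 };
    uint64_t start = 15, end = 30;

    if (maybe_absorb(&e, &start, &end))
        printf("merged into [%llu, %llu]\n",
               (unsigned long long)start, (unsigned long long)end);
    return 0;
}
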
sb-downgrade.c
234 for (const u16 *e = i->errors; e < i->errors + i->nr_errors; e++) in bch2_sb_set_upgrade() local
235 __set_bit_le64(*e, ext->errors_silent); in bch2_sb_set_upgrade()
291 return (void *) &e->errors[le16_to_cpu(e->nr_errors)]; in downgrade_entry_next_c()
307 (void *) i < vstruct_end(&e->field); in bch2_sb_downgrade_validate()
314 if ((void *) &i->errors[0] > vstruct_end(&e->field)) in bch2_sb_downgrade_validate()
343 for_each_downgrade_entry(e, i) { in bch2_sb_downgrade_to_text()
449 unsigned e = le16_to_cpu(i->errors[j]); in bch2_sb_set_downgrade() local
450 if (e < BCH_FSCK_ERR_MAX) in bch2_sb_set_downgrade()
451 __set_bit(e, c->sb.errors_silent); in bch2_sb_set_downgrade()
452 if (e < sizeof(ext->errors_silent) * 8) in bch2_sb_set_downgrade()
[all …]
disk_accounting.c
328 ? bch2_mark_replicas(c, &r.e) in bch2_accounting_update_sb_one()
431 (e->v[1] && in accounting_mem_entry_is_zero()
545 darray_for_each(acc->k, e) { in bch2_accounting_free_counters()
546 free_percpu(e->v[gc]); in bch2_accounting_free_counters()
547 e->v[gc] = NULL; in bch2_accounting_free_counters()
557 darray_for_each(acc->k, e) { in bch2_gc_accounting_start()
558 e->v[1] = __alloc_percpu_gfp(e->nr_counters * sizeof(u64), in bch2_gc_accounting_start()
560 if (!e->v[1]) { in bch2_gc_accounting_start()
688 invalid_dev = r.e.devs[i]; in bch2_disk_accounting_validate_late()
765 darray_for_each(acc->k, e) in bch2_accounting_read()
[all …]
extents.h
51 __extent_entry_type(const union bch_extent_entry *e) in __extent_entry_type() argument
53 return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX; in __extent_entry_type()
57 extent_entry_type(const union bch_extent_entry *e) in extent_entry_type() argument
59 int ret = __ffs(e->type); in extent_entry_type()
120 switch (__extent_entry_type(e)) { in extent_entry_is_crc()
235 to_entry(&e.v->start[0]), in bch2_bkey_ptrs_c()
236 to_entry(extent_entry_last(e)) in bch2_bkey_ptrs_c()
243 e.v->start, in bch2_bkey_ptrs_c()
244 extent_entry_last(e) in bch2_bkey_ptrs_c()
267 to_entry(&e.v->start[0]), in bch2_bkey_ptrs_c()
[all …]
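
extents.h recovers an extent entry's type from a one-hot bitfield: __ffs() gives the index of the lowest set bit, and an empty field decodes to the BCH_EXTENT_ENTRY_MAX sentinel (line 53). A user-space sketch of the same decoding, using __builtin_ctz() in place of the kernel's __ffs(); the enum values are placeholders, not bcachefs's real entry types.

#include <stdio.h>

/* Placeholder entry types; only the decoding pattern matters. */
enum entry_type {
    ENTRY_ptr,
    ENTRY_crc32,
    ENTRY_stripe_ptr,
    ENTRY_MAX,
};

struct extent_entry {
    unsigned type;              /* one-hot: bit n set means type n */
};

/* Mirrors __extent_entry_type(): the lowest set bit is the type, and
 * an empty field decodes to the MAX sentinel. __builtin_ctz stands in
 * for the kernel's __ffs(). */
static enum entry_type entry_type(const struct extent_entry *e)
{
    return e->type ? (enum entry_type)__builtin_ctz(e->type) : ENTRY_MAX;
}

int main(void)
{
    struct extent_entry a = { 1u << ENTRY_crc32 };
    struct extent_entry b = { 0 };
    printf("%d %d\n", entry_type(&a), entry_type(&b));
    return 0;
}
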
replicas.h
37 static inline void bch2_replicas_entry_cached(struct bch_replicas_entry_v1 *e, in bch2_replicas_entry_cached() argument
40 e->data_type = BCH_DATA_cached; in bch2_replicas_entry_cached()
41 e->nr_devs = 1; in bch2_replicas_entry_cached()
42 e->nr_required = 1; in bch2_replicas_entry_cached()
43 e->devs[0] = dev; in bch2_replicas_entry_cached()
replicas_format.h
31 #define replicas_entry_add_dev(e, d) ({ \ argument
32 (e)->nr_devs++; \
33 (e)->devs[(e)->nr_devs - 1] = (d); \
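
The visible lines of replicas_entry_add_dev() append a device index to the entry's trailing devs[] array and bump nr_devs (the rest of the macro is elided above). A tiny sketch of the same append, written as a function over a hypothetical fixed-capacity entry rather than the kernel's flexible-array struct:

#include <stdio.h>

/* Hypothetical, fixed-capacity stand-in for bch_replicas_entry_v1,
 * which really ends in a flexible array member. */
struct replicas_entry {
    unsigned char nr_devs;
    unsigned char devs[16];
};

/* Same effect as lines 32-33: count the device and store it last. */
static void replicas_entry_add_dev(struct replicas_entry *e, unsigned char d)
{
    e->devs[e->nr_devs++] = d;
}

int main(void)
{
    struct replicas_entry e = { 0, { 0 } };
    replicas_entry_add_dev(&e, 3);
    replicas_entry_add_dev(&e, 7);
    printf("nr_devs=%u last=%u\n", e.nr_devs, e.devs[e.nr_devs - 1]);
    return 0;
}
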
/fs/ntfs3/
index.c
566 if (!e || de_is_last(e)) in hdr_find_split()
597 if (!e) in hdr_insert_head()
602 memmove(Add2Ptr(e, ins_bytes), e, to_move); in hdr_insert_head()
1156 if (e && !de_is_last(e) && in indx_find()
1234 e = hdr_next_de(&n->index->ihdr, e); in indx_find_sort()
1638 for (e = e0;; e = hdr_next_de(hdr, e)) { in indx_insert_into_root()
2010 for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) { in indx_find_buffer()
2037 e = Add2Ptr(e, le16_to_cpu(e->size)); in indx_find_buffer()
2126 for (e = hdr_first_de(hdr); e; e = hdr_next_de(hdr, e)) { in indx_free_children()
2413 for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) { in indx_delete_entry()
[all …]
bitmap.c
254 if (e->start.key + e->count.key == bit) { in wnd_add_free_ext()
261 e0 = e; in wnd_add_free_ext()
269 next_end = e->start.key + e->count.key; in wnd_add_free_ext()
282 e0 = e; in wnd_add_free_ext()
376 end = e->start.key + e->count.key; in wnd_remove_free_ext()
477 if (!e) in wnd_remove_free_ext()
481 if (e) { in wnd_remove_free_ext()
886 end = e->start.key + e->count.key; in wnd_is_free()
923 if (e->start.key + e->count.key > bit) in wnd_is_used()
1034 if (!e) in wnd_find()
[all …]
ntfs.h
637 __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64)); in de_set_vbn_le()
644 __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64)); in de_set_vbn()
651 return *(__le64 *)Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64)); in de_get_vbn_le()
656 __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64)); in de_get_vbn()
663 return Add2Ptr(e, le16_to_cpu(e->size)); in de_get_next()
686 (u64)(-1) != *((u64 *)Add2Ptr(e, le16_to_cpu(e->size) - in de_has_vcn_ex()
716 struct NTFS_DE *e; in hdr_first_de() local
722 e = Add2Ptr(hdr, de_off); in hdr_first_de()
723 esize = le16_to_cpu(e->size); in hdr_first_de()
727 return e; in hdr_first_de()
[all …]
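
The ntfs.h helpers read and write a little-endian 64-bit virtual block number stored in the last eight bytes of a variable-sized directory entry, addressed as entry base + size - sizeof(__le64) (line 637). A user-space sketch of that trailing-field access, using memcpy for safe unaligned reads; the helper names are stand-ins for the kernel's NTFS_DE accessors and Add2Ptr():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Add2Ptr() in the kernel is plain byte-offset pointer arithmetic. */
static void *add2ptr(void *p, size_t off)
{
    return (unsigned char *)p + off;
}

/* Mirrors de_get_vbn_le()/de_set_vbn_le(): the VBN occupies the last
 * sizeof(uint64_t) bytes of a directory entry whose total size is
 * `size`. memcpy keeps the access safe where unaligned loads trap.
 * Treating the bytes as host-order is an assumption; on disk the
 * field is little-endian (__le64). */
static uint64_t de_get_vbn(void *e, uint16_t size)
{
    uint64_t v;
    memcpy(&v, add2ptr(e, size - sizeof(v)), sizeof(v));
    return v;
}

static void de_set_vbn(void *e, uint16_t size, uint64_t vbn)
{
    memcpy(add2ptr(e, size - sizeof(vbn)), &vbn, sizeof(vbn));
}

int main(void)
{
    unsigned char entry[32] = { 0 };    /* pretend the entry's size field says 32 */
    de_set_vbn(entry, sizeof(entry), 42);
    printf("vbn=%llu\n", (unsigned long long)de_get_vbn(entry, sizeof(entry)));
    return 0;
}
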
dir.c
240 struct NTFS_DE *e; in dir_search_u() local
295 ino = ino_get(&e->ref); in ntfs_dir_emit()
358 const struct NTFS_DE *e; in ntfs_read_hdr() local
367 e = Add2Ptr(hdr, off); in ntfs_read_hdr()
368 e_size = le16_to_cpu(e->size); in ntfs_read_hdr()
372 if (de_is_last(e)) in ntfs_read_hdr()
529 struct NTFS_DE *e = NULL; in ntfs_dir_count() local
552 e = Add2Ptr(hdr, off); in ntfs_dir_count()
553 e_size = le16_to_cpu(e->size); in ntfs_dir_count()
560 if (de_is_last(e)) in ntfs_dir_count()
[all …]
fslog.c
623 __le32 *e; in enum_rstbl() local
643 e = Add2Ptr(e, rsize)) { in enum_rstbl()
786 __le32 *e; in free_rsttbl_idx() local
804 *e = 0; in free_rsttbl_idx()
831 e = Add2Ptr(e, esize), off += esize) { in init_rsttbl()
877 __le32 *e; in alloc_rsttbl_idx() local
914 __le32 *e; in alloc_rsttbl_from_idx() local
940 return e; in alloc_rsttbl_from_idx()
995 return e; in alloc_rsttbl_from_idx()
2901 e = Add2Ptr(e, esize); in check_if_root_index()
[all …]
/fs/erofs/
decompressor_crypto.c
85 for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) in z_erofs_crypto_get_engine()
86 if (e->tfm) in z_erofs_crypto_get_engine()
87 return e->tfm; in z_erofs_crypto_get_engine()
132 for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) { in z_erofs_crypto_enable_engine()
134 if (e->tfm) in z_erofs_crypto_enable_engine()
141 e->tfm = tfm; in z_erofs_crypto_enable_engine()
157 for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) { in z_erofs_crypto_disable_all_engines()
158 if (!e->tfm) in z_erofs_crypto_disable_all_engines()
161 e->tfm = NULL; in z_erofs_crypto_disable_all_engines()
173 for (e = z_erofs_crypto[alg]; e->crypto_name; ++e) { in z_erofs_crypto_show_engines()
[all …]
/fs/dlm/
requestqueue.c
36 struct rq_entry *e; in dlm_add_requestqueue() local
41 if (!e) { in dlm_add_requestqueue()
47 e->nodeid = nodeid; in dlm_add_requestqueue()
67 struct rq_entry *e; in dlm_process_requestqueue() local
80 ms = &e->request; in dlm_process_requestqueue()
88 e->recover_seq); in dlm_process_requestqueue()
90 dlm_receive_message_saved(ls, &e->request, e->recover_seq); in dlm_process_requestqueue()
91 list_del(&e->list); in dlm_process_requestqueue()
92 kfree(e); in dlm_process_requestqueue()
140 ms = &e->request; in dlm_purge_requestqueue()
[all …]
lowcomms.c
122 #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end) argument
123 #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset) argument
264 if (!e || e->users || e->len == 0) in con_next_wq()
267 return e; in con_next_wq()
1119 if (e->len == 0 && e->users == 0) in writequeue_entry_complete()
1201 *ppc = page_address(e->page) + e->end; in new_wq_entry()
1212 if (!e) in new_wq_entry()
1225 return e; in new_wq_entry()
1242 if (!e) { in dlm_lowcomms_new_msg_con()
1310 e->len = DLM_WQ_LENGTH_BYTES(e); in _dlm_lowcomms_commit_msg()
[all …]
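
The two lowcomms.c macros describe a page-backed write-queue entry: queued-but-unsent data occupies [offset, end) within the page, so end - offset bytes are pending and PAGE_SIZE - end bytes remain free at the tail. A sketch of those two computations over a hypothetical entry struct, with PAGE_SIZE fixed at 4096 for the example:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical reduction of the DLM writequeue entry: one page plus
 * the window [offset, end) of bytes queued but not yet sent. */
struct wq_entry {
    unsigned offset;            /* start of unsent data within the page */
    unsigned end;               /* one past the last queued byte */
};

/* Same arithmetic as DLM_WQ_REMAIN_BYTES() / DLM_WQ_LENGTH_BYTES(). */
static unsigned wq_remain_bytes(const struct wq_entry *e)
{
    return PAGE_SIZE - e->end;  /* free space left at the page tail */
}

static unsigned wq_length_bytes(const struct wq_entry *e)
{
    return e->end - e->offset;  /* bytes queued for sending */
}

int main(void)
{
    struct wq_entry e = { .offset = 128, .end = 1024 };
    printf("remain=%u length=%u\n", wq_remain_bytes(&e), wq_length_bytes(&e));
    return 0;
}
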
/fs/afs/
misc.c
146 e->aborted = false; in afs_prioritise_error()
147 e->error = 0; in afs_prioritise_error()
151 e->error == -ETIME) in afs_prioritise_error()
190 if (e->responded) in afs_prioritise_error()
192 e->error = error; in afs_prioritise_error()
193 e->aborted = false; in afs_prioritise_error()
198 e->aborted = true; in afs_prioritise_error()
199 e->responded = true; in afs_prioritise_error()
202 e->aborted = false; in afs_prioritise_error()
203 e->responded = true; in afs_prioritise_error()
[all …]
/fs/ubifs/
recovery.c
1262 if (!e) in add_ino()
1307 if (!e) in remove_ino()
1310 kfree(e); in remove_ino()
1323 kfree(e); in ubifs_destroy_size_tree()
1367 if (e) { in ubifs_recover_size_accum()
1379 if (e) { in ubifs_recover_size_accum()
1390 if (e) in ubifs_recover_size_accum()
1449 (unsigned long)e->inum, e->i_size, e->d_size, err); in fix_size_in_place()
1513 kfree(e); in inode_fix_size()
1562 if (e->exists && e->i_size < e->d_size) { in ubifs_recover_size()
[all …]
/fs/hfs/
bfind.c
63 int b, e; in __hfs_brec_find() local
67 e = bnode->num_recs - 1; in __hfs_brec_find()
70 rec = (e + b) / 2; in __hfs_brec_find()
80 e = rec; in __hfs_brec_find()
87 e = rec - 1; in __hfs_brec_find()
88 } while (b <= e); in __hfs_brec_find()
89 if (rec != e && e >= 0) { in __hfs_brec_find()
90 len = hfs_brec_lenoff(bnode, e, &off); in __hfs_brec_find()
91 keylen = hfs_brec_keylen(bnode, e); in __hfs_brec_find()
99 fd->record = e; in __hfs_brec_find()
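
Both bfind.c hits (hfs here, hfsplus below) are the same binary search over a B-tree node's records: b and e bound the candidate indices, rec = (e + b) / 2 probes the middle, and the bounds close in until b > e. A self-contained sketch of the pattern over a sorted integer array; the real code compares variable-length keys via hfs_brec_keylen() and handles the not-found case differently, so treat this as the shape of the search only.

#include <stdio.h>

/* Binary search in the style of __hfs_brec_find(): returns the index
 * of the key if found, otherwise -1. The int array stands in for the
 * node's records and key comparison. */
static int brec_find(const int *recs, int num_recs, int key)
{
    int b = 0, e = num_recs - 1, rec;

    if (num_recs <= 0)
        return -1;

    do {
        rec = (e + b) / 2;
        if (recs[rec] == key)
            return rec;         /* exact match */
        if (recs[rec] > key)
            e = rec - 1;        /* search the lower half */
        else
            b = rec + 1;        /* search the upper half */
    } while (b <= e);

    return -1;                  /* not found */
}

int main(void)
{
    int recs[] = { 2, 5, 9, 14, 21, 30 };
    printf("%d %d\n", brec_find(recs, 6, 14), brec_find(recs, 6, 4));
    return 0;
}
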
/fs/hfsplus/
bfind.c
108 int b, e; in __hfs_brec_find() local
113 e = bnode->num_recs - 1; in __hfs_brec_find()
116 rec = (e + b) / 2; in __hfs_brec_find()
124 if (rec_found(bnode, fd, &b, &e, &rec)) { in __hfs_brec_find()
128 } while (b <= e); in __hfs_brec_find()
130 if (rec != e && e >= 0) { in __hfs_brec_find()
131 len = hfs_brec_lenoff(bnode, e, &off); in __hfs_brec_find()
132 keylen = hfs_brec_keylen(bnode, e); in __hfs_brec_find()
141 fd->record = e; in __hfs_brec_find()
/fs/ocfs2/
dlmglue.h
143 #define ocfs2_inode_lock_full(i, r, e, f)\ argument
144 ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)
145 #define ocfs2_inode_lock_nested(i, b, e, s)\ argument
146 ocfs2_inode_lock_full_nested(i, b, e, 0, s)
149 #define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full_nested(i, b, e, 0, OI_LS_NORMAL) argument
150 #define ocfs2_try_inode_lock(i, b, e)\ argument
151 ocfs2_inode_lock_full_nested(i, b, e, OCFS2_META_LOCK_NOQUEUE,\
/fs/f2fs/
node.c
244 __free_nat_entry(e); in __del_from_nat_cache()
397 if (e) { in f2fs_need_dentry_mark()
414 if (e && !get_nat_flag(e, IS_CHECKPOINTED)) in f2fs_is_checkpointed_node()
428 if (e && get_nat_flag(e, HAS_LAST_FSYNC) && in f2fs_need_inode_block_update()
453 if (!e) in cache_nat_entry()
461 if (e != new) in cache_nat_entry()
475 if (!e) { in set_node_addr()
490 if (e != new) in set_node_addr()
518 if (e) { in set_node_addr()
578 if (e) { in f2fs_get_node_info()
[all …]
checkpoint.c
516 if (!e) in __add_ino_entry()
525 if (!e) { in __add_ino_entry()
531 e = new; in __add_ino_entry()
536 e->ino = ino; in __add_ino_entry()
549 if (new && e != new) in __add_ino_entry()
556 struct ino_entry *e; in __remove_ino_entry() local
560 if (e) { in __remove_ino_entry()
561 list_del(&e->list); in __remove_ino_entry()
587 struct ino_entry *e; in f2fs_exist_written_data() local
624 struct ino_entry *e; in f2fs_is_dirty_device() local
[all …]
