
Search results for references to "g" under /fs/ (results 1 – 25 of 30), sorted by relevance


/fs/bcachefs/
disk_groups.c
61 len = strnlen(g->label, sizeof(g->label)); in bch2_sb_disk_groups_validate()
75 for (g = sorted; g + 1 < sorted + nr_groups; g++) in bch2_sb_disk_groups_validate()
77 !group_cmp(&g[0], &g[1])) { in bch2_sb_disk_groups_validate()
80 (int) sizeof(g->label), g->label); in bch2_sb_disk_groups_validate()
100 g++) { in bch2_sb_disk_groups_to_text()
108 BCH_GROUP_PARENT(g), g->label); in bch2_sb_disk_groups_to_text()
188 return g && t.group < g->nr && !g->entries[t.group].deleted in bch2_target_to_mask()
209 g && t.group < g->nr && !g->entries[t.group].deleted in bch2_dev_in_target()
346 if (v >= (g ? g->nr : 0)) in __bch2_disk_path_to_text()
382 for (unsigned i = 0; i < (g ? g->nr : 0); i++) { in bch2_disk_groups_to_text()
[all …]
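The disk_groups.c hits above are the duplicate-label check in bch2_sb_disk_groups_validate(): the entries are sorted, then a single linear pass rejects adjacent equal labels. A minimal userspace sketch of that sorted-then-adjacent-compare pattern, with a made-up struct entry and entry_cmp() standing in for the bcachefs group structures:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct entry { char label[16]; };

    /* comparator in the role of group_cmp(): order entries by label */
    static int entry_cmp(const void *a, const void *b)
    {
        const struct entry *ea = a, *eb = b;
        return strncmp(ea->label, eb->label, sizeof(ea->label));
    }

    int main(void)
    {
        struct entry e[] = { { "ssd" }, { "hdd" }, { "ssd" } };
        size_t nr = sizeof(e) / sizeof(e[0]);

        qsort(e, nr, sizeof(e[0]), entry_cmp);

        /* after sorting, duplicates are adjacent: one pass finds them all */
        for (struct entry *g = e; g + 1 < e + nr; g++)
            if (!entry_cmp(&g[0], &g[1]))
                printf("duplicate label: %.*s\n",
                       (int) sizeof(g->label), g->label);
        return 0;
    }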
btree_locking.c
80 for (i = g->g; i < g->g + g->nr; i++) { in print_cycle()
94 for (i = g->g; i != g->g + g->nr; i++) { in print_chain()
96 if (i != g->g) in print_chain()
105 closure_put(&g->g[--g->nr].trans->ref); in lock_graph_up()
116 while (g->g + g->nr > i) in lock_graph_pop_from()
145 for (i = from + 1; i < g->g + g->nr; i++) in lock_graph_remove_non_waiters()
174 if (i == g->g) { in abort_lock()
204 for (struct trans_waiting_for_lock *i = g->g; i < g->g + g->nr; i++) { in break_cycle_fail()
238 for (i = from; i < g->g + g->nr; i++) { in break_cycle()
263 for (struct trans_waiting_for_lock *i = g->g; i < g->g + g->nr; i++) in lock_graph_descend()
[all …]
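In the btree_locking.c results, the deadlock-detection graph is a flat array g->g[] holding g->nr entries; it is walked with pointer arithmetic (print_cycle(), print_chain()) and shrunk from the top in lock_graph_up(). A standalone sketch of that array-as-stack idiom, with a hypothetical struct node in place of struct trans_waiting_for_lock and without the closure reference counting:

    #include <stdio.h>

    struct node { int id; };

    struct graph {
        struct node g[8];       /* fixed-size backing array */
        unsigned    nr;         /* number of live entries   */
    };

    static void graph_push(struct graph *g, int id)
    {
        g->g[g->nr++].id = id;          /* caller must check capacity */
    }

    static void graph_pop(struct graph *g)
    {
        --g->nr;                        /* analogous to lock_graph_up() */
    }

    static void graph_print(struct graph *g)
    {
        /* same traversal shape as print_cycle()/print_chain() above */
        for (struct node *i = g->g; i < g->g + g->nr; i++)
            printf("%s%d", i != g->g ? " -> " : "", i->id);
        printf("\n");
    }

    int main(void)
    {
        struct graph g = { .nr = 0 };

        graph_push(&g, 1);
        graph_push(&g, 2);
        graph_push(&g, 3);
        graph_print(&g);        /* 1 -> 2 -> 3 */
        graph_pop(&g);
        graph_print(&g);        /* 1 -> 2 */
        return 0;
    }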
buckets.c
133 if (!g) { in bch2_check_fix_ptr()
156 g->gen_valid = true; in bch2_check_fix_ptr()
157 g->gen = p.ptr.gen; in bch2_check_fix_ptr()
173 p.ptr.gen, g->gen, in bch2_check_fix_ptr()
205 p.ptr.gen, g->gen, in bch2_check_fix_ptr()
330 (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) || in bch2_check_fix_ptrs()
669 bucket_lock(g); in bch2_trigger_pointer()
673 bucket_unlock(g); in bch2_trigger_pointer()
1044 bucket_lock(g); in bch2_mark_metadata_bucket()
1064 bucket_unlock(g); in bch2_mark_metadata_bucket()
[all …]
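Several buckets.c hits compare a pointer's generation against the in-memory bucket generation (gen_cmp(p.ptr.gen, g->gen) > 0), with a gen_valid flag guarding first use. Bucket generations are small wrapping counters, so the comparison is assumed here to be the usual signed-difference trick; the sketch below illustrates that idea and is not the kernel helper itself:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Compare two wrapping 8-bit generation numbers.  Interpreting the
     * difference as signed means 0x01 counts as "newer than" 0xff even
     * though it is numerically smaller -- the usual sequence-number trick.
     */
    static int gen_cmp(uint8_t a, uint8_t b)
    {
        return (int8_t)(a - b);
    }

    int main(void)
    {
        printf("%d\n", gen_cmp(5, 3) > 0);      /* 1: 5 is newer than 3    */
        printf("%d\n", gen_cmp(1, 255) > 0);    /* 1: wrapped, still newer */
        printf("%d\n", gen_cmp(3, 3));          /* 0: equal                */
        return 0;
    }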
alloc_background.c
598 g.k.p = pos; in bch2_bucket_gens_init()
602 g.v.gens[offset] = gen; in bch2_bucket_gens_init()
778 g = bch2_trans_kmalloc(trans, sizeof(*g)); in bch2_bucket_gen_update()
792 g->k.p = iter.pos; in bch2_bucket_gen_update()
1034 if (unlikely(!g)) in bch2_trigger_alloc()
1036 g->gen_valid = 1; in bch2_trigger_alloc()
1037 g->gen = new_a->gen; in bch2_trigger_alloc()
1340 g.v.gens[i])) { in bch2_check_alloc_hole_bucket_gens()
1341 g.v.gens[i] = 0; in bch2_check_alloc_hole_bucket_gens()
1353 memcpy(u, &g, sizeof(g)); in bch2_check_alloc_hole_bucket_gens()
[all …]
ec.c
324 struct bucket *g = gc_bucket(ca, bucket.offset); in mark_stripe_bucket() local
325 if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n%s", in mark_stripe_bucket()
332 bucket_lock(g); in mark_stripe_bucket()
333 struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; in mark_stripe_bucket()
335 alloc_to_bucket(g, new); in mark_stripe_bucket()
336 bucket_unlock(g); in mark_stripe_bucket()
/fs/proc/
array.c
153 int g, umask = -1; in task_state() local
201 for (g = 0; g < group_info->ngroups; g++) in task_state()
202 seq_put_decimal_ull(m, g ? " " : "", in task_state()
203 from_kgid_munged(user_ns, group_info->gid[g])); in task_state()
210 for (g = ns->level; g <= pid->level; g++) in task_state()
211 seq_put_decimal_ull(m, "\t", task_tgid_nr_ns(p, pid->numbers[g].ns)); in task_state()
213 for (g = ns->level; g <= pid->level; g++) in task_state()
214 seq_put_decimal_ull(m, "\t", task_pid_nr_ns(p, pid->numbers[g].ns)); in task_state()
216 for (g = ns->level; g <= pid->level; g++) in task_state()
217 seq_put_decimal_ull(m, "\t", task_pgrp_nr_ns(p, pid->numbers[g].ns)); in task_state()
[all …]
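The array.c lines are the /proc/<pid>/status formatting in task_state(): supplementary groups are printed space-separated with no separator before the first value, and the per-namespace-level id lines print one value per level, tab-separated. A small sketch of just that separator idiom, leaving out the kgid and pid-namespace translation:

    #include <stdio.h>

    int main(void)
    {
        unsigned gids[] = { 4, 24, 27, 1000 };
        int ngroups = sizeof(gids) / sizeof(gids[0]);

        /* separator before every value except the first */
        fputs("Groups:\t", stdout);
        for (int g = 0; g < ngroups; g++)
            printf("%s%u", g ? " " : "", gids[g]);
        putchar('\n');
        return 0;
    }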
/fs/xfs/libxfs/
xfs_rtgroup.h
247 struct xfs_groups *g = &mp->m_groups[XG_TYPE_RTG]; in xfs_rtb_to_daddr() local
249 if (xfs_has_rtgroups(mp) && !g->has_daddr_gaps) { in xfs_rtb_to_daddr()
252 rtbno = (xfs_rtblock_t)rgno * g->blocks + (rtbno & g->blkmask); in xfs_rtb_to_daddr()
255 return XFS_FSB_TO_BB(mp, g->start_fsb + rtbno); in xfs_rtb_to_daddr()
263 struct xfs_groups *g = &mp->m_groups[XG_TYPE_RTG]; in xfs_daddr_to_rtb() local
266 bno = XFS_BB_TO_FSBT(mp, daddr) - g->start_fsb; in xfs_daddr_to_rtb()
267 if (xfs_has_rtgroups(mp) && !g->has_daddr_gaps) { in xfs_daddr_to_rtb()
271 rgno = div_u64_rem(bno, g->blocks, &rgbno); in xfs_daddr_to_rtb()
272 return ((xfs_rtblock_t)rgno << g->blklog) + rgbno; in xfs_daddr_to_rtb()
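The xfs_rtgroup.h hits show both directions of the realtime-block <-> disk-address mapping for the packed (no daddr gaps) case: an rt block number keeps the group number in the bits above blklog, while on disk the groups are laid out densely, g->blocks apart. A userspace sketch of that round trip with invented geometry, leaving out the XFS_FSB_TO_BB/XFS_BB_TO_FSBT sector scaling:

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    /* made-up geometry, loosely mirroring struct xfs_groups */
    struct groups {
        uint64_t blocks;        /* usable blocks per group (e.g. 1000)   */
        unsigned blklog;        /* log2 of the encoded group size (1024) */
        uint64_t blkmask;       /* (1 << blklog) - 1                     */
        uint64_t start_fsb;     /* where the first group starts          */
    };

    /* rt block number -> linear block address (no daddr gaps case) */
    static uint64_t rtb_to_linear(const struct groups *g, uint64_t rtb)
    {
        uint64_t rgno = rtb >> g->blklog;

        return g->start_fsb + rgno * g->blocks + (rtb & g->blkmask);
    }

    /* linear block address -> rt block number (inverse of the above) */
    static uint64_t linear_to_rtb(const struct groups *g, uint64_t lin)
    {
        uint64_t bno  = lin - g->start_fsb;
        uint64_t rgno = bno / g->blocks;
        uint64_t off  = bno % g->blocks;

        return (rgno << g->blklog) + off;
    }

    int main(void)
    {
        struct groups g = {
            .blocks = 1000, .blklog = 10, .blkmask = 1023, .start_fsb = 64,
        };
        uint64_t rtb = (3ULL << g.blklog) + 17;   /* group 3, offset 17 */

        assert(linear_to_rtb(&g, rtb_to_linear(&g, rtb)) == rtb);
        printf("rtb %llu <-> linear %llu\n",
               (unsigned long long)rtb,
               (unsigned long long)rtb_to_linear(&g, rtb));
        return 0;
    }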
xfs_zones.c
139 struct xfs_groups *g = &mp->m_groups[XG_TYPE_RTG]; in xfs_zone_validate() local
147 if (XFS_BB_TO_FSB(mp, zone->capacity) != g->blocks) { in xfs_zone_validate()
151 g->blocks); in xfs_zone_validate()
155 if (g->has_daddr_gaps) { in xfs_zone_validate()
156 expected_size = 1 << g->blklog; in xfs_zone_validate()
166 expected_size = g->blocks; in xfs_zone_validate()
xfs_group.h
123 struct xfs_groups *g = &mp->m_groups[xg->xg_type]; in xfs_gbno_to_daddr() local
126 if (g->has_daddr_gaps) in xfs_gbno_to_daddr()
129 fsbno = (xfs_fsblock_t)xg->xg_gno * g->blocks + gbno; in xfs_gbno_to_daddr()
131 return XFS_FSB_TO_BB(mp, g->start_fsb + fsbno); in xfs_gbno_to_daddr()
/fs/dlm/
config.c
410 static struct config_group *make_cluster(struct config_group *g, in make_cluster() argument
445 static void drop_cluster(struct config_group *g, struct config_item *i) in drop_cluster() argument
466 static struct config_group *make_space(struct config_group *g, const char *name) in make_space() argument
494 static void drop_space(struct config_group *g, struct config_item *i) in drop_space() argument
511 static struct config_item *make_comm(struct config_group *g, const char *name) in make_comm() argument
538 static void drop_comm(struct config_group *g, struct config_item *i) in drop_comm() argument
555 static struct config_item *make_node(struct config_group *g, const char *name) in make_node() argument
557 struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent); in make_node()
586 static void drop_node(struct config_group *g, struct config_item *i) in drop_node() argument
588 struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent); in drop_node()
/fs/gfs2/
dir.c
487 struct dirent_gather *g = opaque; in gfs2_dirent_gather() local
489 g->pdent[g->offset++] = dent; in gfs2_dirent_gather()
1378 struct dirent_gather g; in gfs2_dir_read_leaf() local
1416 g.offset = 0; in gfs2_dir_read_leaf()
1426 offset = g.offset; in gfs2_dir_read_leaf()
1433 if (entries2 != g.offset) { in gfs2_dir_read_leaf()
1438 entries2, g.offset); in gfs2_dir_read_leaf()
1573 struct dirent_gather g; in gfs2_dir_read() local
1599 g.offset = 0; in gfs2_dir_read()
1601 gfs2_dirent_gather, NULL, &g); in gfs2_dir_read()
[all …]
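The gfs2 dir.c hits show the gather pattern: gfs2_dirent_gather() is invoked through an opaque void * context, appends each dirent to g->pdent[] while bumping g->offset, and the caller later cross-checks that count (entries2 != g.offset). A generic sketch of a callback collecting items into a caller-provided array via an opaque pointer, using made-up names:

    #include <stdio.h>

    struct gather {
        const char *items[8];
        unsigned    offset;
    };

    /* callback: cast the opaque context back and append the item */
    static int gather_cb(const char *item, void *opaque)
    {
        struct gather *g = opaque;

        g->items[g->offset++] = item;   /* caller sized the array */
        return 0;
    }

    /* stand-in for the code that walks a leaf block and fires the callback */
    static unsigned scan(const char **src, unsigned n,
                         int (*cb)(const char *, void *), void *opaque)
    {
        for (unsigned i = 0; i < n; i++)
            cb(src[i], opaque);
        return n;               /* how many entries the scan itself saw */
    }

    int main(void)
    {
        const char *names[] = { "a", "b", "c" };
        struct gather g = { .offset = 0 };
        unsigned entries = scan(names, 3, gather_cb, &g);

        /* consistency check, like "entries2 != g.offset" in the results */
        if (entries != g.offset)
            fprintf(stderr, "mismatch: %u vs %u\n", entries, g.offset);
        else
            printf("gathered %u entries\n", g.offset);
        return 0;
    }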
/fs/
fs_struct.c
56 struct task_struct *g, *p; in chroot_fs_refs() local
61 for_each_process_thread(g, p) { in chroot_fs_refs()
/fs/xfs/
xfs_mount.c
692 struct xfs_groups *g = &mp->m_groups[type]; in xfs_calc_group_awu_max() local
695 if (g->blocks == 0) in xfs_calc_group_awu_max()
698 return max_pow_of_two_factor(g->blocks); in xfs_calc_group_awu_max()
699 return rounddown_pow_of_two(g->blocks); in xfs_calc_group_awu_max()
708 struct xfs_groups *g = &mp->m_groups[type]; in xfs_calc_atomic_write_unit_max() local
714 g->awu_max = min3(max_write, max_ioend, max_gsize); in xfs_calc_atomic_write_unit_max()
716 max_gsize, g->awu_max); in xfs_calc_atomic_write_unit_max()
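xfs_calc_group_awu_max() in the xfs_mount.c hits caps the atomic write unit by the group geometry: depending on the group layout it returns either the largest power-of-two factor of g->blocks or g->blocks rounded down to a power of two, and xfs_calc_atomic_write_unit_max() then takes the minimum of that and the other limits. A sketch of the two power-of-two helpers under their usual meanings (the kernel's own max_pow_of_two_factor()/rounddown_pow_of_two() are not reproduced here):

    #include <stdio.h>
    #include <stdint.h>

    /* largest power of two that divides n (its lowest set bit), n != 0 */
    static uint64_t pow2_factor(uint64_t n)
    {
        return n & -n;
    }

    /* largest power of two <= n, n != 0 */
    static uint64_t pow2_rounddown(uint64_t n)
    {
        while (n & (n - 1))
            n &= n - 1;         /* clear low bits until one remains */
        return n;
    }

    int main(void)
    {
        uint64_t blocks = 1008;         /* invented group size */

        printf("factor:    %llu\n", (unsigned long long)pow2_factor(blocks));
        printf("rounddown: %llu\n", (unsigned long long)pow2_rounddown(blocks));
        return 0;
    }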
xfs_quota.h
194 #define xfs_qm_vop_create_dqattach(tp, ip, u, g, p) argument
/fs/ext4/
ialloc.c
375 static void get_orlov_stats(struct super_block *sb, ext4_group_t g, in get_orlov_stats() argument
382 s_flex_groups, g); in get_orlov_stats()
389 desc = ext4_get_group_desc(sb, g, NULL); in get_orlov_stats()
435 ext4_group_t i, grp, g, ngroups; in find_group_orlov() local
469 g = (parent_group + i) % ngroups; in find_group_orlov()
470 get_orlov_stats(sb, g, flex_size, &stats); in find_group_orlov()
479 grp = g; in find_group_orlov()
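The find_group_orlov() hit scans block groups round-robin starting at the parent's group, g = (parent_group + i) % ngroups, gathers per-group stats, and keeps the first group that satisfies the allocator's criteria. A sketch of that wrap-around scan with an invented fitness predicate in place of the Orlov heuristics:

    #include <stdio.h>

    /* invented predicate standing in for the Orlov heuristics */
    static int group_is_ok(const int free_inodes[], int g)
    {
        return free_inodes[g] > 0;
    }

    /* scan ngroups slots starting at parent_group, wrapping around */
    static int find_group(const int free_inodes[], int ngroups, int parent_group)
    {
        for (int i = 0; i < ngroups; i++) {
            int g = (parent_group + i) % ngroups;

            if (group_is_ok(free_inodes, g))
                return g;
        }
        return -1;
    }

    int main(void)
    {
        int free_inodes[] = { 0, 0, 0, 12, 3 };

        /* starts at group 2, wraps, first fit is group 3 */
        printf("chose group %d\n", find_group(free_inodes, 5, 2));
        return 0;
    }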
/fs/zonefs/
Kconfig
9 device (e.g. host-managed or host-aware SMR disk drives) as files.
/fs/cramfs/
README
5 a bit looser, e.g. it doesn't care if the <file_data> items are
65 and specific data block alignments e.g. for XIP applications.
176 e.g. get read_folio to decompress to a buffer of size MAX_BLKSIZE (which
Kconfig
47 MTD device capable of direct memory mapping e.g. from
/fs/coda/
Kconfig
10 disconnected operation (e.g. for laptops), read/write server
/fs/erofs/
Kconfig
20 file system with modern designs (e.g. no buffer heads, inline
22 scenarios which need high-performance read-only solutions, e.g.
/fs/verity/
Kconfig
22 against a known good value, e.g. from a digital signature).
/fs/configfs/
dir.c
739 struct config_group *g, *n; in configfs_remove_default_groups() local
741 list_for_each_entry_safe(g, n, &group->default_groups, group_entry) { in configfs_remove_default_groups()
742 list_del(&g->group_entry); in configfs_remove_default_groups()
743 config_item_put(&g->cg_item); in configfs_remove_default_groups()
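configfs_remove_default_groups() in the configfs dir.c hits uses list_for_each_entry_safe() so that each entry can be unlinked and released while the list is still being walked. The generic point is that the iterator must grab the next pointer before the current node goes away; a plain-C sketch of that idea, not using the kernel's list.h:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int          val;
        struct node *next;
    };

    static struct node *push(struct node *head, int val)
    {
        struct node *n = malloc(sizeof(*n));

        n->val = val;
        n->next = head;
        return n;
    }

    int main(void)
    {
        struct node *head = NULL, *cur, *next;

        for (int i = 1; i <= 3; i++)
            head = push(head, i);

        /* "safe" traversal: save ->next before freeing the current node */
        for (cur = head; cur; cur = next) {
            next = cur->next;
            printf("dropping %d\n", cur->val);
            free(cur);
        }
        return 0;
    }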
/fs/nfs/
dir.c
2969 int g; in access_cmp() local
2994 for (g = 0; g < ga->ngroups; g++) { in access_cmp()
2995 if (gid_lt(ga->gid[g], gb->gid[g])) in access_cmp()
2997 if (gid_gt(ga->gid[g], gb->gid[g])) in access_cmp()
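access_cmp() in the nfs dir.c hits orders two credentials by comparing their supplementary-group arrays element by element, returning as soon as one gid differs. A sketch of that lexicographic array comparison on plain integers instead of kgid_t (assuming, as the original does for its own arrays, that both have the same length and ordering):

    #include <stdio.h>

    static int gids_cmp(const unsigned *a, const unsigned *b, int n)
    {
        for (int g = 0; g < n; g++) {
            if (a[g] < b[g])
                return -1;
            if (a[g] > b[g])
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned a[] = { 4, 24, 27 };
        unsigned b[] = { 4, 24, 30 };

        printf("%d\n", gids_cmp(a, b, 3));      /* -1: a sorts first */
        return 0;
    }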
/fs/smb/client/
Kconfig
33 and NAS appliances (e.g. Samba, Windows 11, Windows Server 2022,
34 MacOS) and even in the cloud (e.g. Microsoft Azure) and also by the
/fs/nfsd/
export.c
1350 static bool secinfo_flags_equal(int f, int g) in secinfo_flags_equal() argument
1353 g &= NFSEXP_SECINFO_FLAGS; in secinfo_flags_equal()
1354 return f == g; in secinfo_flags_equal()
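secinfo_flags_equal() masks both flag words with NFSEXP_SECINFO_FLAGS before comparing, so only the security-info bits participate in the equality test. A tiny sketch of comparing flags under a mask, with an invented mask value rather than the real NFSEXP_SECINFO_FLAGS:

    #include <stdio.h>

    #define SECINFO_MASK 0x0f       /* invented mask: only the low bits matter */

    static int flags_equal(int f, int g)
    {
        return (f & SECINFO_MASK) == (g & SECINFO_MASK);
    }

    int main(void)
    {
        /* differ only outside the mask -> still considered equal */
        printf("%d\n", flags_equal(0x13, 0x23));        /* 1 */
        printf("%d\n", flags_equal(0x13, 0x14));        /* 0 */
        return 0;
    }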

