| /fs/crypto/ |
| inline_crypt.c |
     32  if (devs) in fscrypt_get_devices()
     33  return devs; in fscrypt_get_devices()
     35  devs = kmalloc(sizeof(*devs), GFP_KERNEL); in fscrypt_get_devices()
     36  if (!devs) in fscrypt_get_devices()
     38  devs[0] = sb->s_bdev; in fscrypt_get_devices()
     40  return devs; in fscrypt_get_devices()
    139  if (IS_ERR(devs)) in fscrypt_select_encryption_impl()
    151  kfree(devs); in fscrypt_select_encryption_impl()
    186  if (IS_ERR(devs)) { in fscrypt_prepare_inline_crypt_key()
    195  kfree(devs); in fscrypt_prepare_inline_crypt_key()
    [all …]
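
The inline_crypt.c hits trace fscrypt's pattern of exposing a filesystem's block devices as a heap-allocated array: for the common single-device case, a one-element array holding sb->s_bdev is returned so callers can iterate uniformly and later kfree() it. A minimal sketch of that shape, reconstructed only from the matched lines (function name simplified, error handling abbreviated):

```c
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>

/*
 * Sketch of the pattern in the inline_crypt.c matches above: for a
 * filesystem on a single block device, return a heap-allocated
 * one-element array so callers can treat single- and multi-device
 * filesystems uniformly.  The caller frees the array with kfree().
 */
static struct block_device **get_devices_sketch(struct super_block *sb,
						unsigned int *num_devs)
{
	struct block_device **devs = kmalloc(sizeof(*devs), GFP_KERNEL);

	if (!devs)
		return ERR_PTR(-ENOMEM);
	devs[0] = sb->s_bdev;
	*num_devs = 1;
	return devs;
}
```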
|
| /fs/bcachefs/ |
| sb-members.h |
     44  return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX); in dev_mask_nr()
     50  darray_for_each(devs, i) in bch2_dev_list_has_dev()
     59  darray_for_each(*devs, i) in bch2_dev_list_drop_dev()
     61  darray_remove_item(devs, i); in bch2_dev_list_drop_dev()
     69  if (!bch2_dev_list_has_dev(*devs, dev)) { in bch2_dev_list_add_dev()
     70  BUG_ON(devs->nr >= ARRAY_SIZE(devs->data)); in bch2_dev_list_add_dev()
     71  devs->data[devs->nr++] = dev; in bch2_dev_list_add_dev()
    191  return dev < c->sb.nr_devices && c->devs[dev]; in bch2_dev_exists()
    203  return rcu_dereference_check(c->devs[dev], 1); in bch2_dev_have_ref()
    210  return rcu_dereference_protected(c->devs[dev], in bch2_dev_locked()
    [all …]
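
sb-members.h implements a small fixed-capacity list of device indices: bch2_dev_list_has_dev() scans for membership, bch2_dev_list_add_dev() de-duplicates and asserts against overflow with BUG_ON(), and bch2_dev_list_drop_dev() removes an entry. A self-contained sketch of the same data structure, using plain loops in place of bcachefs's darray_for_each()/darray_remove_item() helpers (struct layout and capacity here are illustrative):

```c
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative stand-in for bcachefs's device list; capacity is made up. */
struct dev_list_sketch {
	unsigned	nr;
	u8		data[8];
};

static bool dev_list_has(const struct dev_list_sketch *devs, u8 dev)
{
	for (unsigned i = 0; i < devs->nr; i++)
		if (devs->data[i] == dev)
			return true;
	return false;
}

/* De-duplicating add: a device index appears at most once. */
static void dev_list_add(struct dev_list_sketch *devs, u8 dev)
{
	if (!dev_list_has(devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

/* Order-preserving remove, shifting later entries down. */
static void dev_list_drop(struct dev_list_sketch *devs, u8 dev)
{
	for (unsigned i = 0; i < devs->nr; i++)
		if (devs->data[i] == dev) {
			memmove(&devs->data[i], &devs->data[i + 1],
				(--devs->nr - i) * sizeof(devs->data[0]));
			return;
		}
}
```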
|
| replicas.c |
     32  BUG_ON(e->devs[i] >= e->devs[i + 1]); in verify_replicas_entry()
     38  bubble_sort(e->devs, e->nr_devs, u8_cmp); in bch2_replicas_entry_sort()
     85  if (r->devs[i] != BCH_SB_MEMBER_INVALID && in bch2_replicas_entry_sb_validate()
    114  !bch2_dev_exists(c, r->devs[i])) { in bch2_replicas_entry_validate()
    201  struct bch_devs_list devs) in bch2_devlist_to_replicas() argument
    211  darray_for_each(devs, i) in bch2_devlist_to_replicas()
    577  memcpy(dst->devs, e->devs, e->nr_devs); in __bch2_sb_replicas_v0_to_cpu_replicas()
    638  memcpy(dst->devs, src->devs, src->nr_devs); in bch2_cpu_replicas_to_sb_replicas_v0()
    829  nr_online += test_bit(e->devs[i], devs.d); in bch2_have_enough_devs()
    884  if (r->devs[i] == dev) in bch2_sb_dev_has_data()
    [all …]
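
verify_replicas_entry() asserts that the device list is strictly increasing, i.e. sorted with no duplicates, and bch2_replicas_entry_sort() establishes that invariant with bcachefs's own bubble_sort() helper. A sketch of the same invariant, substituting the generic kernel sort() for bubble_sort():

```c
#include <linux/bug.h>
#include <linux/sort.h>
#include <linux/types.h>

static int u8_cmp_sketch(const void *l, const void *r)
{
	return (int)*(const u8 *)l - (int)*(const u8 *)r;
}

/*
 * Sort a replicas entry's device list, then assert it is strictly
 * increasing (sorted and duplicate-free), as verify_replicas_entry()
 * does above.
 */
static void sort_and_verify_devs(u8 *devs, unsigned nr)
{
	unsigned i;

	sort(devs, nr, sizeof(*devs), u8_cmp_sketch, NULL);

	for (i = 0; i + 1 < nr; i++)
		BUG_ON(devs[i] >= devs[i + 1]);
}
```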
|
| replicas_format.h |
      8  __u8 devs[] __counted_by(nr_devs);
     20  __u8 devs[] __counted_by(nr_devs);
     29  (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
     33  (e)->devs[(e)->nr_devs - 1] = (d); \
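
Both on-disk replicas-entry variants declare devs as a flexible array member annotated with __counted_by(nr_devs), which ties the array's runtime bound to the nr_devs field for FORTIFY_SOURCE and UBSAN bounds checking. A minimal illustration of the idiom (struct name hypothetical); note that the counter must be set before the flexible array is indexed:

```c
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical struct mirroring the replicas-entry layout matched above. */
struct entry_sketch {
	u8 nr_devs;
	u8 devs[] __counted_by(nr_devs);	/* bound checked against nr_devs */
};

static struct entry_sketch *entry_alloc(u8 nr)
{
	/* struct_size(): header + nr * sizeof(devs[0]), overflow-checked */
	struct entry_sketch *e = kzalloc(struct_size(e, devs, nr), GFP_KERNEL);

	if (e)
		e->nr_devs = nr;	/* set the counter before touching devs[] */
	return e;
}
```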
|
| disk_groups.h |
     65  struct bch_devs_mask devs = c->rw_devs[data_type]; in target_rw_devs() local
     69  bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX); in target_rw_devs()
     70  return devs; in target_rw_devs()
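
target_rw_devs() starts from the filesystem-wide mask of devices writable for a given data type and intersects it with the target's mask via bitmap_and(), returning the result by value. A condensed sketch with an illustrative mask width:

```c
#include <linux/bitmap.h>

#define MASK_BITS 64	/* illustrative; bcachefs uses BCH_SB_MEMBERS_MAX */

struct devs_mask_sketch {
	DECLARE_BITMAP(d, MASK_BITS);
};

/* Keep only the devices present in both the rw mask and the target mask. */
static struct devs_mask_sketch
target_rw_intersect(struct devs_mask_sketch rw,
		    const struct devs_mask_sketch *target)
{
	bitmap_and(rw.d, rw.d, target->d, MASK_BITS);
	return rw;	/* small struct returned by value, as above */
}
```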
|
| chardev.c |
     61  char **devs = NULL;
     75  devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);
     77  if (copy_from_user(user_devs, user_arg->devs,
     82  devs[i] = strndup_user((const char __user *)(unsigned long)
     85  ret = PTR_ERR_OR_ZERO(devs[i]);
     90  c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
     95  if (devs)
     97  kfree(devs[i]);
     98  kfree(devs);
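
The chardev.c hits show the ioctl-side pattern for accepting an array of device path strings from userspace: kcalloc() a kernel-side pointer array, copy each user pointer in, duplicate the string with strndup_user(), and unwind on failure. A sketch of that pattern (function name and the PATH_MAX cap are this sketch's assumptions; bcachefs may bound the copy differently):

```c
#include <linux/err.h>
#include <linux/limits.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Pull an array of userspace string pointers into the kernel,
 * strndup_user() each path, and unwind everything on failure.
 */
static char **copy_dev_paths(const u64 __user *user_ptrs, unsigned nr)
{
	char **devs;
	unsigned i;
	int ret;

	devs = kcalloc(nr, sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr; i++) {
		u64 uptr;

		if (get_user(uptr, &user_ptrs[i])) {
			ret = -EFAULT;
			goto err;
		}
		/* bounded copy of a NUL-terminated user string */
		devs[i] = strndup_user((const char __user *)(unsigned long)uptr,
				       PATH_MAX);
		ret = PTR_ERR_OR_ZERO(devs[i]);
		if (ret)
			goto err;
	}
	return devs;
err:
	while (i--)	/* frees only the successfully copied strings */
		kfree(devs[i]);
	kfree(devs);
	return ERR_PTR(ret);
}
```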
|
| disk_groups.c |
    156  __set_bit(i, dst->devs.d); in bch2_sb_disk_groups_to_cpu()
    181  ? rcu_dereference(c->devs[t.dev]) in bch2_target_to_mask()
    189  ? &g->entries[t.group].devs in bch2_target_to_mask()
    210  ? &g->entries[t.group].devs in bch2_dev_in_target()
    394  for_each_member_device_rcu(c, ca, &g->entries[i].devs) in bch2_disk_groups_to_text()
    533  ? rcu_dereference(c->devs[t.dev]) in bch2_target_to_text()
|
| disk_accounting.c |
     99  bubble_sort(k->replicas.devs, k->replicas.nr_devs, u8_cmp); in bch2_disk_accounting_mod()
    211  bkey_fsck_err_on(acc_k.replicas.devs[i] >= acc_k.replicas.devs[i + 1], in bch2_accounting_validate()
    477  u8 bytes[struct_size_t(struct bch_replicas_usage, r.devs, in bch2_fs_replicas_usage_read()
    686  if (r.e.devs[i] != BCH_SB_MEMBER_INVALID && in bch2_disk_accounting_validate_late()
    687  !bch2_dev_exists(c, r.e.devs[i])) { in bch2_disk_accounting_validate_late()
    688  invalid_dev = r.e.devs[i]; in bch2_disk_accounting_validate_late()
|
| ec.c |
   1490  struct bch_devs_mask *devs) in pick_blocksize() argument
   1497  for_each_member_device_rcu(c, ca, devs) in pick_blocksize()
   1581  struct bch_devs_mask devs = h->devs; in ec_stripe_head_devs_update() local
   1588  nr_devs = dev_mask_nr(&h->devs); in ec_stripe_head_devs_update()
   1590  for_each_member_device_rcu(c, ca, &h->devs) in ec_stripe_head_devs_update()
   1592  __clear_bit(ca->dev_idx, h->devs.d); in ec_stripe_head_devs_update()
   1593  nr_devs_with_durability = dev_mask_nr(&h->devs); in ec_stripe_head_devs_update()
   1595  h->blocksize = pick_blocksize(c, &h->devs); in ec_stripe_head_devs_update()
   1598  for_each_member_device_rcu(c, ca, &h->devs) in ec_stripe_head_devs_update()
   1625  bitmap_andnot(devs_leaving.d, devs.d, h->devs.d, BCH_SB_MEMBERS_MAX); in ec_stripe_head_devs_update()
   [all …]
|
| bcachefs_ioctl.h |
     49  __u64 devs[];
    428  __u64 devs[] __counted_by(nr_devs);
|
| disk_groups_types.h |
      9  struct bch_devs_mask devs; member
|
| extents_types.h |
     39  } devs[BCH_REPLICAS_MAX + 1]; member
|
| extents.c |
     56  for (struct bch_dev_io_failures *f = failed->devs; in bch2_io_failures_to_text()
     57  f < failed->devs + failed->nr; in bch2_io_failures_to_text()
     96  for (i = f->devs; i < f->devs + f->nr; i++) in bch2_dev_io_failures()
    110  BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs)); in bch2_mark_io_failure()
    112  f = &failed->devs[failed->nr++]; in bch2_mark_io_failure()
    131  BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs)); in bch2_mark_btree_validate_failure()
    133  f = &failed->devs[failed->nr++]; in bch2_mark_btree_validate_failure()
    856  if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev]) in bch2_bkey_durability_safe()
|
| ec_types.h |
      9  devs, BCH_BKEY_PTRS_MAX)];
|
| replicas.h |
     43  e->devs[0] = dev; in bch2_replicas_entry_cached()
|
| journal_io.c |
   1532  struct dev_alloc_list *devs, in journal_advance_devs_to_next_bucket() argument
   1538  darray_for_each(*devs, i) { in journal_advance_devs_to_next_bucket()
   1539  struct bch_dev *ca = rcu_dereference(c->devs[*i]); in journal_advance_devs_to_next_bucket()
   1563  struct dev_alloc_list *devs, in __journal_write_alloc() argument
   1570  darray_for_each(*devs, i) { in __journal_write_alloc()
   1616  struct bch_devs_mask devs; in journal_write_alloc() local
   1627  devs = target_rw_devs(c, BCH_DATA_journal, target); in journal_write_alloc()
   1628  bch2_dev_alloc_list(c, &j->wp.stripe, &devs, &devs_sorted); in journal_write_alloc()
   1734  journal_seq_pin(j, seq)->devs = w->devs_written; in CLOSURE_CALLBACK()
|
| fs-io.c |
     63  struct bch_devs_mask devs; in bch2_inode_flush_nocow_writes_async() local
     70  devs = inode->ei_devs_need_flush; in bch2_inode_flush_nocow_writes_async()
     73  for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) { in bch2_inode_flush_nocow_writes_async()
     75  ca = rcu_dereference(c->devs[dev]); in bch2_inode_flush_nocow_writes_async()
|
| journal_types.h |
     73  struct bch_devs_list devs; member
|
| journal_reclaim.c |
    937  ? bch2_dev_list_has_dev(p->devs, dev_idx) in bch2_journal_flush_device_pins()
    938  : p->devs.nr < c->opts.metadata_replicas) in bch2_journal_flush_device_pins()
    971  journal_seq_pin(j, seq)->devs); in bch2_journal_flush_device_pins()
|
| ec.h |
    238  struct bch_devs_mask devs; member
|
| /fs/erofs/ |
| super.c |
    227  sbi->devs->flatdev = true; in erofs_scan_devices()
    231  down_read(&sbi->devs->rwsem); in erofs_scan_devices()
    251  ++sbi->devs->extra_devices; in erofs_scan_devices()
    258  up_read(&sbi->devs->rwsem); in erofs_scan_devices()
    501  ++sbi->devs->extra_devices; in erofs_fc_parse_param()
    811  if (!devs) in erofs_free_dev_context()
    814  idr_destroy(&devs->tree); in erofs_free_dev_context()
    815  kfree(devs); in erofs_free_dev_context()
    852  if (!sbi->devs) { in erofs_init_fs_context()
    858  idr_init(&sbi->devs->tree); in erofs_init_fs_context()
    [all …]
|
| data.c |
    187  struct erofs_dev_context *devs = EROFS_SB(sb)->devs; in erofs_map_dev() local
    195  down_read(&devs->rwsem); in erofs_map_dev()
    196  dif = idr_find(&devs->tree, map->m_deviceid - 1); in erofs_map_dev()
    198  up_read(&devs->rwsem); in erofs_map_dev()
    201  if (devs->flatdev) { in erofs_map_dev()
    203  up_read(&devs->rwsem); in erofs_map_dev()
    207  up_read(&devs->rwsem); in erofs_map_dev()
    208  } else if (devs->extra_devices && !devs->flatdev) { in erofs_map_dev()
    209  down_read(&devs->rwsem); in erofs_map_dev()
    210  idr_for_each_entry(&devs->tree, dif, id) { in erofs_map_dev()
    [all …]
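
erofs_map_dev() resolves a one-based on-disk device ID to its device entry with idr_find() under the device context's read-side rwsem; IDs are stored zero-based in the IDR, hence the `- 1`. A condensed sketch of the lookup (types simplified to stand-ins):

```c
#include <linux/idr.h>
#include <linux/rwsem.h>

/* Simplified stand-ins for erofs's device context and per-device entry. */
struct dev_info_sketch {
	int dummy;
};

struct dev_ctx_sketch {
	struct rw_semaphore rwsem;
	struct idr tree;
};

/* Resolve a one-based device id under the read-side lock. */
static struct dev_info_sketch *map_dev_sketch(struct dev_ctx_sketch *devs,
					      unsigned int deviceid)
{
	struct dev_info_sketch *dif;

	down_read(&devs->rwsem);
	dif = idr_find(&devs->tree, deviceid - 1);	/* IDR ids are zero-based */
	up_read(&devs->rwsem);
	return dif;
}
```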
|
| /fs/nfs/ |
| callback_xdr.c |
    278  args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL); in decode_devicenotify_args()
    279  if (!args->devs) { in decode_devicenotify_args()
    286  struct cb_devicenotifyitem *dev = &args->devs[i]; in decode_devicenotify_args()
    338  kfree(args->devs); in decode_devicenotify_args()
    340  args->devs = NULL; in decode_devicenotify_args()
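
decode_devicenotify_args() sizes the allocation for the notification items with kmalloc_array(), which returns NULL if the multiplication would overflow, so a hostile item count decoded from the XDR stream cannot wrap into a short allocation. A small sketch of that allocation pattern (struct and sanity bound are this sketch's assumptions):

```c
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct cb_devicenotifyitem. */
struct notify_item_sketch {
	u32 type;
};

/*
 * kmalloc_array() returns NULL when n * sizeof(item) would overflow,
 * unlike a bare kmalloc(n * size, ...).
 */
static struct notify_item_sketch *alloc_notify_items(u32 n)
{
	if (n == 0 || n > 1000)		/* illustrative upper bound */
		return NULL;
	return kmalloc_array(n, sizeof(struct notify_item_sketch),
			     GFP_KERNEL);
}
```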
|
| callback.h |
    157  struct cb_devicenotifyitem *devs; member
|
| callback_proc.c |
    381  struct cb_devicenotifyitem *dev = &args->devs[i]; in nfs4_callback_devicenotify()
    393  kfree(args->devs); in nfs4_callback_devicenotify()
|