/linux/drivers/md/dm-vdo/
vdo.c
    106   struct vdo *vdo;  in filter_vdos_locked()  local
    123   struct vdo *vdo;  in vdo_find_matching()  local
    432   thread->vdo = vdo;  in vdo_make_thread()
    541   struct vdo *vdo;  in vdo_make()  local
    681   void vdo_destroy(struct vdo *vdo)  in vdo_destroy()  argument
    901   vdo->states.vdo.state = vdo_get_state(vdo);  in record_vdo()
    1032  struct vdo *vdo = listener;  in notify_vdo_of_read_only_mode()  local
    1081  struct vdo *vdo = parent->vdo;  in vdo_wait_until_not_entering_read_only_mode()  local
    1145  struct vdo *vdo = completion->vdo;  in make_thread_read_only()  local
    1209  struct vdo *vdo = parent->vdo;  in vdo_allow_read_only_mode_entry()  local
    [all …]
dm-vdo-target.c
    1390  struct vdo *vdo = completion->vdo;  in pre_load_callback()  local
    1640  struct vdo *vdo = completion->vdo;  in check_may_grow_physical()  local
    1968  struct vdo *vdo = completion->vdo;  in write_super_block_for_suspend()  local
    1998  struct vdo *vdo = completion->vdo;  in suspend_callback()  local
    2181  struct vdo *vdo = completion->vdo;  in load_callback()  local
    2285  struct vdo *vdo = completion->vdo;  in handle_load_error()  local
    2321  struct vdo *vdo = completion->vdo;  in write_super_block_for_resume()  local
    2351  struct vdo *vdo = completion->vdo;  in resume_callback()  local
    2431  struct vdo *vdo = completion->vdo;  in grow_logical_callback()  local
    2480  struct vdo *vdo = completion->vdo;  in handle_logical_growth_error()  local
    [all …]
vdo.h
    57    struct vdo *vdo;  member
    166   struct vdo {  struct
    275   static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo)  in vdo_uses_bio_ack_queue()  argument
    303   void vdo_destroy(struct vdo *vdo);
    311   int __must_check vdo_synchronous_flush(struct vdo *vdo);
    315   bool vdo_set_compressing(struct vdo *vdo, bool enable);
    317   bool vdo_get_compressing(struct vdo *vdo);
    333   int vdo_enable_read_only_entry(struct vdo *vdo);
    341   bool __must_check vdo_is_read_only(struct vdo *vdo);
    347   void vdo_enter_recovery_mode(struct vdo *vdo);
    [all …]
flush.c
    26    struct vdo *vdo;  member
    135   int vdo_make_flusher(struct vdo *vdo)  in vdo_make_flusher()  argument
    142   vdo->flusher->vdo = vdo;  in vdo_make_flusher()
    143   vdo->flusher->thread_id = vdo->thread_config.packer_thread;  in vdo_make_flusher()
    145   vdo_initialize_completion(&vdo->flusher->completion, vdo,  in vdo_make_flusher()
    151   vdo->flusher);  in vdo_make_flusher()
    369   static void initialize_flush(struct vdo_flush *flush, struct vdo *vdo)  in initialize_flush()  argument
    392   void vdo_launch_flush(struct vdo *vdo, struct bio *bio)  in vdo_launch_flush()  argument
    416   initialize_flush(flush, vdo);  in vdo_launch_flush()
    463   struct vdo *vdo = completion->vdo;  in vdo_complete_flush_callback()  local
    [all …]
repair.c
    253   struct vdo *vdo = completion->vdo;  in finish_repair()  local
    326   struct vdo *vdo = completion->vdo;  in drain_slab_depot()  local
    640   struct vdo *vdo = completion->vdo;  in rebuild_reference_counts()  local
    797   struct vdo *vdo = completion->vdo;  in add_slab_journal_entries()  local
    855   struct vdo *vdo = completion->vdo;  in vdo_replay_into_slab_journals()  local
    1099  struct vdo *vdo = completion->vdo;  in recover_block_map()  local
    1326  struct vdo *vdo = repair->completion.vdo;  in append_sector_entries()  local
    1406  struct vdo *vdo = repair->completion.vdo;  in parse_journal_for_rebuild()  local
    1456  struct vdo *vdo = repair->completion.vdo;  in extract_new_mappings()  local
    1684  struct vdo *vdo = vio->completion.vdo;  in read_journal_endio()  local
    [all …]
dump.c
    55    static void do_dump(struct vdo *vdo, unsigned int dump_options_requested,  in do_dump()  argument
    65    atomic64_read(&vdo->stats.bios_completed));  in do_dump()
    72    for (id = 0; id < vdo->thread_config.thread_count; id++)  in do_dump()
    73    vdo_dump_work_queue(vdo->threads[id].queue);  in do_dump()
    76    vdo_dump_hash_zones(vdo->hash_zones);  in do_dump()
    77    dump_data_vio_pool(vdo->data_vio_pool,  in do_dump()
    80    vdo_dump_status(vdo);  in do_dump()
    130   int vdo_dump(struct vdo *vdo, unsigned int argc, char *const *argv, const char *why)  in vdo_dump()  argument
    138   do_dump(vdo, dump_options_requested, why);  in vdo_dump()
    143   void vdo_dump_all(struct vdo *vdo, const char *why)  in vdo_dump_all()  argument
    [all …]
completion.c
    52    struct vdo *vdo,  in vdo_initialize_completion()  argument
    56    completion->vdo = vdo;  in vdo_initialize_completion()
    111   struct vdo *vdo = completion->vdo;  in vdo_enqueue_completion()  local
    114   if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count,  in vdo_enqueue_completion()
    117   vdo->thread_config.thread_count) != VDO_SUCCESS)  in vdo_enqueue_completion()
    123   vdo_enqueue_work_queue(vdo->threads[thread_id].queue, completion);  in vdo_enqueue_completion()
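The completion.c hits show the basic dm-vdo dispatch pattern: vdo_initialize_completion() stamps the owning vdo into the completion, and vdo_enqueue_completion() checks that the callback thread id is below thread_config.thread_count before handing the completion to that thread's work queue. The toy userspace model below mirrors only that shape; every toy_* name is invented here and none of this is the dm-vdo API.

#include <assert.h>
#include <stdio.h>

#define TOY_THREAD_COUNT 4

struct toy_vdo;

struct toy_completion {
        struct toy_vdo *vdo;              /* owning instance, set at init */
        unsigned int callback_thread_id;  /* thread that must run the callback */
        void (*callback)(struct toy_completion *completion);
};

struct toy_thread {
        struct toy_completion *queue[16]; /* stand-in for a per-thread work queue */
        unsigned int queued;
};

struct toy_vdo {
        unsigned int thread_count;
        struct toy_thread threads[TOY_THREAD_COUNT];
};

static void toy_initialize_completion(struct toy_completion *completion,
                                      struct toy_vdo *vdo)
{
        completion->vdo = vdo;
}

static void toy_enqueue_completion(struct toy_completion *completion)
{
        struct toy_vdo *vdo = completion->vdo;
        unsigned int id = completion->callback_thread_id;

        /* Mirrors the VDO_ASSERT that the thread id is within range. */
        assert(id < vdo->thread_count);
        vdo->threads[id].queue[vdo->threads[id].queued++] = completion;
}

static void report(struct toy_completion *completion)
{
        printf("callback queued for thread %u\n", completion->callback_thread_id);
}

int main(void)
{
        struct toy_vdo vdo = { .thread_count = TOY_THREAD_COUNT };
        struct toy_completion completion = { .callback_thread_id = 2,
                                             .callback = report };

        toy_initialize_completion(&completion, &vdo);
        toy_enqueue_completion(&completion);

        /* A real thread would drain its queue; run the callback inline here. */
        vdo.threads[2].queue[0]->callback(vdo.threads[2].queue[0]);
        return 0;
}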
logical-zone.c
    54    struct vdo *vdo = zones->vdo;  in initialize_zone()  local
    62    if (zone_number < vdo->thread_config.logical_zone_count - 1)  in initialize_zone()
    65    vdo_initialize_completion(&zone->completion, vdo,  in initialize_zone()
    70    zone->block_map_zone = &vdo->block_map->zones[zone_number];  in initialize_zone()
    77    return vdo_make_default_thread(vdo, zone->thread_id);  in initialize_zone()
    87    int vdo_make_logical_zones(struct vdo *vdo, struct logical_zones **zones_ptr)  in vdo_make_logical_zones()  argument
    92    zone_count_t zone_count = vdo->thread_config.logical_zone_count;  in vdo_make_logical_zones()
    102   zones->vdo = vdo;  in vdo_make_logical_zones()
    113   vdo->thread_config.admin_thread, zones, NULL,  in vdo_make_logical_zones()
    114   vdo, &zones->manager);  in vdo_make_logical_zones()
    [all …]
vio.h
    56    return vio->completion.vdo->thread_config.bio_threads[vio->bio_zone];  in get_vio_bio_zone_thread_id()
    78    int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
    81    int __must_check create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
    86    static inline int __must_check create_metadata_vio(struct vdo *vdo, enum vio_type vio_type,  in create_metadata_vio()  argument
    91    return create_multi_block_metadata_vio(vdo, vio_type, priority, parent, 1, data,  in create_metadata_vio()
    109   enum vio_priority priority, struct vdo *vdo)  in initialize_vio()  argument
    118   vdo_initialize_completion(&vio->completion, vdo, VIO_COMPLETION);  in initialize_vio()
    191   int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
vio.c
    44    struct vdo *vdo = vio->completion.vdo;  in pbn_from_vio_bio()  local
    78    int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,  in allocate_vio_components()  argument
    118   int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,  in create_multi_block_metadata_vio()  argument
    176   struct vdo *vdo = vio->completion.vdo;  in vdo_set_bio_properties()  local
    177   struct device_config *config = vdo->device_config;  in vdo_set_bio_properties()
    179   pbn -= vdo->geometry.bio_offset;  in vdo_set_bio_properties()
    252   struct vdo *vdo = vio->completion.vdo;  in update_vio_error_stats()  local
    256   atomic64_inc(&vdo->stats.read_only_error_count);  in update_vio_error_stats()
    260   atomic64_inc(&vdo->stats.no_space_error_count);  in update_vio_error_stats()
    311   int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,  in make_vio_pool()  argument
    [all …]
io-submitter.c
    76    struct atomic_statistics *stats = &vio->completion.vdo->stats;  in count_all_bios()
    109   struct vdo *vdo = vio->completion.vdo;  in send_bio_to_device()  local
    112   atomic64_inc(&vdo->stats.bios_submitted);  in send_bio_to_device()
    114   bio_set_dev(bio, vdo_get_backing_device(vdo));  in send_bio_to_device()
    141   struct io_submitter *submitter = vio->completion.vdo->io_submitter;  in get_bio_list()
    273   struct vdo *vdo = vio->completion.vdo;  in try_bio_map_merge()  local
    275   &vdo->io_submitter->bio_queue_data[vio->bio_zone];  in try_bio_map_merge()
    345   const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);  in __submit_metadata_vio()
    375   unsigned int max_requests_active, struct vdo *vdo,  in vdo_make_io_submitter()  argument
    417   result = vdo_make_thread(vdo, vdo->thread_config.bio_threads[i],  in vdo_make_io_submitter()
    [all …]
message-stats.h
    11    int vdo_write_config(struct vdo *vdo, char **buf, unsigned int *maxlen);
    12    int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen);
dump.h
    11    int vdo_dump(struct vdo *vdo, unsigned int argc, char *const *argv, const char *why);
    13    void vdo_dump_all(struct vdo *vdo, const char *why);
data-vio.c
    244   struct vdo *vdo = vdo_from_data_vio(data_vio);  in initialize_lbn_lock()  local
    259   struct vdo *vdo = vdo_from_data_vio(data_vio);  in launch_locked_request()  local
    273   struct vdo *vdo = vdo_from_data_vio(data_vio);  in acknowledge_data_vio()  local
    422   struct vdo *vdo = vdo_from_data_vio(data_vio);  in attempt_logical_block_lock()  local
    428   if (data_vio->logical.lbn >= vdo->states.vdo.config.logical_blocks) {  in attempt_logical_block_lock()
    534   static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *bio)  in launch_bio()  argument
    786   static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo)  in initialize_data_vio()  argument
    841   int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,  in make_data_vio_pool()  argument
    999   static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name)  in assert_on_vdo_cpu_thread()  argument
    1306  struct vdo *vdo = vdo_from_data_vio(data_vio);  in perform_cleanup_stage()  local
    [all …]
flush.h
    28    int __must_check vdo_make_flusher(struct vdo *vdo);
    38    void vdo_launch_flush(struct vdo *vdo, struct bio *bio);
logical-zone.h
    57    struct vdo *vdo;  member
    66    int __must_check vdo_make_logical_zones(struct vdo *vdo,
physical-zone.c
    325   static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)  in initialize_zone()  argument
    342   zone->thread_id = vdo->thread_config.physical_threads[zone_number];  in initialize_zone()
    343   zone->allocator = &vdo->depot->allocators[zone_number];  in initialize_zone()
    344   zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];  in initialize_zone()
    345   result = vdo_make_default_thread(vdo, zone->thread_id);  in initialize_zone()
    361   int vdo_make_physical_zones(struct vdo *vdo, struct physical_zones **zones_ptr)  in vdo_make_physical_zones()  argument
    365   zone_count_t zone_count = vdo->thread_config.physical_zone_count;  in vdo_make_physical_zones()
    376   result = initialize_zone(vdo, zones);  in vdo_make_physical_zones()
Makefile
    5     obj-$(CONFIG_DM_VDO) += dm-vdo.o
    7     dm-vdo-objs := \
    14    dm-vdo-target.o \
    40    vdo.o \
types.h
    211   struct vdo *vdo;  member
    314   struct vdo *vdo;  member
    333   struct vdo;
block-map.h
    51    struct vdo *vdo;  member
    255   struct vdo *vdo;  member
    334   block_count_t logical_blocks, struct vdo *vdo,
/linux/include/linux/usb/
pd_vdo.h
    78    #define PD_VDO_VID(vdo) ((vdo) >> 16)  argument
    79    #define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1)  argument
    80    #define PD_VDO_SVDM_VER(vdo) (((vdo) >> 13) & 0x3)  argument
    81    #define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7)  argument
    82    #define PD_VDO_CMD(vdo) ((vdo) & 0x1f)  argument
    83    #define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3)  argument
    154   #define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)  argument
    155   #define PD_IDH_VID(vdo) ((vdo) & 0xffff)  argument
    156   #define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26))  argument
    166   #define PD_CSTAT_XID(vdo) (vdo)  argument
    [all …]
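The pd_vdo.h hits above are the field accessors for a 32-bit USB Power Delivery VDM header: the SVID sits in bits 31:16, the structured-VDM flag in bit 15, the SVDM version in bits 14:13, the object position in bits 10:8, the command type in bits 7:6, and the command in bits 4:0. Below is a minimal userspace sketch of decoding such a header; it repeats the macro bodies shown above rather than including the kernel header, and the sample value is invented for illustration.

/*
 * Standalone sketch of decoding a structured VDM header with the bit-field
 * macros listed above. The macro bodies are repeated verbatim so the example
 * builds outside the kernel tree; the header value is not from a real trace.
 */
#include <stdio.h>

#define PD_VDO_VID(vdo)      ((vdo) >> 16)         /* SVID, bits 31:16 */
#define PD_VDO_SVDM(vdo)     (((vdo) >> 15) & 1)   /* structured VDM flag */
#define PD_VDO_SVDM_VER(vdo) (((vdo) >> 13) & 0x3) /* SVDM version */
#define PD_VDO_OPOS(vdo)     (((vdo) >> 8) & 0x7)  /* object position */
#define PD_VDO_CMDT(vdo)     (((vdo) >> 6) & 0x3)  /* command type */
#define PD_VDO_CMD(vdo)      ((vdo) & 0x1f)        /* command */

int main(void)
{
        unsigned int hdr = 0xff008001u; /* hypothetical Discover Identity header */

        printf("SVID 0x%04x svdm %u ver %u opos %u cmdt %u cmd %u\n",
               PD_VDO_VID(hdr), PD_VDO_SVDM(hdr), PD_VDO_SVDM_VER(hdr),
               PD_VDO_OPOS(hdr), PD_VDO_CMDT(hdr), PD_VDO_CMD(hdr));
        return 0;
}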
typec_altmode.h
    29    u32 vdo;  member
    60    int (*enter)(struct typec_altmode *altmode, u32 *vdo);
    62    void (*attention)(struct typec_altmode *altmode, u32 vdo);
    64    const u32 *vdo, int cnt);
    70    int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
    72    int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
    74    const u32 header, const u32 *vdo, int count);
    87    int (*enter)(struct typec_altmode *altmode, enum typec_plug_index sop, u32 *vdo);
    90    const u32 hdr, const u32 *vdo, int cnt);
    93    int typec_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop, u32 *vdo);
    [all …]
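typec_altmode.h above carries both the alternate-mode data (the vdo member holds the mode VDO reported by Discover Modes) and the callback signatures that the bus.c entry points listed further down dispatch into. The fragment below is a hedged sketch of an ops table filling in the three callbacks whose prototypes appear in the listing; the sketch_* names are invented for illustration, the stubs do no real work, and how the table gets attached to an altmode is left to the owning port or alt-mode driver.

#include <linux/types.h>
#include <linux/usb/typec_altmode.h>

/* Enter Mode request; a non-NULL vdo would carry an Enter Mode VDO. */
static int sketch_enter(struct typec_altmode *altmode, u32 *vdo)
{
        return 0;
}

/* Attention message from the partner; vdo is the single Attention VDO. */
static void sketch_attention(struct typec_altmode *altmode, u32 vdo)
{
}

/* Incoming VDM; header is the VDM header, vdo/cnt the trailing objects. */
static int sketch_vdm(struct typec_altmode *altmode, const u32 header,
                      const u32 *vdo, int cnt)
{
        return 0;
}

static const struct typec_altmode_ops sketch_ops = {
        .enter     = sketch_enter,
        .attention = sketch_attention,
        .vdm       = sketch_vdm,
};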
/linux/Documentation/admin-guide/device-mapper/
vdo.rst
    3     dm-vdo
    20    https://github.com/dm-vdo/vdo/
    35    rarely needed except by dm-vdo developers.
    46    https://github.com/dm-vdo/vdoestimator/
    115   outside the vdo volume, threads of this type allow the vdo
    147   the vdo is chosen; the vdo storage device must be large
    225   Stop the vdo volume.
    241   All vdo devices accept messages in the form:
    270   default: Equivalent to 'queues vdo'
    286   The name of the vdo volume.
    [all …]
/linux/drivers/usb/typec/altmodes/
displayport.c
    106   DP_CAP_DFP_D_PIN_ASSIGN(dp->port->vdo);  in dp_altmode_configure()
    115   DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);  in dp_altmode_configure()
    246   u32 vdo;  in dp_altmode_work()  local
    275   vdo = 1;  in dp_altmode_work()
    330   dp->data.status = vdo;  in dp_altmode_attention()
    384   dp->data.status = *vdo;  in dp_altmode_vdm()
    533   cap = DP_CAP_CAPABILITY(dp->alt->vdo);  in configuration_store()
    569   cap = DP_CAP_CAPABILITY(dp->alt->vdo);  in configuration_show()
    733   if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &  in dp_altmode_probe()
    735   !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &  in dp_altmode_probe()
    [all …]
/linux/drivers/usb/typec/
bus.c
    126   int typec_altmode_enter(struct typec_altmode *adev, u32 *vdo)  in typec_altmode_enter()  argument
    147   return pdev->ops->enter(pdev, vdo);  in typec_altmode_enter()
    186   int typec_altmode_attention(struct typec_altmode *adev, u32 vdo)  in typec_altmode_attention()  argument
    197   pdev->ops->attention(pdev, vdo);  in typec_altmode_attention()
    215   const u32 header, const u32 *vdo, int count)  in typec_altmode_vdm()  argument
    233   return pdev->ops->vdm(pdev, header, vdo, count);  in typec_altmode_vdm()
    259   int typec_cable_altmode_enter(struct typec_altmode *adev, enum typec_plug_index sop, u32 *vdo)  in typec_cable_altmode_enter()  argument
    278   return pdev->cable_ops->enter(pdev, sop, vdo);  in typec_cable_altmode_enter()
    322   const u32 header, const u32 *vdo, int count)  in typec_cable_altmode_vdm()  argument
    345   return pdev->cable_ops->vdm(pdev, sop, header, vdo, count);  in typec_cable_altmode_vdm()