/drivers/md/

dm-delay.c
     88  struct dm_delay_info *delayed, *next;    in flush_delayed_bios()  [local]
     99  list_for_each_entry_safe(delayed, next, &local_list, list) {    in flush_delayed_bios()
    102  struct bio *bio = dm_bio_from_per_bio_data(delayed,    in flush_delayed_bios()
    104  list_del(&delayed->list);    in flush_delayed_bios()
    106  delayed->class->ops--;    in flush_delayed_bios()
    113  next_expires = delayed->expires;    in flush_delayed_bios()
    115  next_expires = min(next_expires, delayed->expires);    in flush_delayed_bios()
    314  struct dm_delay_info *delayed;    in delay_bio()  [local]
    322  delayed->context = dc;    in delay_bio()
    331  list_add_tail(&delayed->list, &dc->delayed_bios);    in delay_bio()
    [all …]

Kconfig
    229  delayed writes.
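The dm-delay hits above show the driver's delayed-bio bookkeeping: a struct dm_delay_info lives in each bio's per-bio data, delay_bio() queues it on dc->delayed_bios with an expiry time, and flush_delayed_bios() walks the list and releases whatever is due. Below is a minimal sketch of just that flush step, and only a sketch: the dm_delay_info layout and the per_bio_data_size parameter are assumptions for illustration, while dm_bio_from_per_bio_data(), the list helpers, and the jiffies macros are the standard kernel APIs visible in the listing. The real driver also tracks per-class counters and the next expiry, which this omits.

/*
 * Illustrative sketch of flushing expired delayed bios; not dm-delay itself.
 * Structure layout and per_bio_data_size are assumed for the example.
 */
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/device-mapper.h>

struct delay_c;				/* owning target context, opaque here */

struct dm_delay_info {
	struct delay_c *context;	/* set in delay_bio(), per the listing */
	struct list_head list;		/* entry on dc->delayed_bios */
	unsigned long expires;		/* jiffies at which to release the bio */
};

static void flush_expired_bios_sketch(struct list_head *local_list,
				      size_t per_bio_data_size)
{
	struct dm_delay_info *delayed, *next;

	list_for_each_entry_safe(delayed, next, local_list, list) {
		struct bio *bio;

		if (time_before(jiffies, delayed->expires))
			continue;	/* not due yet, leave it queued */

		/* Recover the bio owning this per-bio data and release it. */
		bio = dm_bio_from_per_bio_data(delayed, per_bio_data_size);
		list_del(&delayed->list);
		submit_bio_noacct(bio);
	}
}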
|
/drivers/usb/serial/

usb_wwan.c
    182  usb_anchor_urb(this_urb, &portdata->delayed);    in usb_wwan_write()
    402  urb = usb_get_from_anchor(&portdata->delayed);    in usb_wwan_close()
    456  init_usb_anchor(&portdata->delayed);    in usb_wwan_port_probe()
    578  urb = usb_get_from_anchor(&portdata->delayed);    in usb_wwan_submit_delayed_urbs()

sierra.c
    272  struct usb_anchor delayed;    [member]
    481  usb_anchor_urb(urb, &portdata->delayed);    in sierra_write()
    747  urb = usb_get_from_anchor(&portdata->delayed);    in sierra_close()
    870  init_usb_anchor(&portdata->delayed);    in sierra_port_probe()
    961  urb = usb_get_from_anchor(&portdata->delayed);    in sierra_submit_delayed_urbs()

usb-wwan.h
     51  struct usb_anchor delayed;    [member]
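Both usb_wwan and sierra (and cdc-acm further down) keep a struct usb_anchor named "delayed" in their port data: write URBs are anchored there while the device is suspended, then pulled back out and submitted on resume. The following is a minimal sketch of that park-and-resubmit pattern using the standard anchor helpers; the my_portdata structure, its lock, and the suspended flag are simplified assumptions rather than the drivers' actual private data or locking.

/* Sketch of the "delayed write URB" pattern; not the actual driver code. */
#include <linux/usb.h>
#include <linux/spinlock.h>

struct my_portdata {			/* simplified stand-in for the port private data */
	struct usb_anchor delayed;	/* URBs parked while the device is suspended */
	spinlock_t lock;
	bool suspended;
};

static void my_port_setup(struct my_portdata *portdata)
{
	spin_lock_init(&portdata->lock);
	init_usb_anchor(&portdata->delayed);
	portdata->suspended = false;
}

/* On write: park the URB instead of submitting while suspended. */
static int my_write_urb(struct my_portdata *portdata, struct urb *urb)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&portdata->lock, flags);
	if (portdata->suspended)
		usb_anchor_urb(urb, &portdata->delayed);
	else
		err = usb_submit_urb(urb, GFP_ATOMIC);
	spin_unlock_irqrestore(&portdata->lock, flags);

	return err;
}

/* On resume: drain the anchor and submit everything that was parked. */
static void my_submit_delayed_urbs(struct my_portdata *portdata)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&portdata->delayed))) {
		if (usb_submit_urb(urb, GFP_ATOMIC))
			pr_err("delayed urb submission failed\n");
		/*
		 * usb_get_from_anchor() returns the URB with an extra
		 * reference held; drop it here.  Real drivers also unwind
		 * their own write bookkeeping on failure.
		 */
		usb_free_urb(urb);
	}
}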
|
/drivers/gpu/drm/amd/amdgpu/

amdgpu_vm_sdma.c
     48  : &p->vm->delayed;    in amdgpu_vm_sdma_alloc_job()
    112  ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,    in amdgpu_vm_sdma_commit()

amdgpu_vm.c
    516  return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,    in amdgpu_vm_init_entities()
    529  drm_sched_entity_destroy(&vm->delayed);    in amdgpu_vm_fini_entities()
    550  if (drm_sched_entity_error(&vm->delayed))    in amdgpu_vm_generation()
    675  spin_lock(&vm->delayed.lock);    in amdgpu_vm_ready()
    676  ret &= !vm->delayed.stopped;    in amdgpu_vm_ready()
    677  spin_unlock(&vm->delayed.lock);    in amdgpu_vm_ready()
   2428  return drm_sched_entity_flush(&vm->delayed, timeout);    in amdgpu_vm_wait_idle()

amdgpu_ttm.h
    185  bool delayed);

amdgpu_ttm.c
   2170  bool delayed)    in amdgpu_ttm_prepare_job()  [argument]
   2176  struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :    in amdgpu_ttm_prepare_job()
   2255  bool vm_needs_flush, bool delayed)    in amdgpu_ttm_fill_mem()  [argument]
   2268  &job, delayed);    in amdgpu_ttm_fill_mem()
   2357  bool delayed)    in amdgpu_fill_buffer()  [argument]
   2387  &next, true, delayed);    in amdgpu_fill_buffer()

amdgpu_vm.h
    382  struct drm_sched_entity delayed;    [member]
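The amdgpu hits center on a per-VM drm_sched_entity called "delayed": page-table update jobs that are allowed to wait on fences are queued through it, amdgpu_vm_wait_idle() flushes it, and VM teardown destroys it (the VM also carries a sibling "immediate" entity). Below is a rough, hedged sketch of that entity lifecycle using only the drm scheduler calls seen in the listing; my_vm, the scheduler-list argument, and the error-unwind ordering are illustrative assumptions, not the amdgpu implementation.

/* Sketch of the delayed scheduler-entity lifecycle; not the amdgpu code. */
#include <drm/gpu_scheduler.h>

struct my_vm {					/* simplified stand-in for struct amdgpu_vm */
	struct drm_sched_entity immediate;	/* updates that must not be delayed */
	struct drm_sched_entity delayed;	/* updates allowed to wait on fences */
};

/* sched_list/num_scheds would come from the device's page-table rings. */
static int my_vm_init_entities(struct my_vm *vm,
			       struct drm_gpu_scheduler **sched_list,
			       unsigned int num_scheds)
{
	int r;

	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  sched_list, num_scheds, NULL);
	if (r)
		return r;

	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				  sched_list, num_scheds, NULL);
	if (r)
		drm_sched_entity_destroy(&vm->immediate);

	return r;
}

/* Block until every job queued on the delayed entity has been flushed. */
static long my_vm_wait_idle(struct my_vm *vm, long timeout)
{
	return drm_sched_entity_flush(&vm->delayed, timeout);
}

static void my_vm_fini_entities(struct my_vm *vm)
{
	drm_sched_entity_destroy(&vm->delayed);
	drm_sched_entity_destroy(&vm->immediate);
}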
|
/drivers/cpufreq/

sa1110-cpufreq.c
    130  static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)    in set_mdcas()  [argument]
    135  shift = delayed + 1 + rcd;    in set_mdcas()
|
/drivers/crypto/intel/qat/qat_common/

icp_qat_hw.h
    375  #define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(dir, delayed, \    [argument]
    379  (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) << \
|
/drivers/gpu/drm/ci/

gitlab-ci.yml
    198  # `when: delayed` + `start_in:`, for build-only jobs.
    201  .build-only-delayed-rules:
    210  when: delayed
    220  when: delayed
|
/drivers/gpu/vga/

Kconfig
     12  X isn't running and delayed switching until the next logoff. This
|
/drivers/usb/class/

cdc-acm.h
    103  struct usb_anchor delayed;  /* writes queued for a device about to be woken */    [member]

cdc-acm.c
    781  urb = usb_get_from_anchor(&acm->delayed);    in acm_port_shutdown()
    855  usb_anchor_urb(wb->urb, &acm->delayed);    in acm_tty_write()
   1400  init_usb_anchor(&acm->delayed);    in acm_probe()
   1668  urb = usb_get_from_anchor(&acm->delayed);    in acm_resume()
|
/drivers/gpu/drm/ci/xfails/

i915-tgl-fails.txt
     30  syncobj_wait@wait-delayed-signal,Timeout
|
/drivers/hwtracing/coresight/

coresight-etm4x-core.c
   2213  struct etm4_init_arg *delayed;    in etm4_probe()  [local]
   2246  delayed = devm_kmalloc(dev, sizeof(*delayed), GFP_KERNEL);    in etm4_probe()
   2247  if (!delayed) {    in etm4_probe()
   2252  *delayed = init_arg;    in etm4_probe()
   2254  per_cpu(delayed_probe, drvdata->cpu) = delayed;    in etm4_probe()
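The coresight hits show a deferred-probe pattern: when etm4_probe() cannot finish immediately, it copies its init arguments into devm-managed memory and parks the copy in a per-cpu pointer so the probe can complete later. The sketch below illustrates only that parking step with hypothetical structure and variable names; the devm_kmalloc() and per_cpu() calls are the standard APIs seen in the listing.

/* Sketch of parking probe arguments for a later (delayed) probe. */
#include <linux/device.h>
#include <linux/percpu.h>

struct my_init_arg {			/* hypothetical probe-time arguments */
	struct device *dev;
	void __iomem *base;
	unsigned int cpu;
};

/* One parked argument block per CPU; NULL means nothing is pending. */
static DEFINE_PER_CPU(struct my_init_arg *, delayed_probe);

static int my_park_probe(struct device *dev, const struct my_init_arg *init_arg)
{
	struct my_init_arg *delayed;

	/* devm allocation: freed automatically if the device goes away. */
	delayed = devm_kmalloc(dev, sizeof(*delayed), GFP_KERNEL);
	if (!delayed)
		return -ENOMEM;

	*delayed = *init_arg;
	per_cpu(delayed_probe, init_arg->cpu) = delayed;

	return 0;
}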
|
/drivers/xen/events/

events_base.c
    548  struct delayed_work delayed;    [member]
    585  &eoi->delayed, delay);    in lateeoi_list_add()
    655  eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);    in xen_irq_lateeoi_worker()
    670  &eoi->delayed,    in xen_irq_lateeoi_worker()
    693  INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);    in xen_cpu_init_eoi()
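The Xen lateeoi code embeds a struct delayed_work in a per-cpu worker structure and re-arms it from its own handler. Below is a minimal, self-contained sketch of that embed/init/re-arm pattern using the standard workqueue API (INIT_DELAYED_WORK(), mod_delayed_work(), to_delayed_work()); the my_lateeoi_work structure and the one-second interval are illustrative assumptions, not the Xen code.

/* Minimal sketch of a self-rearming delayed work item; not the Xen code. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_lateeoi_work {
	struct delayed_work delayed;	/* embedded delayed work item */
	unsigned long count;
};

static void my_lateeoi_worker(struct work_struct *work)
{
	/* Recover the container from the embedded delayed_work. */
	struct my_lateeoi_work *w =
		container_of(to_delayed_work(work), struct my_lateeoi_work, delayed);

	w->count++;

	/* Re-arm: run again in roughly one second (assumed interval). */
	mod_delayed_work(system_wq, &w->delayed, HZ);
}

static void my_lateeoi_init(struct my_lateeoi_work *w)
{
	w->count = 0;
	INIT_DELAYED_WORK(&w->delayed, my_lateeoi_worker);
	mod_delayed_work(system_wq, &w->delayed, HZ);
}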
|
/drivers/scsi/

aha1542.c
     83  bool delayed = true;    in wait_mask()  [local]
     87  delayed = false;    in wait_mask()
     94  if (delayed)    in wait_mask()
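In the aha1542 snippet, delayed appears to select between two polling styles inside wait_mask(): when set, each poll iteration inserts a small delay; when cleared (no explicit timeout given), the loop busy-polls. A hedged, generic sketch of that reading follows; the parameter list, the 3000000-iteration fallback, and the port-read details are assumptions, not the driver's exact code.

/* Sketch of a poll loop that optionally delays between reads; not driver code. */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

static bool my_wait_mask(unsigned long port, u8 mask, u8 allof, u8 noneof,
			 int timeout)
{
	bool delayed = true;

	if (timeout == 0) {
		/* No explicit timeout: busy-poll a large number of times. */
		timeout = 3000000;
		delayed = false;
	}

	while (1) {
		u8 bits = inb(port) & mask;

		if ((bits & allof) == allof && (bits & noneof) == 0)
			return true;
		if (delayed)
			mdelay(1);	/* polite polling when a timeout was given */
		if (--timeout == 0)
			return false;
	}
}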
|
/drivers/firewire/

core-card.c
    223  void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)    in fw_schedule_bus_reset()  [argument]
    233  delayed ? DIV_ROUND_UP(HZ, 100) : 0))    in fw_schedule_bus_reset()
|
/drivers/net/wireless/ath/ath11k/

qmi.c
   1879  bool delayed;    in ath11k_qmi_respond_fw_mem_request()  [local]
   1895  delayed = true;    in ath11k_qmi_respond_fw_mem_request()
   1900  delayed = false;    in ath11k_qmi_respond_fw_mem_request()
   1921  delayed);    in ath11k_qmi_respond_fw_mem_request()
   1944  if (delayed && resp.resp.error == 0)    in ath11k_qmi_respond_fw_mem_request()
|
/drivers/net/wireless/ath/ath12k/

qmi.c
   2376  bool delayed;    in ath12k_qmi_respond_fw_mem_request()  [local]
   2389  delayed = true;    in ath12k_qmi_respond_fw_mem_request()
   2393  delayed = false;    in ath12k_qmi_respond_fw_mem_request()
   2433  if (delayed && resp.resp.error == 0)    in ath12k_qmi_respond_fw_mem_request()
|
/drivers/gpu/drm/i915/

intel_uncore.c
    763  bool delayed)    in __intel_uncore_forcewake_put()  [argument]
    778  if (delayed &&    in __intel_uncore_forcewake_put()
|
/drivers/s390/net/

qeth_core_main.c
    396  int delayed)    in qeth_compute_cq_notification()  [argument]
    402  n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;    in qeth_compute_cq_notification()
    408  n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :    in qeth_compute_cq_notification()
    412  n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :    in qeth_compute_cq_notification()
|