Searched refs:rq (Results 1 – 25 of 652) sorted by relevance

/linux/drivers/gpu/drm/i915/
i915_request.c
334 rq->ring->head = rq->postfix; in i915_request_retire()
355 rq->engine->remove_active_request(rq); in i915_request_retire()
508 if (rq->infix == rq->postfix) in __i915_request_skip()
511 RQ_TRACE(rq, "error: %d\n", rq->fence.error); in __i915_request_skip()
519 rq->infix = rq->postfix; in __i915_request_skip()
548 rq = i915_request_get(rq); in i915_request_mark_eio()
955 rq->head = rq->ring->emit; in __i915_request_create()
957 ret = rq->engine->request_alloc(rq); in __i915_request_create()
1710 rq->postfix = intel_ring_offset(rq, cs); in __i915_request_commit()
2088 rq->fence.context, rq->fence.seqno, in i915_request_show()
[all …]
i915_request.h
56 #define RQ_TRACE(rq, fmt, ...) do { \ argument
396 dma_fence_put(&rq->fence); in i915_request_put()
485 seqno = __hwsp_seqno(rq); in hwsp_seqno()
493 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1); in __i915_request_has_started()
526 if (i915_request_signaled(rq)) in i915_request_started()
551 if (!i915_request_is_active(rq)) in i915_request_is_running()
555 result = __i915_request_has_started(rq) && i915_request_is_active(rq); in i915_request_is_running()
579 return !list_empty(&rq->sched.link); in i915_request_is_ready()
584 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); in __i915_request_is_complete()
591 if (i915_request_signaled(rq)) in i915_request_completed()
[all …]
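
In the i915 hits above, rq is a struct i915_request: a GPU submission tracked by a dma_fence, considered complete once the seqno the engine writes to its hardware status page passes the request's fence seqno (see the __i915_request_is_complete hit, which defers to i915_seqno_passed()). A minimal userspace sketch of that wraparound-safe comparison follows; all names and values are invented for illustration:

    /* Minimal sketch (not kernel code): wraparound-safe seqno check in
     * the style of i915_seqno_passed(). */
    #include <stdint.h>
    #include <stdio.h>

    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;    /* signed diff survives wrap */
    }

    int main(void)
    {
        uint32_t hwsp_seqno = 2;               /* last seqno the GPU wrote */
        uint32_t fence_seqno = 0xfffffffeu;    /* request submitted pre-wrap */
        /* 2 - 0xfffffffe == 4 as u32, >= 0 as s32: request has completed */
        printf("complete: %d\n", seqno_passed(hwsp_seqno, fence_seqno));
        return 0;
    }
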
/linux/kernel/sched/
sched.h
671 struct rq *rq; member
1135 static inline int cpu_of(struct rq *rq) in cpu_of() argument
1410 struct rq *rq = task_rq(p); in cfs_rq_of() local
1646 struct rq *rq; in this_rq_lock_irq() local
1700 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
1702 void (*func)(struct rq *rq)) in queue_balance_callback() argument
2127 void (*rq_online)(struct rq *rq);
2128 void (*rq_offline)(struct rq *rq);
2130 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2150 void (*update_curr)(struct rq *rq);
[all …]
pelt.h
13 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
24 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
78 rq->clock_pelt = rq_clock_task(rq); in update_rq_clock_pelt()
130 rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt; in update_idle_rq_clock_pelt()
133 static inline u64 rq_clock_pelt(struct rq *rq) in rq_clock_pelt() argument
138 return rq->clock_pelt - rq->lost_idle_time; in rq_clock_pelt()
189 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
194 static inline u64 rq_clock_pelt(struct rq *rq) in rq_clock_pelt() argument
[all …]
deadline.c
36 struct rq *rq = task_rq(p); in dl_rq_of_se() local
235 struct rq *rq; in dl_change_utilization() local
319 struct rq *rq = rq_of_dl_rq(dl_rq); in task_non_contending() local
986 struct rq *rq = task_rq(p); in start_dl_timer() local
1047 struct rq *rq; in dl_task_timer() local
1353 struct rq *rq; in inactive_task_timer() local
1759 struct rq *rq; in select_task_rq_dl() local
1807 struct rq *rq; in migrate_task_rq_dl() local
2433 struct rq *rq; in set_cpus_allowed_dl() local
2470 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); in rq_online_dl()
[all …]
stats.h
11 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
13 if (rq) { in rq_sched_info_arrive()
23 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
25 if (rq) in rq_sched_info_depart()
30 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
32 if (rq) in rq_sched_info_dequeue()
45 void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
48 void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
50 void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
167 struct rq *rq; in psi_ttwu_dequeue() local
[all …]
rt.c
166 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
170 rt_rq->rq = rq; in init_tg_rt_entry()
252 struct rq *rq = rq_of_rt_se(rt_se); in rt_rq_of_se() local
537 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
638 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
1048 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq() local
1065 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq() local
1547 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
1557 struct rq *rq; in select_task_rq_rt() local
2257 struct rq *rq; in rto_push_irq_work_func() local
[all …]
stop_task.c
20 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
27 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
37 static struct task_struct *pick_task_stop(struct rq *rq) in pick_task_stop() argument
42 return rq->stop; in pick_task_stop()
45 static struct task_struct *pick_next_task_stop(struct rq *rq) in pick_next_task_stop() argument
56 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_stop() argument
62 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_stop() argument
67 static void yield_task_stop(struct rq *rq) in yield_task_stop() argument
103 static void switched_to_stop(struct rq *rq, struct task_struct *p) in switched_to_stop() argument
109 prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_stop() argument
[all …]
core.c
550 struct rq *rq; in __task_rq_lock() local
575 struct rq *rq; in task_rq_lock() local
1385 struct rq *rq; in uclamp_update_util_min_rt_default() local
1664 struct rq *rq; in uclamp_update_active() local
2868 struct rq *rq; in __set_cpus_allowed_ptr() local
2893 struct rq *rq; in restrict_cpus_allowed_ptr() local
3205 struct rq *rq; in wait_task_inactive() local
3505 struct rq *rq; in ttwu_stat() local
3632 struct rq *rq; in ttwu_runnable() local
4480 struct rq *rq; in wake_up_new_task() local
[all …]
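
In kernel/sched/, rq is the per-CPU runqueue, struct rq. The repeated `struct rq *rq = task_rq(p)` locals above feed the lock-and-recheck idiom of __task_rq_lock()/task_rq_lock(): a task can migrate to another CPU between looking up its runqueue and taking that runqueue's lock, so the lookup is verified under the lock and retried on mismatch. A simplified pthread model of that loop, with all types reduced to stand-ins:

    /* Simplified model (not kernel code) of task_rq_lock()'s
     * lock-and-recheck retry loop. */
    #include <pthread.h>

    struct rq   { pthread_mutex_t lock; };     /* stand-in runqueue */
    struct task { struct rq *rq; };            /* stand-in task */

    static struct rq *task_rq_lock_model(struct task *p)
    {
        for (;;) {
            struct rq *rq = p->rq;             /* racy snapshot */
            pthread_mutex_lock(&rq->lock);
            if (rq == p->rq)                   /* still our runqueue? */
                return rq;                     /* locked and pinned */
            pthread_mutex_unlock(&rq->lock);   /* task migrated: retry */
        }
    }
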
/linux/drivers/scsi/fnic/
vnic_rq.c
42 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
59 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
60 rq->buf_index = 0; in vnic_rq_alloc_bufs()
70 vdev = rq->vdev; in vnic_rq_free()
79 rq->ctrl = NULL; in vnic_rq_free()
87 rq->index = index; in vnic_rq_alloc()
88 rq->vdev = vdev; in vnic_rq_alloc()
91 if (!rq->ctrl) { in vnic_rq_alloc()
120 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); in vnic_rq_init()
129 rq->to_use = rq->to_clean = in vnic_rq_init()
[all …]
vnic_rq.h
114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
119 return rq->to_use->desc; in vnic_rq_next_desc()
124 return rq->to_use->index; in vnic_rq_next_index()
129 return rq->buf_index++; in vnic_rq_next_buf_index()
144 rq->to_use = buf; in vnic_rq_post()
145 rq->ring.desc_avail--; in vnic_rq_post()
189 buf = rq->to_clean; in vnic_rq_service()
197 rq->ring.desc_avail++; in vnic_rq_service()
199 rq->to_clean = buf->next; in vnic_rq_service()
204 buf = rq->to_clean; in vnic_rq_service()
[all …]
/linux/drivers/net/ethernet/cisco/enic/
vnic_rq.c
39 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
44 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
61 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
71 vdev = rq->vdev; in vnic_rq_free()
82 rq->ctrl = NULL; in vnic_rq_free()
90 rq->index = index; in vnic_rq_alloc()
91 rq->vdev = vdev; in vnic_rq_alloc()
94 if (!rq->ctrl) { in vnic_rq_alloc()
133 rq->to_use = rq->to_clean = in vnic_rq_init_start()
198 rq->ring.desc_avail = rq->ring.desc_count - 1; in vnic_rq_clean()
[all …]
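
The fnic and enic hits above index the same Cisco VIC receive-queue layout: rq->to_use marks where the driver posts fresh buffers, rq->to_clean marks the oldest outstanding one, and vnic_rq_desc_used() reports desc_count - desc_avail - 1 because one ring slot is sacrificed to distinguish a full ring from an empty one. A self-contained model of that ring arithmetic, with sizes and names invented for illustration:

    /* Self-contained model (not driver code) of the vnic_rq ring. */
    #include <stdio.h>

    #define DESC_COUNT 8u

    struct ring {
        unsigned to_use, to_clean;    /* indices into the descriptor ring */
        unsigned desc_avail;
    };

    static unsigned desc_used(const struct ring *r)
    {
        return DESC_COUNT - r->desc_avail - 1;  /* cf. vnic_rq_desc_used() */
    }

    static void post(struct ring *r)            /* driver hands hw a buffer */
    {
        r->to_use = (r->to_use + 1) % DESC_COUNT;
        r->desc_avail--;
    }

    static void service(struct ring *r)         /* hw filled the oldest buffer */
    {
        r->to_clean = (r->to_clean + 1) % DESC_COUNT;
        r->desc_avail++;
    }

    int main(void)
    {
        struct ring r = { 0, 0, DESC_COUNT - 1 };  /* empty: one slot reserved */
        post(&r); post(&r);
        printf("used=%u\n", desc_used(&r));        /* prints used=2 */
        service(&r);
        printf("used=%u\n", desc_used(&r));        /* prints used=1 */
        return 0;
    }
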
/linux/include/linux/
blk-mq.h
216 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) argument
478 struct request *rq; member
858 return rq + 1; in blk_mq_rq_to_pdu()
872 rq->q->mq_ops->cleanup_rq(rq); in blk_mq_cleanup_rq()
880 rq->bio = rq->biotail = bio; in blk_rq_bio_prep()
931 if ((rq->bio)) \
967 if (!rq->bio) in blk_rq_cur_bytes()
1012 return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); in req_bvec()
1077 return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); in blk_rq_zone_no()
1082 return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); in blk_rq_zone_is_seq()
[all …]
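
In blk-mq.h, rq is the block layer's struct request. The `return rq + 1` hit in blk_mq_rq_to_pdu() works because blk-mq allocates each driver's per-request payload (the PDU) immediately behind the struct request in the same allocation, so plain pointer arithmetic recovers it. A standalone sketch of the same trick, using a hypothetical driver payload:

    /* Standalone sketch (not kernel code) of the "rq + 1" PDU trick. */
    #include <stdio.h>
    #include <stdlib.h>

    struct request { int tag; };          /* stand-in for struct request */
    struct my_pdu  { int retries; };      /* hypothetical driver payload */

    static void *rq_to_pdu(struct request *rq)
    {
        return rq + 1;                    /* payload sits right behind rq */
    }

    int main(void)
    {
        struct request *rq = malloc(sizeof(*rq) + sizeof(struct my_pdu));
        if (!rq)
            return 1;
        struct my_pdu *pdu = rq_to_pdu(rq);
        pdu->retries = 3;
        printf("retries=%d\n", pdu->retries);
        free(rq);
        return 0;
    }
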
/linux/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
73 intel_ring_advance(rq, cs); in gen8_emit_flush_rcs()
363 *cs++ = hwsp_offset(rq); in gen8_emit_init_breadcrumb()
390 rq->infix = intel_ring_offset(rq, cs); in gen8_emit_init_breadcrumb()
466 GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0); in assert_request_valid()
479 rq->wa_tail = intel_ring_offset(rq, cs); in gen8_emit_wa_tail()
512 rq->tail = intel_ring_offset(rq, cs); in gen8_emit_fini_breadcrumb_tail()
513 assert_ring_tail_valid(rq->ring, rq->tail); in gen8_emit_fini_breadcrumb_tail()
520 return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0); in emit_xcs_breadcrumb()
525 return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); in gen8_emit_fini_breadcrumb_xcs()
605 rq->tail = intel_ring_offset(rq, cs); in gen12_emit_fini_breadcrumb_tail()
[all …]
gen6_engine_cs.c
166 *cs++ = rq->fence.seqno; in gen6_emit_breadcrumb_rcs()
171 rq->tail = intel_ring_offset(rq, cs); in gen6_emit_breadcrumb_rcs()
172 assert_ring_tail_valid(rq->ring, rq->tail); in gen6_emit_breadcrumb_rcs()
336 gen7_stall_cs(rq); in gen7_emit_flush_rcs()
363 *cs++ = rq->fence.seqno; in gen7_emit_breadcrumb_rcs()
368 rq->tail = intel_ring_offset(rq, cs); in gen7_emit_breadcrumb_rcs()
369 assert_ring_tail_valid(rq->ring, rq->tail); in gen7_emit_breadcrumb_rcs()
385 rq->tail = intel_ring_offset(rq, cs); in gen6_emit_breadcrumb_xcs()
386 assert_ring_tail_valid(rq->ring, rq->tail); in gen6_emit_breadcrumb_xcs()
417 rq->tail = intel_ring_offset(rq, cs); in gen7_emit_breadcrumb_xcs()
[all …]
selftest_execlists.c
262 i915_request_await_dma_fence(rq[1], &rq[0]->fence); in live_unlite_restore()
267 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); in live_unlite_restore()
287 i915_request_await_dma_fence(rq[0], &rq[1]->fence); in live_unlite_restore()
290 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); in live_unlite_restore()
737 err = rq->engine->emit_init_breadcrumb(rq); in live_error_interrupt()
874 err = rq->engine->emit_init_breadcrumb(rq); in semaphore_queue()
1186 rq[B1] = create_rewinder(ce, rq[A1], slot, Z); in live_timeslice_rewind()
2582 ring_size = rq->wa_tail - rq->head; in live_chain_preempt()
2766 err = rq->engine->emit_bb_start(rq, in create_gang()
3189 err = rq->engine->emit_bb_start(rq, in create_gpr_client()
[all …]
intel_breadcrumbs.c
107 if (rq->context != ce) in check_signal_order()
215 &rq->fence.flags)) in signal_irq_work()
249 rq->engine->sched_engine->retire_inflight_request_prio(rq); in signal_irq_work()
251 spin_lock(&rq->lock); in signal_irq_work()
257 i915_request_put(rq); in signal_irq_work()
338 i915_request_get(rq); in irq_signal_request()
390 i915_request_get(rq); in insert_breadcrumb()
450 i915_request_put(rq); in i915_request_cancel_breadcrumb()
468 &rq->fence.flags)) in intel_context_remove_breadcrumbs()
473 i915_request_put(rq); in intel_context_remove_breadcrumbs()
[all …]
intel_execlists_submission.c
449 rq->fence.context, rq->fence.seqno); in reset_active()
692 rq->tail = rq->wa_tail; in execlists_update_context()
730 rq->fence.context, rq->fence.seqno, in dump_port()
919 rq ? execlists_update_context(rq) : 0, in execlists_submit_ports()
1145 if (!rq || __i915_request_is_complete(rq)) in needs_timeslice()
1971 rq->head, rq->tail, in process_csb()
2310 cap->rq = active_request(cap->rq->context->timeline, cap->rq); in execlists_capture()
2311 cap->rq = i915_request_get_rcu(cap->rq); in execlists_capture()
2955 rq = active_request(ce->timeline, rq); in execlists_reset_active()
3250 rq->fence.context, rq->fence.seqno, in kick_execlists()
[all …]
selftest_timeline.c
519 return rq; in checked_tl_write()
893 struct i915_request *rq = fetch_and_zero(&w->rq); in check_watcher() local
969 return rq; in wrap_timeline()
974 return rq; in wrap_timeline()
1076 switch_tl_lock(rq, watcher[0].rq); in live_hwsp_read()
1082 switch_tl_lock(watcher[0].rq, rq); in live_hwsp_read()
1090 switch_tl_lock(rq, watcher[1].rq); in live_hwsp_read()
1096 switch_tl_lock(watcher[1].rq, rq); in live_hwsp_read()
1107 rq = wrap_timeline(rq); in live_hwsp_read()
1217 GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno); in live_hwsp_rollover_kernel()
[all …]
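
The gt/ hits show the i915 command-stream emit idiom: a dword cursor (cs) is advanced as commands are written into ring memory, then the request records the resulting offsets (rq->tail, rq->postfix, rq->wa_tail) via intel_ring_offset()/intel_ring_advance(). A toy model of that cursor style; the opcode and sizes are made up:

    /* Toy model (not driver code) of the dword-cursor emit style. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ring[64];              /* stand-in ring buffer */

    struct req { unsigned head, tail; };   /* offsets in dwords */

    static void emit_breadcrumb(struct req *rq, uint32_t seqno)
    {
        uint32_t *cs = ring + rq->tail;    /* cf. intel_ring_begin() */
        *cs++ = 0xdeadbeef;                /* invented store-dword opcode */
        *cs++ = seqno;                     /* value a waiter will poll for */
        rq->tail = (unsigned)(cs - ring);  /* cf. intel_ring_advance() */
    }

    int main(void)
    {
        struct req rq = { 0, 0 };
        emit_breadcrumb(&rq, 42);
        printf("head=%u tail=%u\n", rq.head, rq.tail);  /* head=0 tail=2 */
        return 0;
    }
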
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
217 rq, &cqd->title); in mlx5e_decompress_cqes_start()
295 if (rq->xsk_pool) in mlx5e_page_alloc()
327 if (rq->xsk_pool) in mlx5e_page_release()
363 return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; in get_frag()
702 .umr.rq = rq, in mlx5e_alloc_rx_mpwqe()
838 struct mlx5e_rq *rq = &c->rq; in mlx5e_handle_shampo_hd_umr() local
984 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; in mlx5e_post_rx_mpwqes()
1508 xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq); in mlx5e_fill_xdp_buff()
1600 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); in trigger_report()
2137 rq, cqe); in mlx5e_poll_rx_cq()
[all …]
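
In mlx5 en_rx.c, rq is the driver's struct mlx5e_rq receive queue. The mlx5e_page_alloc/mlx5e_page_release hits branch on rq->xsk_pool, suggesting buffers come from an AF_XDP umem pool when a socket is bound to the queue and from the normal page path otherwise. A heavily simplified stand-in for that split; every name here is invented:

    /* Heavily simplified stand-in (not driver code) for the xsk_pool split. */
    #include <stdlib.h>

    struct rx_queue { void *xsk_pool; };   /* NULL unless AF_XDP is bound */

    /* Invented placeholder for the real pool allocator. */
    static void *xsk_frame_alloc(void *pool, size_t sz)
    {
        (void)pool;
        return malloc(sz);                 /* would come from the umem pool */
    }

    static void *rx_buf_alloc(struct rx_queue *rq, size_t sz)
    {
        if (rq->xsk_pool)                  /* zero-copy AF_XDP path */
            return xsk_frame_alloc(rq->xsk_pool, sz);
        return malloc(sz);                 /* regular page path stand-in */
    }
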
/linux/fs/erofs/
decompressor.c
63 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; in z_erofs_lz4_prepare_dstpages()
126 oend = rq->pageofs_out + rq->outputsize; in z_erofs_lz4_handle_inplace_io()
138 if (rq->out[j] == rq->in[i]) in z_erofs_lz4_handle_inplace_io()
157 in = rq->in; in z_erofs_lz4_handle_inplace_io()
221 rq->inputsize, rq->outputsize, rq->outputsize); in z_erofs_lz4_decompress_mem()
224 rq->inputsize, rq->outputsize); in z_erofs_lz4_decompress_mem()
228 ret, rq->inputsize, inputmargin, rq->outputsize); in z_erofs_lz4_decompress_mem()
259 PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; in z_erofs_lz4_decompress()
310 if (rq->out[0] == *rq->in) { in z_erofs_shifted_transform()
324 if (rq->out[1] == *rq->in) { in z_erofs_shifted_transform()
[all …]
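
In erofs, rq is a decompression request carrying rq->in/rq->out page arrays. The `rq->out[j] == rq->in[i]` comparisons handle in-place I/O, where an output page aliases a compressed input page and the overlap must be detected before decompressing over the data still being read. A minimal model of that overlap scan:

    /* Minimal model (not erofs code) of the in-place overlap scan. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool pages_overlap(void **out, int nout, void **in, int nin)
    {
        for (int j = 0; j < nout; j++)
            for (int i = 0; i < nin; i++)
                if (out[j] == in[i])
                    return true;     /* output page aliases an input page */
        return false;
    }

    int main(void)
    {
        char a, b;
        void *in[]  = { &a };
        void *out[] = { &b, &a };    /* out[1] reuses in[0] */
        printf("in-place: %d\n", pages_overlap(out, 2, in, 1));  /* 1 */
        return 0;
    }
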
/linux/drivers/scsi/esas2r/
esas2r_disc.c
359 rq->interrupt_cx = dc; in esas2r_disc_start_port()
529 rq, in esas2r_disc_block_dev_scan()
538 rq->timeout = 30000; in esas2r_disc_block_dev_scan()
539 rq->interrupt_cx = dc; in esas2r_disc_block_dev_scan()
604 rq, in esas2r_disc_raid_grp_info()
615 rq->interrupt_cx = dc; in esas2r_disc_raid_grp_info()
666 rq->req_stat); in esas2r_disc_raid_grp_info_cb()
715 rq, in esas2r_disc_part_info()
730 rq->interrupt_cx = dc; in esas2r_disc_part_info()
807 rq, in esas2r_disc_passthru_dev_info()
[all …]
esas2r_vda.c
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
200 rq->vrq->mgt.dev_index = in esas2r_process_vda_ioctl()
355 clear_vda_request(rq); in esas2r_build_flash_req()
382 clear_vda_request(rq); in esas2r_build_mgt_req()
424 clear_vda_request(rq); in esas2r_build_ae_req()
435 rq->vrq_md->phys_addr + in esas2r_build_ae_req()
455 clear_vda_request(rq); in esas2r_build_cli_req()
472 clear_vda_request(rq); in esas2r_build_ioctl_req()
490 clear_vda_request(rq); in esas2r_build_cfg_req()
[all …]
/linux/block/
blk-flush.c
102 if (blk_rq_sectors(rq)) in blk_flush_policy()
127 rq->bio = rq->biotail; in blk_flush_restore_request()
131 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
172 rq->flush.seq |= seq; in blk_flush_complete_seq()
173 cmd_flags = rq->cmd_flags; in blk_flush_complete_seq()
218 struct request *rq, *n; in flush_end_io() local
358 WARN_ON(rq->tag < 0); in mq_flush_data_end_io()
402 rq->cmd_flags |= REQ_SYNC; in blk_insert_flush()
415 BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ in blk_insert_flush()
432 memset(&rq->flush, 0, sizeof(rq->flush)); in blk_insert_flush()
[all …]
blk-mq.c
525 rq->bio = rq->biotail = NULL; in blk_mq_alloc_request()
811 rq_qos_done(rq->q, rq); in __blk_mq_end_request()
812 rq->end_io(rq, error); in __blk_mq_end_request()
863 rq_qos_done(rq->q, rq); in blk_mq_end_request_batch()
892 rq->q->mq_ops->complete(rq); in blk_complete_reqs()
995 rq->q->mq_ops->complete(rq); in blk_mq_complete_request()
1227 rq->end_io(rq, 0); in blk_mq_put_rq_ref()
1401 hctx->tags->rqs[rq->tag] = rq; in __blk_mq_get_driver_tag()
1644 bd.rq = rq; in blk_mq_dispatch_rq_list()
2321 .rq = rq, in __blk_mq_issue_directly()
[all …]
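
In block/, the blk_flush_complete_seq() hit accumulates finished flush stages with `rq->flush.seq |= seq`: a flush request advances through pre-flush, data, and post-flush steps, and each completion ORs its bit into the request's sequence mask. A tiny model of that bookkeeping; the bit names below mirror, but are not, the kernel's REQ_FSEQ_* flags:

    /* Tiny model (not kernel code) of flush-stage bookkeeping. */
    #include <stdio.h>

    enum {                                  /* invented stand-ins for REQ_FSEQ_* */
        SEQ_PREFLUSH  = 1u << 0,
        SEQ_DATA      = 1u << 1,
        SEQ_POSTFLUSH = 1u << 2,
    };

    struct flush_state { unsigned seq; };   /* cf. rq->flush.seq */

    static void complete_stage(struct flush_state *f, unsigned seq)
    {
        f->seq |= seq;                      /* record the finished stage */
    }

    int main(void)
    {
        struct flush_state f = { 0 };
        complete_stage(&f, SEQ_PREFLUSH);
        complete_stage(&f, SEQ_DATA);
        printf("seq=0x%x\n", f.seq);        /* seq=0x3: post-flush pending */
        return 0;
    }
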

Completed in 134 milliseconds
