Lines matching references to rq (i915 request handling, i915_request.c)

Each entry gives the source line number, the matching line of code, and the enclosing function; "local" marks a line where rq is declared as a local variable, and "argument" marks one where it is received as a function parameter.

112 struct i915_request *rq = to_request(fence); in i915_fence_release() local
114 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && in i915_fence_release()
115 rq->guc_prio != GUC_PRIO_FINI); in i915_fence_release()
124 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
125 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
132 if (!intel_engine_is_virtual(rq->engine) && in i915_fence_release()
133 !cmpxchg(&rq->engine->request_pool, NULL, rq)) { in i915_fence_release()
134 intel_context_put(rq->context); in i915_fence_release()
138 intel_context_put(rq->context); in i915_fence_release()
140 kmem_cache_free(slab_requests, rq); in i915_fence_release()
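
A minimal userspace sketch of the pattern visible at lines 132-140 above: instead of always freeing a retired request, park at most one spare per engine in a single-slot pool that is claimed and released with a compare-and-swap. The names below (struct engine, struct request, request_pool, request_release/request_alloc) are invented for illustration, and C11 atomics stand in for the kernel's cmpxchg()/xchg().

#include <stdatomic.h>
#include <stdlib.h>

struct request { int payload; };

struct engine {
	_Atomic(struct request *) request_pool;	/* one cached spare, or NULL */
};

static void request_release(struct engine *engine, struct request *rq)
{
	struct request *expected = NULL;

	/* Park rq in the empty slot; free it if another request got there first. */
	if (!atomic_compare_exchange_strong(&engine->request_pool, &expected, rq))
		free(rq);
}

static struct request *request_alloc(struct engine *engine)
{
	/* Reuse the parked spare if present, otherwise allocate a fresh one. */
	struct request *rq = atomic_exchange(&engine->request_pool, NULL);

	return rq ? rq : malloc(sizeof(*rq));
}

A one-slot pool keeps the release/alloc fast paths lock-free while bounding how much memory an idle engine can pin.
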
161 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) in __notify_execute_cb() argument
165 if (llist_empty(&rq->execute_cb)) in __notify_execute_cb()
169 llist_del_all(&rq->execute_cb), in __notify_execute_cb()
174 static void __notify_execute_cb_irq(struct i915_request *rq) in __notify_execute_cb_irq() argument
176 __notify_execute_cb(rq, irq_work_queue); in __notify_execute_cb_irq()
185 void i915_request_notify_execute_cb_imm(struct i915_request *rq) in i915_request_notify_execute_cb_imm() argument
187 __notify_execute_cb(rq, irq_work_imm); in i915_request_notify_execute_cb_imm()
203 static void __i915_request_fill(struct i915_request *rq, u8 val) in __i915_request_fill() argument
205 void *vaddr = rq->ring->vaddr; in __i915_request_fill()
208 head = rq->infix; in __i915_request_fill()
209 if (rq->postfix < head) { in __i915_request_fill()
210 memset(vaddr + head, val, rq->ring->size - head); in __i915_request_fill()
213 memset(vaddr + head, val, rq->postfix - head); in __i915_request_fill()
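
__i915_request_fill() (lines 203-213 above) poisons the span of the ring a request occupies, splitting the memset in two when that span wraps past the end of the circular buffer. A standalone sketch of the same wraparound fill; the function and parameter names are illustrative, not the driver's API.

#include <stddef.h>
#include <string.h>

/* Fill [head, tail) of a circular buffer of 'size' bytes with 'val'. */
static void ring_fill(void *vaddr, size_t size, size_t head, size_t tail,
		      unsigned char val)
{
	if (tail < head) {	/* span wraps: fill up to the end first */
		memset((char *)vaddr + head, val, size - head);
		head = 0;
	}
	memset((char *)vaddr + head, val, tail - head);
}
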
227 i915_request_active_engine(struct i915_request *rq, in i915_request_active_engine() argument
240 locked = READ_ONCE(rq->engine); in i915_request_active_engine()
242 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { in i915_request_active_engine()
248 if (i915_request_is_active(rq)) { in i915_request_active_engine()
249 if (!__i915_request_is_complete(rq)) in i915_request_active_engine()
259 static void __rq_init_watchdog(struct i915_request *rq) in __rq_init_watchdog() argument
261 rq->watchdog.timer.function = NULL; in __rq_init_watchdog()
266 struct i915_request *rq = in __rq_watchdog_expired() local
268 struct intel_gt *gt = rq->engine->gt; in __rq_watchdog_expired()
270 if (!i915_request_completed(rq)) { in __rq_watchdog_expired()
271 if (llist_add(&rq->watchdog.link, &gt->watchdog.list)) in __rq_watchdog_expired()
274 i915_request_put(rq); in __rq_watchdog_expired()
280 static void __rq_arm_watchdog(struct i915_request *rq) in __rq_arm_watchdog() argument
282 struct i915_request_watchdog *wdg = &rq->watchdog; in __rq_arm_watchdog()
283 struct intel_context *ce = rq->context; in __rq_arm_watchdog()
288 i915_request_get(rq); in __rq_arm_watchdog()
299 static void __rq_cancel_watchdog(struct i915_request *rq) in __rq_cancel_watchdog() argument
301 struct i915_request_watchdog *wdg = &rq->watchdog; in __rq_cancel_watchdog()
304 i915_request_put(rq); in __rq_cancel_watchdog()
307 bool i915_request_retire(struct i915_request *rq) in i915_request_retire() argument
309 if (!__i915_request_is_complete(rq)) in i915_request_retire()
312 RQ_TRACE(rq, "\n"); in i915_request_retire()
314 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); in i915_request_retire()
315 trace_i915_request_retire(rq); in i915_request_retire()
316 i915_request_mark_complete(rq); in i915_request_retire()
318 __rq_cancel_watchdog(rq); in i915_request_retire()
329 GEM_BUG_ON(!list_is_first(&rq->link, in i915_request_retire()
330 &i915_request_timeline(rq)->requests)); in i915_request_retire()
333 __i915_request_fill(rq, POISON_FREE); in i915_request_retire()
334 rq->ring->head = rq->postfix; in i915_request_retire()
336 if (!i915_request_signaled(rq)) { in i915_request_retire()
337 spin_lock_irq(&rq->lock); in i915_request_retire()
338 dma_fence_signal_locked(&rq->fence); in i915_request_retire()
339 spin_unlock_irq(&rq->lock); in i915_request_retire()
342 if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) in i915_request_retire()
343 atomic_dec(&rq->engine->gt->rps.num_waiters); in i915_request_retire()
355 rq->engine->remove_active_request(rq); in i915_request_retire()
356 GEM_BUG_ON(!llist_empty(&rq->execute_cb)); in i915_request_retire()
358 __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */ in i915_request_retire()
360 intel_context_exit(rq->context); in i915_request_retire()
361 intel_context_unpin(rq->context); in i915_request_retire()
363 free_capture_list(rq); in i915_request_retire()
364 i915_sched_node_fini(&rq->sched); in i915_request_retire()
365 i915_request_put(rq); in i915_request_retire()
370 void i915_request_retire_upto(struct i915_request *rq) in i915_request_retire_upto() argument
372 struct intel_timeline * const tl = i915_request_timeline(rq); in i915_request_retire_upto()
375 RQ_TRACE(rq, "\n"); in i915_request_retire_upto()
376 GEM_BUG_ON(!__i915_request_is_complete(rq)); in i915_request_retire_upto()
381 } while (i915_request_retire(tmp) && tmp != rq); in i915_request_retire_upto()
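
i915_request_retire_upto() (lines 370-381 above) depends on requests retiring strictly in timeline order: it keeps retiring the oldest entry until either one refuses because it has not completed, or the target itself has been retired. A simplified sketch of that walk over a plain singly linked FIFO; struct req and both helpers are invented for illustration.

#include <stdbool.h>
#include <stddef.h>

struct req {
	struct req *next;	/* timeline FIFO, oldest first */
	bool completed;
};

/* Drop the oldest request if it has completed; return false otherwise. */
static bool retire_one(struct req **head)
{
	struct req *oldest = *head;

	if (!oldest || !oldest->completed)
		return false;

	*head = oldest->next;
	return true;
}

static void retire_upto(struct req **head, struct req *target)
{
	struct req *oldest;

	do {
		oldest = *head;		/* always retire from the front */
	} while (oldest && retire_one(head) && oldest != target);
}
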
392 struct i915_request * const *port, *rq; in __request_in_flight() local
439 (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */ in __request_in_flight()
441 if (rq->context == signal->context) { in __request_in_flight()
442 inflight = i915_seqno_passed(rq->fence.seqno, in __request_in_flight()
453 __await_execution(struct i915_request *rq, in __await_execution() argument
466 cb->fence = &rq->submit; in __await_execution()
504 void __i915_request_skip(struct i915_request *rq) in __i915_request_skip() argument
506 GEM_BUG_ON(!fatal_error(rq->fence.error)); in __i915_request_skip()
508 if (rq->infix == rq->postfix) in __i915_request_skip()
511 RQ_TRACE(rq, "error: %d\n", rq->fence.error); in __i915_request_skip()
518 __i915_request_fill(rq, 0); in __i915_request_skip()
519 rq->infix = rq->postfix; in __i915_request_skip()
522 bool i915_request_set_error_once(struct i915_request *rq, int error) in i915_request_set_error_once() argument
528 if (i915_request_signaled(rq)) in i915_request_set_error_once()
531 old = READ_ONCE(rq->fence.error); in i915_request_set_error_once()
535 } while (!try_cmpxchg(&rq->fence.error, &old, error)); in i915_request_set_error_once()
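
i915_request_set_error_once() (lines 522-535 above) records an error on the fence only if none has been stored yet, re-reading and retrying with try_cmpxchg() when it races with another writer. The same idea in portable C11; the helper name and the plain int error slot are assumptions made for this sketch.

#include <stdatomic.h>
#include <stdbool.h>

/* Store 'error' into *slot only if no error has been recorded yet. */
static bool set_error_once(_Atomic int *slot, int error)
{
	int old = atomic_load(slot);

	do {
		if (old)	/* someone already recorded an error */
			return false;
		/* on CAS failure, 'old' is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(slot, &old, error));

	return true;
}
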
540 struct i915_request *i915_request_mark_eio(struct i915_request *rq) in i915_request_mark_eio() argument
542 if (__i915_request_is_complete(rq)) in i915_request_mark_eio()
545 GEM_BUG_ON(i915_request_signaled(rq)); in i915_request_mark_eio()
548 rq = i915_request_get(rq); in i915_request_mark_eio()
550 i915_request_set_error_once(rq, -EIO); in i915_request_mark_eio()
551 i915_request_mark_complete(rq); in i915_request_mark_eio()
553 return rq; in i915_request_mark_eio()
713 void i915_request_cancel(struct i915_request *rq, int error) in i915_request_cancel() argument
715 if (!i915_request_set_error_once(rq, error)) in i915_request_cancel()
718 set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); in i915_request_cancel()
720 intel_context_cancel_request(rq->context, rq); in i915_request_cancel()
762 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); in semaphore_notify() local
769 i915_request_put(rq); in semaphore_notify()
778 struct i915_request *rq, *rn; in retire_requests() local
780 list_for_each_entry_safe(rq, rn, &tl->requests, link) in retire_requests()
781 if (!i915_request_retire(rq)) in retire_requests()
790 struct i915_request *rq; in request_alloc_slow() local
794 rq = xchg(rsvd, NULL); in request_alloc_slow()
795 if (!rq) /* Use the normal failure path for one final WARN */ in request_alloc_slow()
798 return rq; in request_alloc_slow()
805 rq = list_first_entry(&tl->requests, typeof(*rq), link); in request_alloc_slow()
806 i915_request_retire(rq); in request_alloc_slow()
808 rq = kmem_cache_alloc(slab_requests, in request_alloc_slow()
810 if (rq) in request_alloc_slow()
811 return rq; in request_alloc_slow()
814 rq = list_last_entry(&tl->requests, typeof(*rq), link); in request_alloc_slow()
815 cond_synchronize_rcu(rq->rcustate); in request_alloc_slow()
826 struct i915_request *rq = arg; in __i915_request_ctor() local
828 spin_lock_init(&rq->lock); in __i915_request_ctor()
829 i915_sched_node_init(&rq->sched); in __i915_request_ctor()
830 i915_sw_fence_init(&rq->submit, submit_notify); in __i915_request_ctor()
831 i915_sw_fence_init(&rq->semaphore, semaphore_notify); in __i915_request_ctor()
833 rq->capture_list = NULL; in __i915_request_ctor()
835 init_llist_head(&rq->execute_cb); in __i915_request_ctor()
842 struct i915_request *rq; in __i915_request_create() local
880 rq = kmem_cache_alloc(slab_requests, in __i915_request_create()
882 if (unlikely(!rq)) { in __i915_request_create()
883 rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp); in __i915_request_create()
884 if (!rq) { in __i915_request_create()
902 rq->context = intel_context_get(ce); in __i915_request_create()
903 rq->engine = ce->engine; in __i915_request_create()
904 rq->ring = ce->ring; in __i915_request_create()
905 rq->execution_mask = ce->engine->mask; in __i915_request_create()
907 ret = intel_timeline_get_seqno(tl, rq, &seqno); in __i915_request_create()
911 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, in __i915_request_create()
914 RCU_INIT_POINTER(rq->timeline, tl); in __i915_request_create()
915 rq->hwsp_seqno = tl->hwsp_seqno; in __i915_request_create()
916 GEM_BUG_ON(__i915_request_is_complete(rq)); in __i915_request_create()
918 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ in __i915_request_create()
920 rq->guc_prio = GUC_PRIO_INIT; in __i915_request_create()
923 i915_sw_fence_reinit(&i915_request_get(rq)->submit); in __i915_request_create()
924 i915_sw_fence_reinit(&i915_request_get(rq)->semaphore); in __i915_request_create()
926 i915_sched_node_reinit(&rq->sched); in __i915_request_create()
929 rq->batch = NULL; in __i915_request_create()
930 __rq_init_watchdog(rq); in __i915_request_create()
931 GEM_BUG_ON(rq->capture_list); in __i915_request_create()
932 GEM_BUG_ON(!llist_empty(&rq->execute_cb)); in __i915_request_create()
946 rq->reserved_space = in __i915_request_create()
947 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32); in __i915_request_create()
955 rq->head = rq->ring->emit; in __i915_request_create()
957 ret = rq->engine->request_alloc(rq); in __i915_request_create()
961 rq->infix = rq->ring->emit; /* end of header; start of user payload */ in __i915_request_create()
964 list_add_tail_rcu(&rq->link, &tl->requests); in __i915_request_create()
966 return rq; in __i915_request_create()
969 ce->ring->emit = rq->head; in __i915_request_create()
972 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); in __i915_request_create()
973 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); in __i915_request_create()
977 kmem_cache_free(slab_requests, rq); in __i915_request_create()
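
Note how __i915_request_create() snapshots the ring's emit pointer into rq->head before writing the request header (line 955 above), so the error path can rewind the ring (line 969) as though the request had never been emitted. A toy sketch of that checkpoint-and-rollback idea; the types and helpers are invented for illustration.

#include <stddef.h>

struct ring { size_t emit; };			/* next byte to be written */
struct request { struct ring *ring; size_t head; };

static void begin_request(struct request *rq, struct ring *ring)
{
	rq->ring = ring;
	rq->head = ring->emit;		/* checkpoint before emitting anything */
}

static void abort_request(struct request *rq)
{
	rq->ring->emit = rq->head;	/* roll back the partially emitted header */
}
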
986 struct i915_request *rq; in i915_request_create() local
994 rq = list_first_entry(&tl->requests, typeof(*rq), link); in i915_request_create()
995 if (!list_is_last(&rq->link, &tl->requests)) in i915_request_create()
996 i915_request_retire(rq); in i915_request_create()
999 rq = __i915_request_create(ce, GFP_KERNEL); in i915_request_create()
1001 if (IS_ERR(rq)) in i915_request_create()
1005 rq->cookie = lockdep_pin_lock(&tl->mutex); in i915_request_create()
1007 return rq; in i915_request_create()
1011 return rq; in i915_request_create()
1015 i915_request_await_start(struct i915_request *rq, struct i915_request *signal) in i915_request_await_start() argument
1020 if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline)) in i915_request_await_start()
1071 if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) in i915_request_await_start()
1072 err = i915_sw_fence_await_dma_fence(&rq->submit, in i915_request_await_start()
1081 already_busywaiting(struct i915_request *rq) in already_busywaiting() argument
1095 return rq->sched.semaphores | READ_ONCE(rq->engine->saturated); in already_busywaiting()
1290 static void mark_external(struct i915_request *rq) in mark_external() argument
1300 rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN; in mark_external()
1304 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) in __i915_request_await_external() argument
1306 mark_external(rq); in __i915_request_await_external()
1307 return i915_sw_fence_await_dma_fence(&rq->submit, fence, in __i915_request_await_external()
1308 i915_fence_context_timeout(rq->engine->i915, in __i915_request_await_external()
1314 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) in i915_request_await_external() argument
1320 return __i915_request_await_external(rq, fence); in i915_request_await_external()
1326 err = __i915_request_await_external(rq, iter); in i915_request_await_external()
1330 err = i915_request_await_dma_fence(rq, chain->fence); in i915_request_await_external()
1339 static inline bool is_parallel_rq(struct i915_request *rq) in is_parallel_rq() argument
1341 return intel_context_is_parallel(rq->context); in is_parallel_rq()
1344 static inline struct intel_context *request_to_parent(struct i915_request *rq) in request_to_parent() argument
1346 return intel_context_to_parent(rq->context); in request_to_parent()
1359 i915_request_await_execution(struct i915_request *rq, in i915_request_await_execution() argument
1381 if (fence->context == rq->fence.context) in i915_request_await_execution()
1390 if (is_same_parallel_context(rq, to_request(fence))) in i915_request_await_execution()
1392 ret = __i915_request_await_execution(rq, in i915_request_await_execution()
1395 ret = i915_request_await_external(rq, fence); in i915_request_await_execution()
1456 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) in i915_request_await_dma_fence() argument
1488 if (fence->context == rq->fence.context) in i915_request_await_dma_fence()
1493 intel_timeline_sync_is_later(i915_request_timeline(rq), in i915_request_await_dma_fence()
1498 if (is_same_parallel_context(rq, to_request(fence))) in i915_request_await_dma_fence()
1500 ret = i915_request_await_request(rq, to_request(fence)); in i915_request_await_dma_fence()
1502 ret = i915_request_await_external(rq, fence); in i915_request_await_dma_fence()
1509 intel_timeline_sync_set(i915_request_timeline(rq), in i915_request_await_dma_fence()
1555 __i915_request_ensure_parallel_ordering(struct i915_request *rq, in __i915_request_ensure_parallel_ordering() argument
1560 GEM_BUG_ON(!is_parallel_rq(rq)); in __i915_request_ensure_parallel_ordering()
1562 prev = request_to_parent(rq)->parallel.last_rq; in __i915_request_ensure_parallel_ordering()
1565 i915_sw_fence_await_sw_fence(&rq->submit, in __i915_request_ensure_parallel_ordering()
1567 &rq->submitq); in __i915_request_ensure_parallel_ordering()
1569 if (rq->engine->sched_engine->schedule) in __i915_request_ensure_parallel_ordering()
1570 __i915_sched_node_add_dependency(&rq->sched, in __i915_request_ensure_parallel_ordering()
1572 &rq->dep, in __i915_request_ensure_parallel_ordering()
1578 request_to_parent(rq)->parallel.last_rq = i915_request_get(rq); in __i915_request_ensure_parallel_ordering()
1581 &rq->fence)); in __i915_request_ensure_parallel_ordering()
1585 __i915_request_ensure_ordering(struct i915_request *rq, in __i915_request_ensure_ordering() argument
1590 GEM_BUG_ON(is_parallel_rq(rq)); in __i915_request_ensure_ordering()
1593 &rq->fence)); in __i915_request_ensure_ordering()
1596 bool uses_guc = intel_engine_uses_guc(rq->engine); in __i915_request_ensure_ordering()
1598 rq->engine->mask); in __i915_request_ensure_ordering()
1599 bool same_context = prev->context == rq->context; in __i915_request_ensure_ordering()
1609 rq->fence.seqno)); in __i915_request_ensure_ordering()
1612 i915_sw_fence_await_sw_fence(&rq->submit, in __i915_request_ensure_ordering()
1614 &rq->submitq); in __i915_request_ensure_ordering()
1616 __i915_sw_fence_await_dma_fence(&rq->submit, in __i915_request_ensure_ordering()
1618 &rq->dmaq); in __i915_request_ensure_ordering()
1619 if (rq->engine->sched_engine->schedule) in __i915_request_ensure_ordering()
1620 __i915_sched_node_add_dependency(&rq->sched, in __i915_request_ensure_ordering()
1622 &rq->dep, in __i915_request_ensure_ordering()
1630 __i915_request_add_to_timeline(struct i915_request *rq) in __i915_request_add_to_timeline() argument
1632 struct intel_timeline *timeline = i915_request_timeline(rq); in __i915_request_add_to_timeline()
1665 if (likely(!is_parallel_rq(rq))) in __i915_request_add_to_timeline()
1666 prev = __i915_request_ensure_ordering(rq, timeline); in __i915_request_add_to_timeline()
1668 prev = __i915_request_ensure_parallel_ordering(rq, timeline); in __i915_request_add_to_timeline()
1675 GEM_BUG_ON(timeline->seqno != rq->fence.seqno); in __i915_request_add_to_timeline()
1685 struct i915_request *__i915_request_commit(struct i915_request *rq) in __i915_request_commit() argument
1687 struct intel_engine_cs *engine = rq->engine; in __i915_request_commit()
1688 struct intel_ring *ring = rq->ring; in __i915_request_commit()
1691 RQ_TRACE(rq, "\n"); in __i915_request_commit()
1698 GEM_BUG_ON(rq->reserved_space > ring->space); in __i915_request_commit()
1699 rq->reserved_space = 0; in __i915_request_commit()
1700 rq->emitted_jiffies = jiffies; in __i915_request_commit()
1708 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw); in __i915_request_commit()
1710 rq->postfix = intel_ring_offset(rq, cs); in __i915_request_commit()
1712 return __i915_request_add_to_timeline(rq); in __i915_request_commit()
1715 void __i915_request_queue_bh(struct i915_request *rq) in __i915_request_queue_bh() argument
1717 i915_sw_fence_commit(&rq->semaphore); in __i915_request_queue_bh()
1718 i915_sw_fence_commit(&rq->submit); in __i915_request_queue_bh()
1721 void __i915_request_queue(struct i915_request *rq, in __i915_request_queue() argument
1735 if (attr && rq->engine->sched_engine->schedule) in __i915_request_queue()
1736 rq->engine->sched_engine->schedule(rq, attr); in __i915_request_queue()
1739 __i915_request_queue_bh(rq); in __i915_request_queue()
1743 void i915_request_add(struct i915_request *rq) in i915_request_add() argument
1745 struct intel_timeline * const tl = i915_request_timeline(rq); in i915_request_add()
1750 lockdep_unpin_lock(&tl->mutex, rq->cookie); in i915_request_add()
1752 trace_i915_request_add(rq); in i915_request_add()
1753 __i915_request_commit(rq); in i915_request_add()
1757 ctx = rcu_dereference(rq->context->gem_context); in i915_request_add()
1762 __i915_request_queue(rq, &attr); in i915_request_add()
1800 static bool __i915_spin_request(struct i915_request * const rq, int state) in __i915_spin_request() argument
1816 if (!i915_request_is_running(rq)) in __i915_spin_request()
1830 timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns); in __i915_spin_request()
1833 if (dma_fence_is_signaled(&rq->fence)) in __i915_spin_request()
1875 long i915_request_wait(struct i915_request *rq, in i915_request_wait() argument
1886 if (dma_fence_is_signaled(&rq->fence)) in i915_request_wait()
1892 trace_i915_request_wait_begin(rq, flags); in i915_request_wait()
1900 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_); in i915_request_wait()
1926 __i915_spin_request(rq, state)) in i915_request_wait()
1941 if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq)) in i915_request_wait()
1942 intel_rps_boost(rq); in i915_request_wait()
1945 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) in i915_request_wait()
1963 if (i915_request_is_ready(rq)) in i915_request_wait()
1964 __intel_engine_flush_submission(rq->engine, false); in i915_request_wait()
1969 if (dma_fence_is_signaled(&rq->fence)) in i915_request_wait()
1987 dma_fence_remove_callback(&rq->fence, &wait.cb); in i915_request_wait()
1991 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_); in i915_request_wait()
1992 trace_i915_request_wait_end(rq); in i915_request_wait()
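
i915_request_wait() (lines 1875-1992 above) first busy-spins briefly via __i915_spin_request(), bounded by the engine's max_busywait_duration_ns, and only installs a fence callback and sleeps if the request is still not signaled. A userspace approximation of that spin-then-block strategy; the waiter structure and the pthread mutex/condvar stand in for the dma-fence callback machinery and are purely illustrative. The completing side is assumed to set 'done' and signal 'cond' while holding 'lock'.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

struct waiter {
	_Atomic bool done;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

/* Optimistically poll 'done' for at most budget_ns nanoseconds. */
static bool spin_for_completion(struct waiter *w, long budget_ns)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (atomic_load(&w->done))
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000000000L +
		 (now.tv_nsec - start.tv_nsec) < budget_ns);

	return false;
}

static void wait_for_completion(struct waiter *w)
{
	if (spin_for_completion(w, 10000))	/* ~10us of optimistic spinning */
		return;

	pthread_mutex_lock(&w->lock);		/* slow path: block until signaled */
	while (!atomic_load(&w->done))
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
}
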
2008 static char queue_status(const struct i915_request *rq) in queue_status() argument
2010 if (i915_request_is_active(rq)) in queue_status()
2013 if (i915_request_is_ready(rq)) in queue_status()
2014 return intel_engine_is_virtual(rq->engine) ? 'V' : 'R'; in queue_status()
2019 static const char *run_status(const struct i915_request *rq) in run_status() argument
2021 if (__i915_request_is_complete(rq)) in run_status()
2024 if (__i915_request_has_started(rq)) in run_status()
2027 if (!i915_sw_fence_signaled(&rq->semaphore)) in run_status()
2033 static const char *fence_status(const struct i915_request *rq) in fence_status() argument
2035 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) in fence_status()
2038 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) in fence_status()
2045 const struct i915_request *rq, in i915_request_show() argument
2049 const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence); in i915_request_show()
2083 x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf)); in i915_request_show()
2087 queue_status(rq), in i915_request_show()
2088 rq->fence.context, rq->fence.seqno, in i915_request_show()
2089 run_status(rq), in i915_request_show()
2090 fence_status(rq), in i915_request_show()
2092 jiffies_to_msecs(jiffies - rq->emitted_jiffies), in i915_request_show()
2096 static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq) in engine_match_ring() argument
2100 return ring == i915_ggtt_offset(rq->ring->vma); in engine_match_ring()
2103 static bool match_ring(struct i915_request *rq) in match_ring() argument
2109 if (!intel_engine_is_virtual(rq->engine)) in match_ring()
2110 return engine_match_ring(rq->engine, rq); in match_ring()
2114 while ((engine = intel_engine_get_sibling(rq->engine, i++))) { in match_ring()
2115 found = engine_match_ring(engine, rq); in match_ring()
2123 enum i915_request_state i915_test_request_state(struct i915_request *rq) in i915_test_request_state() argument
2125 if (i915_request_completed(rq)) in i915_test_request_state()
2128 if (!i915_request_started(rq)) in i915_test_request_state()
2131 if (match_ring(rq)) in i915_test_request_state()