Lines matching refs: sched_engine

All references to the identifier sched_engine in the i915 scheduler (drivers/gpu/drm/i915/i915_scheduler.c). Each entry gives the source line number, the matching code, and the enclosing function; declarations are flagged as "argument" or "local".
39 static void assert_priolists(struct i915_sched_engine * const sched_engine) in assert_priolists() argument
47 GEM_BUG_ON(rb_first_cached(&sched_engine->queue) != in assert_priolists()
48 rb_first(&sched_engine->queue.rb_root)); in assert_priolists()
51 for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) { in assert_priolists()
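
assert_priolists() is a debug check on the scheduler's priority queue: the queue is an rbtree keyed on priority with its leftmost (highest-priority) node cached, so the cached node must agree with a fresh rb_first() walk, and priorities must never increase while iterating. Below is a minimal user-space model of that invariant; all types and names are illustrative stand-ins, not the kernel code.

#include <assert.h>
#include <limits.h>
#include <stddef.h>

struct priolist {
	int priority;
	struct priolist *next;	/* stand-in for rb_next() */
};

struct sched_queue {
	struct priolist *first;		/* stand-in for rb_first() */
	struct priolist *cached_first;	/* stand-in for the rb_root_cached hint */
};

static void assert_priolists_model(const struct sched_queue *q)
{
	int last_prio = INT_MAX;

	/* The cached leftmost must match a real walk from the root. */
	assert(q->cached_first == q->first);

	/* Priorities must be non-increasing (keys are unique in the
	 * real tree, so effectively strictly descending). */
	for (const struct priolist *p = q->first; p; p = p->next) {
		assert(p->priority <= last_prio);
		last_prio = p->priority;
	}
}

int main(void)
{
	struct priolist low = { .priority = -10, .next = NULL };
	struct priolist high = { .priority = 5, .next = &low };
	struct sched_queue q = { .first = &high, .cached_first = &high };

	assert_priolists_model(&q);
	return 0;
}
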
60 i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio) in i915_sched_lookup_priolist() argument
66 lockdep_assert_held(&sched_engine->lock); in i915_sched_lookup_priolist()
67 assert_priolists(sched_engine); in i915_sched_lookup_priolist()
69 if (unlikely(sched_engine->no_priolist)) in i915_sched_lookup_priolist()
75 parent = &sched_engine->queue.rb_root.rb_node; in i915_sched_lookup_priolist()
90 p = &sched_engine->default_priolist; in i915_sched_lookup_priolist()
105 sched_engine->no_priolist = true; in i915_sched_lookup_priolist()
114 rb_insert_color_cached(&p->node, &sched_engine->queue, first); in i915_sched_lookup_priolist()
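
i915_sched_lookup_priolist() finds, or creates, the bucket of requests for a given priority level. Two details the fragments above hint at: priority 0 reuses the embedded default_priolist so the common case never allocates, and if an allocation does fail, no_priolist is set so every later lookup collapses onto that single shared bucket (FIFO from then on). A simplified user-space sketch follows, with a sorted singly linked list standing in for the kernel's leftmost-cached rbtree; names are stand-ins.

#include <stdbool.h>
#include <stdlib.h>

struct priolist {
	int priority;
	struct priolist *next;
	/* the kernel hangs a list of requests off each bucket */
};

struct sched_engine_model {
	struct priolist *queue;			/* sorted, highest priority first */
	struct priolist default_priolist;	/* embedded, never freed */
	bool no_priolist;
};

static struct priolist *lookup_priolist(struct sched_engine_model *se, int prio)
{
	struct priolist **link, *p;

	if (se->no_priolist)
		prio = 0;	/* degraded mode: one shared bucket */

find_priolist:
	for (link = &se->queue; (p = *link); link = &p->next) {
		if (prio > p->priority)
			break;		/* insert before lower priority */
		if (prio == p->priority)
			return p;	/* bucket already exists */
	}

	if (prio == 0) {
		p = &se->default_priolist;	/* common case: no allocation */
	} else {
		p = malloc(sizeof(*p));
		if (!p) {
			/* Like the kernel: disable per-priority buckets
			 * and retry with the shared default bucket. */
			se->no_priolist = true;
			prio = 0;
			goto find_priolist;
		}
	}

	p->priority = prio;
	p->next = *link;
	*link = p;
	return p;
}

int main(void)
{
	struct sched_engine_model se = { .queue = NULL };

	lookup_priolist(&se, 3);
	lookup_priolist(&se, -3);
	lookup_priolist(&se, 0);	/* uses the embedded default bucket */
	return 0;
}
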
134 struct i915_sched_engine *sched_engine; in lock_sched_engine() local
144 while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) { in lock_sched_engine()
147 spin_lock(&sched_engine->lock); in lock_sched_engine()
148 locked = sched_engine; in lock_sched_engine()
151 GEM_BUG_ON(locked != sched_engine); in lock_sched_engine()
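
lock_sched_engine() deals with the fact that a request's engine can be reassigned concurrently (e.g. by a virtual engine), so rq->engine may only be trusted once you hold the lock you reached through it. The loop therefore re-reads the engine after locking and chases it if it moved. A pthreads model of the same dance, with stand-in types (the kernel uses spinlocks and READ_ONCE()):

#include <pthread.h>
#include <stdatomic.h>

struct sched_engine_model {
	pthread_mutex_t lock;
};

struct request_model {
	_Atomic(struct sched_engine_model *) engine;
};

/* Caller enters holding locked->lock; returns holding the lock of
 * whichever engine the request was last observed on. */
static struct sched_engine_model *
lock_sched_engine_model(struct request_model *rq,
			struct sched_engine_model *locked)
{
	struct sched_engine_model *se;

	while (locked != (se = atomic_load(&rq->engine))) {
		pthread_mutex_unlock(&locked->lock);
		pthread_mutex_lock(&se->lock);
		locked = se;	/* re-check: it may have moved again */
	}

	return locked;
}

int main(void)
{
	struct sched_engine_model e0 = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct request_model rq;
	struct sched_engine_model *locked;

	atomic_init(&rq.engine, &e0);

	pthread_mutex_lock(&e0.lock);		/* precondition: enter locked */
	locked = lock_sched_engine_model(&rq, &e0);
	pthread_mutex_unlock(&locked->lock);
	return 0;
}
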
159 struct i915_sched_engine *sched_engine; in __i915_schedule() local
234 sched_engine = node_to_request(node)->engine->sched_engine; in __i915_schedule()
235 spin_lock(&sched_engine->lock); in __i915_schedule()
238 sched_engine = lock_sched_engine(node, sched_engine, &cache); in __i915_schedule()
246 sched_engine = lock_sched_engine(node, sched_engine, &cache); in __i915_schedule()
247 lockdep_assert_held(&sched_engine->lock); in __i915_schedule()
253 GEM_BUG_ON(node_to_request(node)->engine->sched_engine != in __i915_schedule()
254 sched_engine); in __i915_schedule()
257 if (sched_engine->bump_inflight_request_prio) in __i915_schedule()
258 sched_engine->bump_inflight_request_prio(from, prio); in __i915_schedule()
276 i915_sched_lookup_priolist(sched_engine, in __i915_schedule()
282 if (sched_engine->kick_backend) in __i915_schedule()
283 sched_engine->kick_backend(node_to_request(node), prio); in __i915_schedule()
286 spin_unlock(&sched_engine->lock); in __i915_schedule()
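
__i915_schedule() first builds the list of requests that must complete before the boosted one, then revisits each node under its engine's lock: it lets the backend raise the priority of work already in flight (bump_inflight_request_prio), requeues queued work onto the bucket for the new priority via i915_sched_lookup_priolist(), and gives the backend a chance to preempt (kick_backend). Below is a runnable user-space model of that requeue loop only; every name is an illustrative stand-in and the real bookkeeping is far richer.

#include <pthread.h>
#include <stdio.h>

struct engine_model {
	pthread_mutex_t lock;
	const char *name;
};

struct node_model {
	struct engine_model *engine;
	int priority;
	struct node_model *next;	/* dependency-list linkage */
};

static void bump_priorities(struct node_model *list, int prio)
{
	struct engine_model *locked = list->engine;

	pthread_mutex_lock(&locked->lock);

	for (struct node_model *node = list; node; node = node->next) {
		/* a node may live on another engine: chase its lock */
		while (locked != node->engine) {
			pthread_mutex_unlock(&locked->lock);
			locked = node->engine;
			pthread_mutex_lock(&locked->lock);
		}

		if (node->priority < prio) {
			node->priority = prio;
			/* kernel: list_move onto the bucket returned by
			 * i915_sched_lookup_priolist(se, prio), then
			 * se->kick_backend() may trigger preemption */
			printf("bumped node on %s to prio %d\n",
			       locked->name, prio);
		}
	}

	pthread_mutex_unlock(&locked->lock);
}

int main(void)
{
	struct engine_model rcs = { PTHREAD_MUTEX_INITIALIZER, "rcs0" };
	struct engine_model vcs = { PTHREAD_MUTEX_INITIALIZER, "vcs0" };
	struct node_model dep = { &vcs, -5, NULL };
	struct node_model req = { &rcs, 0, &dep };

	bump_priorities(&req, 10);
	return 0;
}
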
439 struct i915_sched_engine *sched_engine = in default_destroy() local
440 container_of(kref, typeof(*sched_engine), ref); in default_destroy()
442 tasklet_kill(&sched_engine->tasklet); /* flush the callback */ in default_destroy()
443 kfree(sched_engine); in default_destroy()
446 static bool default_disabled(struct i915_sched_engine *sched_engine) in default_disabled() argument
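
default_destroy() is the kref release callback: it recovers the engine from the embedded kref with container_of(), kills the tasklet so no callback is left running against freed memory, and only then frees. default_disabled() simply reports the engine as enabled by returning false. A user-space sketch of the release pattern, with stand-in types and the offsetof-based container_of idiom as in the kernel:

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref_model {
	int refcount;
};

struct sched_engine_model {
	struct kref_model ref;
	/* ... queue, lock, tasklet ... */
};

static void sched_engine_release(struct kref_model *kref)
{
	struct sched_engine_model *se =
		container_of(kref, struct sched_engine_model, ref);

	/* kernel: tasklet_kill(&se->tasklet) flushes a callback in flight */
	free(se);
}

static void kref_put_model(struct kref_model *kref,
			   void (*release)(struct kref_model *))
{
	if (--kref->refcount == 0)
		release(kref);
}

int main(void)
{
	struct sched_engine_model *se = calloc(1, sizeof(*se));

	if (!se)
		return 1;

	se->ref.refcount = 1;
	kref_put_model(&se->ref, sched_engine_release);	/* drops to 0, frees */
	return 0;
}
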
454 struct i915_sched_engine *sched_engine; in i915_sched_engine_create() local
456 sched_engine = kzalloc(sizeof(*sched_engine), GFP_KERNEL); in i915_sched_engine_create()
457 if (!sched_engine) in i915_sched_engine_create()
460 kref_init(&sched_engine->ref); in i915_sched_engine_create()
462 sched_engine->queue = RB_ROOT_CACHED; in i915_sched_engine_create()
463 sched_engine->queue_priority_hint = INT_MIN; in i915_sched_engine_create()
464 sched_engine->destroy = default_destroy; in i915_sched_engine_create()
465 sched_engine->disabled = default_disabled; in i915_sched_engine_create()
467 INIT_LIST_HEAD(&sched_engine->requests); in i915_sched_engine_create()
468 INIT_LIST_HEAD(&sched_engine->hold); in i915_sched_engine_create()
470 spin_lock_init(&sched_engine->lock); in i915_sched_engine_create()
471 lockdep_set_subclass(&sched_engine->lock, subclass); in i915_sched_engine_create()
480 lock_map_acquire(&sched_engine->lock.dep_map); in i915_sched_engine_create()
481 lock_map_release(&sched_engine->lock.dep_map); in i915_sched_engine_create()
485 return sched_engine; in i915_sched_engine_create()
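
i915_sched_engine_create() is plain construction: zeroed allocation, refcount at one, an empty cached rbtree, queue_priority_hint at INT_MIN (nothing queued), default vfuncs, and an initialised, subclassed lock. The trailing lock_map_acquire()/lock_map_release() pair only exercises the lock once so that lockdep's unused-lock accounting stays balanced after lockdep_set_subclass(); it is debug scaffolding, not part of the algorithm. A user-space shape of the same constructor, with stand-in types:

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>

struct sched_engine_model {
	int refcount;
	int queue_priority_hint;
	pthread_mutex_t lock;
	void (*destroy)(struct sched_engine_model *);
};

static void default_destroy_model(struct sched_engine_model *se)
{
	free(se);
}

static struct sched_engine_model *sched_engine_create_model(void)
{
	struct sched_engine_model *se = calloc(1, sizeof(*se));

	if (!se)
		return NULL;

	se->refcount = 1;			/* like kref_init() */
	se->queue_priority_hint = INT_MIN;	/* nothing queued yet */
	se->destroy = default_destroy_model;
	pthread_mutex_init(&se->lock, NULL);

	return se;
}

int main(void)
{
	struct sched_engine_model *se = sched_engine_create_model();

	if (se)
		se->destroy(se);
	return 0;
}
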