1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler picks entities
 * from a run queue in FIFO order by default; round robin can be selected with
 * the sched_policy module parameter. The scheduler also provides dependency
 * handling between jobs. The driver is supposed to provide callback functions
 * for backend operations to the scheduler, such as submitting a job to the
 * hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed. A sketch of the driver-side submission flow follows this comment.
 */
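
/*
 * Illustrative sketch of the driver-side submission flow built on top of this
 * scheduler (hypothetical driver code, not part of this file; names like
 * my_job, ctx and gem_obj are made up, the surrounding locking and error
 * handling are driver specific, and the GEM object's reservation must already
 * be locked when the implicit dependencies are added):
 *
 *	struct drm_sched_job *job = &my_job->base;
 *	int ret;
 *
 *	ret = drm_sched_job_init(job, &ctx->entity, my_job);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(job, gem_obj, true);
 *	if (ret) {
 *		drm_sched_job_cleanup(job);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(job);
 *	drm_sched_entity_push_job(job);
 */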
46
47 #include <linux/kthread.h>
48 #include <linux/wait.h>
49 #include <linux/sched.h>
50 #include <linux/completion.h>
51 #include <linux/dma-resv.h>
52 #include <uapi/linux/sched/types.h>
53
54 #include <drm/drm_print.h>
55 #include <drm/drm_gem.h>
56 #include <drm/gpu_scheduler.h>
57 #include <drm/spsc_queue.h>
58
59 #define CREATE_TRACE_POINTS
60 #include "gpu_scheduler_trace.h"
61
62 #define to_drm_sched_job(sched_job) \
63 container_of((sched_job), struct drm_sched_job, queue_node)
64
65 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
66
/**
 * DOC: sched_policy (int)
 * Used to override the default scheduling policy for entities on a run queue.
 */
71 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
72 module_param_named(sched_policy, drm_sched_policy, int, 0444);
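
/*
 * Example: assuming the scheduler is built as the gpu_sched module and the
 * usual value DRM_SCHED_POLICY_RR == 0, round robin can be selected instead of
 * the default FIFO with "gpu_sched.sched_policy=0" on the kernel command line,
 * or with the equivalent module parameter at load time.
 */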
73
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							     const struct rb_node *b)
76 {
77 struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
78 struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
79
80 return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
81 }
82
static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
84 {
85 struct drm_sched_rq *rq = entity->rq;
86
87 if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
88 rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
89 RB_CLEAR_NODE(&entity->rb_tree_node);
90 }
91 }
92
void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
94 {
95 /*
96 * Both locks need to be grabbed, one to protect from entity->rq change
97 * for entity from within concurrent drm_sched_entity_select_rq and the
98 * other to update the rb tree structure.
99 */
100 spin_lock(&entity->rq_lock);
101 spin_lock(&entity->rq->lock);
102
103 drm_sched_rq_remove_fifo_locked(entity);
104
105 entity->oldest_job_waiting = ts;
106
107 rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
108 drm_sched_entity_compare_before);
109
110 spin_unlock(&entity->rq->lock);
111 spin_unlock(&entity->rq_lock);
112 }
113
114 /**
115 * drm_sched_rq_init - initialize a given run queue struct
116 *
117 * @sched: scheduler instance to associate with this run queue
118 * @rq: scheduler run queue
119 *
120 * Initializes a scheduler runqueue.
121 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
124 {
125 spin_lock_init(&rq->lock);
126 INIT_LIST_HEAD(&rq->entities);
127 rq->rb_tree_root = RB_ROOT_CACHED;
128 rq->current_entity = NULL;
129 rq->sched = sched;
130 }
131
132 /**
133 * drm_sched_rq_add_entity - add an entity
134 *
135 * @rq: scheduler run queue
136 * @entity: scheduler entity
137 *
138 * Adds a scheduler entity to the run queue.
139 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
142 {
143 if (!list_empty(&entity->list))
144 return;
145
146 spin_lock(&rq->lock);
147
148 atomic_inc(rq->sched->score);
149 list_add_tail(&entity->list, &rq->entities);
150
151 spin_unlock(&rq->lock);
152 }
153
154 /**
155 * drm_sched_rq_remove_entity - remove an entity
156 *
157 * @rq: scheduler run queue
158 * @entity: scheduler entity
159 *
160 * Removes a scheduler entity from the run queue.
161 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
164 {
165 if (list_empty(&entity->list))
166 return;
167
168 spin_lock(&rq->lock);
169
170 atomic_dec(rq->sched->score);
171 list_del_init(&entity->list);
172
173 if (rq->current_entity == entity)
174 rq->current_entity = NULL;
175
176 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
177 drm_sched_rq_remove_fifo_locked(entity);
178
179 spin_unlock(&rq->lock);
180 }
181
182 /**
183 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
184 *
185 * @rq: scheduler run queue to check.
186 *
187 * Try to find a ready entity, returns NULL if none found.
188 */
189 static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
191 {
192 struct drm_sched_entity *entity;
193
194 spin_lock(&rq->lock);
195
196 entity = rq->current_entity;
197 if (entity) {
198 list_for_each_entry_continue(entity, &rq->entities, list) {
199 if (drm_sched_entity_is_ready(entity)) {
200 rq->current_entity = entity;
201 reinit_completion(&entity->entity_idle);
202 spin_unlock(&rq->lock);
203 return entity;
204 }
205 }
206 }
207
208 list_for_each_entry(entity, &rq->entities, list) {
209
210 if (drm_sched_entity_is_ready(entity)) {
211 rq->current_entity = entity;
212 reinit_completion(&entity->entity_idle);
213 spin_unlock(&rq->lock);
214 return entity;
215 }
216
217 if (entity == rq->current_entity)
218 break;
219 }
220
221 spin_unlock(&rq->lock);
222
223 return NULL;
224 }
225
226 /**
227 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
228 *
229 * @rq: scheduler run queue to check.
230 *
231 * Find oldest waiting ready entity, returns NULL if none found.
232 */
233 static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
235 {
236 struct rb_node *rb;
237
238 spin_lock(&rq->lock);
239 for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
240 struct drm_sched_entity *entity;
241
242 entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
243 if (drm_sched_entity_is_ready(entity)) {
244 rq->current_entity = entity;
245 reinit_completion(&entity->entity_idle);
246 break;
247 }
248 }
249 spin_unlock(&rq->lock);
250
251 return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
252 }
253
254 /**
255 * drm_sched_job_done - complete a job
256 * @s_job: pointer to the job which is done
257 *
258 * Finish the job's fence and wake up the worker thread.
259 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
261 {
262 struct drm_sched_fence *s_fence = s_job->s_fence;
263 struct drm_gpu_scheduler *sched = s_fence->sched;
264
265 atomic_dec(&sched->hw_rq_count);
266 atomic_dec(sched->score);
267
268 trace_drm_sched_process_job(s_fence);
269
270 dma_fence_get(&s_fence->finished);
271 drm_sched_fence_finished(s_fence);
272 dma_fence_put(&s_fence->finished);
273 wake_up_interruptible(&sched->wake_up_worker);
274 }
275
276 /**
277 * drm_sched_job_done_cb - the callback for a done job
278 * @f: fence
279 * @cb: fence callbacks
280 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
282 {
283 struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
284
285 drm_sched_job_done(s_job);
286 }
287
288 /**
289 * drm_sched_start_timeout - start timeout for reset worker
290 *
291 * @sched: scheduler instance to start the worker for
292 *
293 * Start the timeout for the given scheduler.
294 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
296 {
297 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
298 !list_empty(&sched->pending_list))
299 queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
300 }
301
302 /**
303 * drm_sched_fault - immediately start timeout handler
304 *
305 * @sched: scheduler where the timeout handling should be started.
306 *
307 * Start timeout handling immediately when the driver detects a hardware fault.
308 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
310 {
311 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
312 }
313 EXPORT_SYMBOL(drm_sched_fault);
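
/*
 * Illustrative sketch: a driver would typically call drm_sched_fault() from
 * its hang or fault interrupt handler so that timeout handling starts without
 * waiting for the regular timeout to expire (hypothetical handler; my_gpu and
 * my_fault_irq are made-up names):
 *
 *	static irqreturn_t my_fault_irq(int irq, void *data)
 *	{
 *		struct my_gpu *gpu = data;
 *
 *		drm_sched_fault(&gpu->sched);
 *		return IRQ_HANDLED;
 *	}
 */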
314
315 /**
316 * drm_sched_suspend_timeout - Suspend scheduler job timeout
317 *
318 * @sched: scheduler instance for which to suspend the timeout
319 *
320 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
322 * MAX_SCHEDULE_TIMEOUT in this case.
323 *
324 * Returns the timeout remaining
325 *
326 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
328 {
329 unsigned long sched_timeout, now = jiffies;
330
331 sched_timeout = sched->work_tdr.timer.expires;
332
333 /*
334 * Modify the timeout to an arbitrarily large value. This also prevents
 * the timeout from being restarted when new submissions arrive
336 */
337 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
338 && time_after(sched_timeout, now))
339 return sched_timeout - now;
340 else
341 return sched->timeout;
342 }
343 EXPORT_SYMBOL(drm_sched_suspend_timeout);
344
345 /**
346 * drm_sched_resume_timeout - Resume scheduler job timeout
347 *
348 * @sched: scheduler instance for which to resume the timeout
349 * @remaining: remaining timeout
350 *
351 * Resume the delayed work timeout for the scheduler.
352 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
355 {
356 spin_lock(&sched->job_list_lock);
357
358 if (list_empty(&sched->pending_list))
359 cancel_delayed_work(&sched->work_tdr);
360 else
361 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
362
363 spin_unlock(&sched->job_list_lock);
364 }
365 EXPORT_SYMBOL(drm_sched_resume_timeout);
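
/*
 * Illustrative sketch of pairing the two helpers above around a section during
 * which job timeouts should not fire, for example while the driver reloads
 * firmware (hypothetical driver code; my_gpu_reload_firmware is made up):
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&gpu->sched);
 *	my_gpu_reload_firmware(gpu);
 *	drm_sched_resume_timeout(&gpu->sched, remaining);
 */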
366
static void drm_sched_job_begin(struct drm_sched_job *s_job)
368 {
369 struct drm_gpu_scheduler *sched = s_job->sched;
370
371 spin_lock(&sched->job_list_lock);
372 list_add_tail(&s_job->list, &sched->pending_list);
373 drm_sched_start_timeout(sched);
374 spin_unlock(&sched->job_list_lock);
375 }
376
static void drm_sched_job_timedout(struct work_struct *work)
378 {
379 struct drm_gpu_scheduler *sched;
380 struct drm_sched_job *job;
381 enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
382
383 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
384
385 /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
386 spin_lock(&sched->job_list_lock);
387 job = list_first_entry_or_null(&sched->pending_list,
388 struct drm_sched_job, list);
389
390 if (job) {
391 /*
392 * Remove the bad job so it cannot be freed by concurrent
		 * drm_sched_cleanup_jobs. It will be reinserted after sched->thread
		 * is parked, at which point it's safe.
395 */
396 list_del_init(&job->list);
397 spin_unlock(&sched->job_list_lock);
398
399 status = job->sched->ops->timedout_job(job);
400
401 /*
402 * Guilty job did complete and hence needs to be manually removed
403 * See drm_sched_stop doc.
404 */
405 if (sched->free_guilty) {
406 job->sched->ops->free_job(job);
407 sched->free_guilty = false;
408 }
409 } else {
410 spin_unlock(&sched->job_list_lock);
411 }
412
413 if (status != DRM_GPU_SCHED_STAT_ENODEV) {
414 spin_lock(&sched->job_list_lock);
415 drm_sched_start_timeout(sched);
416 spin_unlock(&sched->job_list_lock);
417 }
418 }
419
420 /**
421 * drm_sched_stop - stop the scheduler
422 *
423 * @sched: scheduler instance
424 * @bad: job which caused the time out
425 *
 * Stops the scheduler and also removes and frees all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is no longer part of
 * the pending list.
430 *
431 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
433 {
434 struct drm_sched_job *s_job, *tmp;
435
436 kthread_park(sched->thread);
437
438 /*
439 * Reinsert back the bad job here - now it's safe as
440 * drm_sched_get_cleanup_job cannot race against us and release the
441 * bad job at this point - we parked (waited for) any in progress
442 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
443 * now until the scheduler thread is unparked.
444 */
445 if (bad && bad->sched == sched)
446 /*
447 * Add at the head of the queue to reflect it was the earliest
448 * job extracted.
449 */
450 list_add(&bad->list, &sched->pending_list);
451
452 /*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
457 */
458 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
459 list) {
460 if (s_job->s_fence->parent &&
461 dma_fence_remove_callback(s_job->s_fence->parent,
462 &s_job->cb)) {
463 dma_fence_put(s_job->s_fence->parent);
464 s_job->s_fence->parent = NULL;
465 atomic_dec(&sched->hw_rq_count);
466 } else {
467 /*
468 * remove job from pending_list.
469 * Locking here is for concurrent resume timeout
470 */
471 spin_lock(&sched->job_list_lock);
472 list_del_init(&s_job->list);
473 spin_unlock(&sched->job_list_lock);
474
475 /*
476 * Wait for job's HW fence callback to finish using s_job
477 * before releasing it.
478 *
479 * Job is still alive so fence refcount at least 1
480 */
481 dma_fence_wait(&s_job->s_fence->finished, false);
482
483 /*
484 * We must keep bad job alive for later use during
485 * recovery by some of the drivers but leave a hint
486 * that the guilty job must be released.
487 */
488 if (bad != s_job)
489 sched->ops->free_job(s_job);
490 else
491 sched->free_guilty = true;
492 }
493 }
494
495 /*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start. This
	 * prevents the pending timeout work in progress from firing right away after
	 * this TDR has finished and before the newly restarted jobs have had a
	 * chance to complete.
500 */
501 cancel_delayed_work(&sched->work_tdr);
502 }
503
504 EXPORT_SYMBOL(drm_sched_stop);
505
506 /**
507 * drm_sched_start - recover jobs after a reset
508 *
509 * @sched: scheduler instance
510 * @full_recovery: proceed with complete sched restart
511 *
512 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
514 {
515 struct drm_sched_job *s_job, *tmp;
516 int r;
517
518 /*
519 * Locking the list is not required here as the sched thread is parked
520 * so no new jobs are being inserted or removed. Also concurrent
521 * GPU recovers can't run in parallel.
522 */
523 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
524 struct dma_fence *fence = s_job->s_fence->parent;
525
526 atomic_inc(&sched->hw_rq_count);
527
528 if (!full_recovery)
529 continue;
530
531 if (fence) {
532 r = dma_fence_add_callback(fence, &s_job->cb,
533 drm_sched_job_done_cb);
534 if (r == -ENOENT)
535 drm_sched_job_done(s_job);
536 else if (r)
537 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
538 r);
539 } else
540 drm_sched_job_done(s_job);
541 }
542
543 if (full_recovery) {
544 spin_lock(&sched->job_list_lock);
545 drm_sched_start_timeout(sched);
546 spin_unlock(&sched->job_list_lock);
547 }
548
549 kthread_unpark(sched->thread);
550 }
551 EXPORT_SYMBOL(drm_sched_start);
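
/*
 * Illustrative sketch of a recovery sequence as it could look in a driver's
 * &drm_sched_backend_ops.timedout_job callback (hypothetical driver code;
 * my_gpu_reset_hw is made up, and drm_sched_resubmit_jobs() is deprecated,
 * see its documentation below):
 *
 *	drm_sched_stop(&gpu->sched, bad_job);
 *	drm_sched_increase_karma(bad_job);
 *	my_gpu_reset_hw(gpu);
 *	drm_sched_resubmit_jobs(&gpu->sched);
 *	drm_sched_start(&gpu->sched, true);
 *	return DRM_GPU_SCHED_STAT_NOMINAL;
 */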
552
553 /**
554 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
555 *
556 * @sched: scheduler instance
557 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to implement
 * recovery after a job timeout.
 *
 * This turned out to not work very well. First of all there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
565 *
566 * Drivers can still save and restore their state for recovery operations, but
567 * we shouldn't make this a general scheduler feature around the dma_fence
568 * interface.
569 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
571 {
572 struct drm_sched_job *s_job, *tmp;
573 uint64_t guilty_context;
574 bool found_guilty = false;
575 struct dma_fence *fence;
576
577 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
578 struct drm_sched_fence *s_fence = s_job->s_fence;
579
580 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
581 found_guilty = true;
582 guilty_context = s_job->s_fence->scheduled.context;
583 }
584
585 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
586 dma_fence_set_error(&s_fence->finished, -ECANCELED);
587
588 fence = sched->ops->run_job(s_job);
589
590 if (IS_ERR_OR_NULL(fence)) {
591 if (IS_ERR(fence))
592 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
593
594 s_job->s_fence->parent = NULL;
595 } else {
596
597 s_job->s_fence->parent = dma_fence_get(fence);
598
			/* Drop for original kref_init */
600 dma_fence_put(fence);
601 }
602 }
603 }
604 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
605
606 /**
607 * drm_sched_job_init - init a scheduler job
608 * @job: scheduler job to init
609 * @entity: scheduler entity to use
610 * @owner: job owner for debugging
611 *
612 * Refer to drm_sched_entity_push_job() documentation
613 * for locking considerations.
614 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for a @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
622 *
623 * Returns 0 for success, negative error code otherwise.
624 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
628 {
629 if (!entity->rq)
630 return -ENOENT;
631
632 job->entity = entity;
633 job->s_fence = drm_sched_fence_alloc(entity, owner);
634 if (!job->s_fence)
635 return -ENOMEM;
636
637 INIT_LIST_HEAD(&job->list);
638
639 xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
640
641 return 0;
642 }
643 EXPORT_SYMBOL(drm_sched_job_init);
644
645 /**
646 * drm_sched_job_arm - arm a scheduler job for execution
647 * @job: scheduler job to arm
648 *
649 * This arms a scheduler job for execution. Specifically it initializes the
650 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
651 * or other places that need to track the completion of this job.
652 *
653 * Refer to drm_sched_entity_push_job() documentation for locking
654 * considerations.
655 *
656 * This can only be called if drm_sched_job_init() succeeded.
657 */
void drm_sched_job_arm(struct drm_sched_job *job)
659 {
660 struct drm_gpu_scheduler *sched;
661 struct drm_sched_entity *entity = job->entity;
662
663 BUG_ON(!entity);
664 drm_sched_entity_select_rq(entity);
665 sched = entity->rq->sched;
666
667 job->sched = sched;
668 job->s_priority = entity->rq - sched->sched_rq;
669 job->id = atomic64_inc_return(&sched->job_id_count);
670
671 drm_sched_fence_init(job->s_fence, job->entity);
672 }
673 EXPORT_SYMBOL(drm_sched_job_arm);
674
675 /**
676 * drm_sched_job_add_dependency - adds the fence as a job dependency
677 * @job: scheduler job to add the dependencies to
678 * @fence: the dma_fence to add to the list of dependencies.
679 *
680 * Note that @fence is consumed in both the success and error cases.
681 *
682 * Returns:
683 * 0 on success, or an error on failing to expand the array.
684 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
687 {
688 struct dma_fence *entry;
689 unsigned long index;
690 u32 id = 0;
691 int ret;
692
693 if (!fence)
694 return 0;
695
696 /* Deduplicate if we already depend on a fence from the same context.
697 * This lets the size of the array of deps scale with the number of
698 * engines involved, rather than the number of BOs.
699 */
700 xa_for_each(&job->dependencies, index, entry) {
701 if (entry->context != fence->context)
702 continue;
703
704 if (dma_fence_is_later(fence, entry)) {
705 dma_fence_put(entry);
706 xa_store(&job->dependencies, index, fence, GFP_KERNEL);
707 } else {
708 dma_fence_put(fence);
709 }
710 return 0;
711 }
712
713 ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
714 if (ret != 0)
715 dma_fence_put(fence);
716
717 return ret;
718 }
719 EXPORT_SYMBOL(drm_sched_job_add_dependency);
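
/*
 * Illustrative sketch (hypothetical driver code): since the fence reference is
 * consumed, an explicit input fence can simply be handed over, for example one
 * taken from a sync_file file descriptor passed in by userspace:
 *
 *	struct dma_fence *in_fence = sync_file_get_fence(args->in_fence_fd);
 *
 *	if (!in_fence)
 *		return -EINVAL;
 *
 *	ret = drm_sched_job_add_dependency(job, in_fence);
 *	if (ret)
 *		return ret;
 */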
720
721 /**
722 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
723 * @job: scheduler job to add the dependencies to
724 * @resv: the dma_resv object to get the fences from
725 * @usage: the dma_resv_usage to use to filter the fences
726 *
727 * This adds all fences matching the given usage from @resv to @job.
728 * Must be called with the @resv lock held.
729 *
730 * Returns:
731 * 0 on success, or an error on failing to expand the array.
732 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
736 {
737 struct dma_resv_iter cursor;
738 struct dma_fence *fence;
739 int ret;
740
741 dma_resv_assert_held(resv);
742
743 dma_resv_for_each_fence(&cursor, resv, usage, fence) {
744 /* Make sure to grab an additional ref on the added fence */
745 dma_fence_get(fence);
746 ret = drm_sched_job_add_dependency(job, fence);
747 if (ret) {
748 dma_fence_put(fence);
749 return ret;
750 }
751 }
752 return 0;
753 }
754 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
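
/*
 * Illustrative sketch (hypothetical driver code; bo is made up): the
 * reservation object must already be locked when its fences are added as
 * dependencies:
 *
 *	dma_resv_lock(bo->resv, NULL);
 *	ret = drm_sched_job_add_resv_dependencies(job, bo->resv,
 *						   DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(bo->resv);
 */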
755
756 /**
757 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
758 * dependencies
759 * @job: scheduler job to add the dependencies to
760 * @obj: the gem object to add new dependencies from.
761 * @write: whether the job might write the object (so we need to depend on
762 * shared fences in the reservation object).
763 *
764 * This should be called after drm_gem_lock_reservations() on your array of
765 * GEM objects used in the job but before updating the reservations with your
766 * own fences.
767 *
768 * Returns:
769 * 0 on success, or an error on failing to expand the array.
770 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
774 {
775 return drm_sched_job_add_resv_dependencies(job, obj->resv,
776 dma_resv_usage_rw(write));
777 }
778 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
779
780 /**
781 * drm_sched_job_cleanup - clean up scheduler job resources
782 * @job: scheduler job to clean up
783 *
784 * Cleans up the resources allocated with drm_sched_job_init().
785 *
786 * Drivers should call this from their error unwind code if @job is aborted
787 * before drm_sched_job_arm() is called.
788 *
789 * After that point of no return @job is committed to be executed by the
790 * scheduler, and this function should be called from the
791 * &drm_sched_backend_ops.free_job callback.
792 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
794 {
795 struct dma_fence *fence;
796 unsigned long index;
797
798 if (kref_read(&job->s_fence->finished.refcount)) {
799 /* drm_sched_job_arm() has been called */
800 dma_fence_put(&job->s_fence->finished);
801 } else {
802 /* aborted job before committing to run it */
803 drm_sched_fence_free(job->s_fence);
804 }
805
806 job->s_fence = NULL;
807
808 xa_for_each(&job->dependencies, index, fence) {
809 dma_fence_put(fence);
810 }
811 xa_destroy(&job->dependencies);
812
813 }
814 EXPORT_SYMBOL(drm_sched_job_cleanup);
815
816 /**
817 * drm_sched_ready - is the scheduler ready
818 *
819 * @sched: scheduler instance
820 *
821 * Return true if we can push more jobs to the hw, otherwise false.
822 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
824 {
825 return atomic_read(&sched->hw_rq_count) <
826 sched->hw_submission_limit;
827 }
828
829 /**
830 * drm_sched_wakeup - Wake up the scheduler when it is ready
831 *
832 * @sched: scheduler instance
833 *
834 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
836 {
837 if (drm_sched_ready(sched))
838 wake_up_interruptible(&sched->wake_up_worker);
839 }
840
841 /**
842 * drm_sched_select_entity - Select next entity to process
843 *
844 * @sched: scheduler instance
845 *
846 * Returns the entity to process or NULL if none are found.
847 */
848 static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
850 {
851 struct drm_sched_entity *entity;
852 int i;
853
854 if (!drm_sched_ready(sched))
855 return NULL;
856
	/* Kernel run queue has higher priority than normal run queue */
858 for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
859 entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
860 drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
861 drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
862 if (entity)
863 break;
864 }
865
866 return entity;
867 }
868
869 /**
870 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
871 *
872 * @sched: scheduler instance
873 *
874 * Returns the next finished job from the pending list (if there is one)
875 * ready for it to be destroyed.
876 */
877 static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
879 {
880 struct drm_sched_job *job, *next;
881
882 spin_lock(&sched->job_list_lock);
883
884 job = list_first_entry_or_null(&sched->pending_list,
885 struct drm_sched_job, list);
886
887 if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
888 /* remove job from pending_list */
889 list_del_init(&job->list);
890
891 /* cancel this job's TO timer */
892 cancel_delayed_work(&sched->work_tdr);
893 /* make the scheduled timestamp more accurate */
894 next = list_first_entry_or_null(&sched->pending_list,
895 typeof(*next), list);
896
897 if (next) {
898 next->s_fence->scheduled.timestamp =
899 job->s_fence->finished.timestamp;
900 /* start TO timer for next job */
901 drm_sched_start_timeout(sched);
902 }
903 } else {
904 job = NULL;
905 }
906
907 spin_unlock(&sched->job_list_lock);
908
909 if (job) {
910 job->entity->elapsed_ns += ktime_to_ns(
911 ktime_sub(job->s_fence->finished.timestamp,
912 job->s_fence->scheduled.timestamp));
913 }
914
915 return job;
916 }
917
918 /**
919 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
920 * @sched_list: list of drm_gpu_schedulers
921 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
922 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
925 */
926 struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
929 {
930 struct drm_gpu_scheduler *sched, *picked_sched = NULL;
931 int i;
932 unsigned int min_score = UINT_MAX, num_score;
933
934 for (i = 0; i < num_sched_list; ++i) {
935 sched = sched_list[i];
936
937 if (!sched->ready) {
938 DRM_WARN("scheduler %s is not ready, skipping",
939 sched->name);
940 continue;
941 }
942
943 num_score = atomic_read(sched->score);
944 if (num_score < min_score) {
945 min_score = num_score;
946 picked_sched = sched;
947 }
948 }
949
950 return picked_sched;
951 }
952 EXPORT_SYMBOL(drm_sched_pick_best);
953
954 /**
955 * drm_sched_blocked - check if the scheduler is blocked
956 *
957 * @sched: scheduler instance
958 *
959 * Returns true if blocked, otherwise false.
960 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
962 {
963 if (kthread_should_park()) {
964 kthread_parkme();
965 return true;
966 }
967
968 return false;
969 }
970
971 /**
972 * drm_sched_main - main scheduler thread
973 *
974 * @param: scheduler instance
975 *
976 * Returns 0.
977 */
static int drm_sched_main(void *param)
979 {
980 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
981 int r;
982
983 sched_set_fifo_low(current);
984
985 while (!kthread_should_stop()) {
986 struct drm_sched_entity *entity = NULL;
987 struct drm_sched_fence *s_fence;
988 struct drm_sched_job *sched_job;
989 struct dma_fence *fence;
990 struct drm_sched_job *cleanup_job = NULL;
991
992 wait_event_interruptible(sched->wake_up_worker,
993 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
994 (!drm_sched_blocked(sched) &&
995 (entity = drm_sched_select_entity(sched))) ||
996 kthread_should_stop());
997
998 if (cleanup_job)
999 sched->ops->free_job(cleanup_job);
1000
1001 if (!entity)
1002 continue;
1003
1004 sched_job = drm_sched_entity_pop_job(entity);
1005
1006 if (!sched_job) {
1007 complete_all(&entity->entity_idle);
1008 continue;
1009 }
1010
1011 s_fence = sched_job->s_fence;
1012
1013 atomic_inc(&sched->hw_rq_count);
1014 drm_sched_job_begin(sched_job);
1015
1016 trace_drm_run_job(sched_job, entity);
1017 fence = sched->ops->run_job(sched_job);
1018 complete_all(&entity->entity_idle);
1019 drm_sched_fence_scheduled(s_fence);
1020
1021 if (!IS_ERR_OR_NULL(fence)) {
1022 s_fence->parent = dma_fence_get(fence);
1023 /* Drop for original kref_init of the fence */
1024 dma_fence_put(fence);
1025
1026 r = dma_fence_add_callback(fence, &sched_job->cb,
1027 drm_sched_job_done_cb);
1028 if (r == -ENOENT)
1029 drm_sched_job_done(sched_job);
1030 else if (r)
1031 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
1032 r);
1033 } else {
1034 if (IS_ERR(fence))
1035 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
1036
1037 drm_sched_job_done(sched_job);
1038 }
1039
1040 wake_up(&sched->job_scheduled);
1041 }
1042 return 0;
1043 }
1044
1045 /**
1046 * drm_sched_init - Init a gpu scheduler instance
1047 *
1048 * @sched: scheduler instance
1049 * @ops: backend operations for this scheduler
1050 * @hw_submission: number of hw submissions that can be in flight
1051 * @hang_limit: number of times to allow a job to hang before dropping it
1052 * @timeout: timeout value in jiffies for the scheduler
1053 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
1054 * used
1055 * @score: optional score atomic shared with other schedulers
1056 * @name: name used for debugging
1057 * @dev: target &struct device
1058 *
1059 * Return 0 on success, otherwise error code.
1060 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
1066 {
1067 int i, ret;
1068 sched->ops = ops;
1069 sched->hw_submission_limit = hw_submission;
1070 sched->name = name;
1071 sched->timeout = timeout;
1072 sched->timeout_wq = timeout_wq ? : system_wq;
1073 sched->hang_limit = hang_limit;
1074 sched->score = score ? score : &sched->_score;
1075 sched->dev = dev;
1076 for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
1077 drm_sched_rq_init(sched, &sched->sched_rq[i]);
1078
1079 init_waitqueue_head(&sched->wake_up_worker);
1080 init_waitqueue_head(&sched->job_scheduled);
1081 INIT_LIST_HEAD(&sched->pending_list);
1082 spin_lock_init(&sched->job_list_lock);
1083 atomic_set(&sched->hw_rq_count, 0);
1084 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1085 atomic_set(&sched->_score, 0);
1086 atomic64_set(&sched->job_id_count, 0);
1087
	/* Each scheduler will run on a separate kernel thread */
1089 sched->thread = kthread_run(drm_sched_main, sched, sched->name);
1090 if (IS_ERR(sched->thread)) {
1091 ret = PTR_ERR(sched->thread);
1092 sched->thread = NULL;
1093 DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
1094 return ret;
1095 }
1096
1097 sched->ready = true;
1098 return 0;
1099 }
1100 EXPORT_SYMBOL(drm_sched_init);
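
/*
 * Illustrative sketch of a driver creating one scheduler per hardware ring
 * (hypothetical driver code and ops; the submission depth and timeout values
 * are made up):
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops, 16, 0,
 *			     msecs_to_jiffies(500), NULL, NULL,
 *			     ring->name, dev->dev);
 *	if (ret)
 *		return ret;
 */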
1101
1102 /**
1103 * drm_sched_fini - Destroy a gpu scheduler
1104 *
1105 * @sched: scheduler instance
1106 *
1107 * Tears down and cleans up the scheduler.
1108 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
1110 {
1111 struct drm_sched_entity *s_entity;
1112 int i;
1113
1114 if (sched->thread)
1115 kthread_stop(sched->thread);
1116
1117 for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
1118 struct drm_sched_rq *rq = &sched->sched_rq[i];
1119
1120 if (!rq)
1121 continue;
1122
1123 spin_lock(&rq->lock);
1124 list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle;
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
1130 s_entity->stopped = true;
1131 spin_unlock(&rq->lock);
1132
1133 }
1134
1135 /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1136 wake_up_all(&sched->job_scheduled);
1137
1138 /* Confirm no work left behind accessing device structures */
1139 cancel_delayed_work_sync(&sched->work_tdr);
1140
1141 sched->ready = false;
1142 }
1143 EXPORT_SYMBOL(drm_sched_fini);
1144
1145 /**
1146 * drm_sched_increase_karma - Update sched_entity guilty flag
1147 *
1148 * @bad: The job guilty of time out
1149 *
1150 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1151 * limit of the scheduler then the respective sched entity is marked guilty and
1152 * jobs from it will not be scheduled further
1153 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
1155 {
1156 int i;
1157 struct drm_sched_entity *tmp;
1158 struct drm_sched_entity *entity;
1159 struct drm_gpu_scheduler *sched = bad->sched;
1160
	/* Don't change @bad's karma if it's from the KERNEL RQ, because a GPU
	 * hang can sometimes corrupt kernel jobs (like VM updating jobs), but
	 * keep in mind that kernel jobs are always considered good.
	 */
1165 if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1166 atomic_inc(&bad->karma);
1167
1168 for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
1169 i++) {
1170 struct drm_sched_rq *rq = &sched->sched_rq[i];
1171
1172 spin_lock(&rq->lock);
1173 list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1174 if (bad->s_fence->scheduled.context ==
1175 entity->fence_context) {
1176 if (entity->guilty)
1177 atomic_set(entity->guilty, 1);
1178 break;
1179 }
1180 }
1181 spin_unlock(&rq->lock);
1182 if (&entity->list != &rq->entities)
1183 break;
1184 }
1185 }
1186 }
1187 EXPORT_SYMBOL(drm_sched_increase_karma);
1188