1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10
11 #include <linux/uaccess.h>
12 #include <linux/slab.h>
13
14 #define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
15 HL_CS_FLAGS_COLLECTIVE_WAIT)
16
17 /**
18 * enum hl_cs_wait_status - cs wait status
19 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
20 * @CS_WAIT_STATUS_COMPLETED: cs completed
21 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
22 */
23 enum hl_cs_wait_status {
24 CS_WAIT_STATUS_BUSY,
25 CS_WAIT_STATUS_COMPLETED,
26 CS_WAIT_STATUS_GONE
27 };
28
29 static void job_wq_completion(struct work_struct *work);
30 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
31 u64 timeout_us, u64 seq,
32 enum hl_cs_wait_status *status, s64 *timestamp);
33 static void cs_do_release(struct kref *ref);
34
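/*
 * hl_sob_reset() - reset the H/W SOB once its kref drops to zero.
 * @ref: kref field of the hl_hw_sob object that reached zero
 */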
35 static void hl_sob_reset(struct kref *ref)
36 {
37 struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
38 kref);
39 struct hl_device *hdev = hw_sob->hdev;
40
41 dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);
42
43 hdev->asic_funcs->reset_sob(hdev, hw_sob);
44
45 hw_sob->need_reset = false;
46 }
47
48 void hl_sob_reset_error(struct kref *ref)
49 {
50 struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
51 kref);
52 struct hl_device *hdev = hw_sob->hdev;
53
54 dev_crit(hdev->dev,
55 "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
56 hw_sob->q_idx, hw_sob->sob_id);
57 }
58
59 void hw_sob_put(struct hl_hw_sob *hw_sob)
60 {
61 if (hw_sob)
62 kref_put(&hw_sob->kref, hl_sob_reset);
63 }
64
65 static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
66 {
67 if (hw_sob)
68 kref_put(&hw_sob->kref, hl_sob_reset_error);
69 }
70
71 void hw_sob_get(struct hl_hw_sob *hw_sob)
72 {
73 if (hw_sob)
74 kref_get(&hw_sob->kref);
75 }
76
77 /**
78 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
79 * @sob_base: sob base id
80 * @sob_mask: sob user mask, each bit represents a sob offset from sob base
81 * @mask: generated mask
82 *
83 * Return: 0 if given parameters are valid
84 */
85 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
86 {
87 int i;
88
89 if (sob_mask == 0)
90 return -EINVAL;
91
92 if (sob_mask == 0x1) {
93 *mask = ~(1 << (sob_base & 0x7));
94 } else {
95 /* find msb in order to verify sob range is valid */
96 for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
97 if (BIT(i) & sob_mask)
98 break;
99
100 if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
101 return -EINVAL;
102
103 *mask = ~sob_mask;
104 }
105
106 return 0;
107 }
108
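/* Free the CS completion object that embeds the released fence */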
109 static void hl_fence_release(struct kref *kref)
110 {
111 struct hl_fence *fence =
112 container_of(kref, struct hl_fence, refcount);
113 struct hl_cs_compl *hl_cs_cmpl =
114 container_of(fence, struct hl_cs_compl, base_fence);
115
116 kfree(hl_cs_cmpl);
117 }
118
119 void hl_fence_put(struct hl_fence *fence)
120 {
121 if (IS_ERR_OR_NULL(fence))
122 return;
123 kref_put(&fence->refcount, hl_fence_release);
124 }
125
126 void hl_fences_put(struct hl_fence **fence, int len)
127 {
128 int i;
129
130 for (i = 0; i < len; i++, fence++)
131 hl_fence_put(*fence);
132 }
133
134 void hl_fence_get(struct hl_fence *fence)
135 {
136 if (fence)
137 kref_get(&fence->refcount);
138 }
139
140 static void hl_fence_init(struct hl_fence *fence, u64 sequence)
141 {
142 kref_init(&fence->refcount);
143 fence->cs_sequence = sequence;
144 fence->error = 0;
145 fence->timestamp = ktime_set(0, 0);
146 fence->mcs_handling_done = false;
147 init_completion(&fence->completion);
148 }
149
150 void cs_get(struct hl_cs *cs)
151 {
152 kref_get(&cs->refcount);
153 }
154
155 static int cs_get_unless_zero(struct hl_cs *cs)
156 {
157 return kref_get_unless_zero(&cs->refcount);
158 }
159
160 static void cs_put(struct hl_cs *cs)
161 {
162 kref_put(&cs->refcount, cs_do_release);
163 }
164
165 static void cs_job_do_release(struct kref *ref)
166 {
167 struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);
168
169 kfree(job);
170 }
171
172 static void cs_job_put(struct hl_cs_job *job)
173 {
174 kref_put(&job->refcount, cs_job_do_release);
175 }
176
177 bool cs_needs_completion(struct hl_cs *cs)
178 {
179 /* In case this is a staged CS, only the last CS in sequence should
180 * get a completion; any non-staged CS will always get a completion
181 */
182 if (cs->staged_cs && !cs->staged_last)
183 return false;
184
185 return true;
186 }
187
188 bool cs_needs_timeout(struct hl_cs *cs)
189 {
190 /* In case this is a staged CS, only the first CS in sequence should
191 * get a timeout; any non-staged CS will always get a timeout
192 */
193 if (cs->staged_cs && !cs->staged_first)
194 return false;
195
196 return true;
197 }
198
199 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
200 {
201 /*
202 * Patched CB is created for external queue jobs, and for H/W queue
203 * jobs if the user CB was allocated by driver and MMU is disabled.
204 */
205 return (job->queue_type == QUEUE_TYPE_EXT ||
206 (job->queue_type == QUEUE_TYPE_HW &&
207 job->is_kernel_allocated_cb &&
208 !hdev->mmu_enable));
209 }
210
211 /*
212 * cs_parser - parse the user command submission
213 *
214 * @hpriv: pointer to the private data of the fd
215 * @job: pointer to the job that holds the command submission info
216 *
217 * The function parses the command submission of the user. It calls the
218 * ASIC specific parser, which returns a list of memory blocks to send
219 * to the device as different command buffers
220 *
221 */
222 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
223 {
224 struct hl_device *hdev = hpriv->hdev;
225 struct hl_cs_parser parser;
226 int rc;
227
228 parser.ctx_id = job->cs->ctx->asid;
229 parser.cs_sequence = job->cs->sequence;
230 parser.job_id = job->id;
231
232 parser.hw_queue_id = job->hw_queue_id;
233 parser.job_userptr_list = &job->userptr_list;
234 parser.patched_cb = NULL;
235 parser.user_cb = job->user_cb;
236 parser.user_cb_size = job->user_cb_size;
237 parser.queue_type = job->queue_type;
238 parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
239 job->patched_cb = NULL;
240 parser.completion = cs_needs_completion(job->cs);
241
242 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
243
244 if (is_cb_patched(hdev, job)) {
245 if (!rc) {
246 job->patched_cb = parser.patched_cb;
247 job->job_cb_size = parser.patched_cb_size;
248 job->contains_dma_pkt = parser.contains_dma_pkt;
249 atomic_inc(&job->patched_cb->cs_cnt);
250 }
251
252 /*
253 * Whether the parsing worked or not, we don't need the
254 * original CB anymore because it was already parsed and
255 * won't be accessed again for this CS
256 */
257 atomic_dec(&job->user_cb->cs_cnt);
258 hl_cb_put(job->user_cb);
259 job->user_cb = NULL;
260 } else if (!rc) {
261 job->job_cb_size = job->user_cb_size;
262 }
263
264 return rc;
265 }
266
267 static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
268 {
269 struct hl_cs *cs = job->cs;
270
271 if (is_cb_patched(hdev, job)) {
272 hl_userptr_delete_list(hdev, &job->userptr_list);
273
274 /*
275 * We might arrive here from rollback and patched CB wasn't
276 * created, so we need to check it's not NULL
277 */
278 if (job->patched_cb) {
279 atomic_dec(&job->patched_cb->cs_cnt);
280 hl_cb_put(job->patched_cb);
281 }
282 }
283
284 /* For H/W queue jobs, if a user CB was allocated by driver and MMU is
285 * enabled, the user CB isn't released in cs_parser() and thus should be
286 * released here.
287 * This is also true for INT queue jobs which were allocated by the driver
288 */
289 if (job->is_kernel_allocated_cb &&
290 ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
291 job->queue_type == QUEUE_TYPE_INT)) {
292 atomic_dec(&job->user_cb->cs_cnt);
293 hl_cb_put(job->user_cb);
294 }
295
296 /*
297 * This is the only place where there can be multiple threads
298 * modifying the list at the same time
299 */
300 spin_lock(&cs->job_lock);
301 list_del(&job->cs_node);
302 spin_unlock(&cs->job_lock);
303
304 hl_debugfs_remove_job(hdev, job);
305
306 /* We decrement reference only for a CS that gets completion
307 * because the reference was incremented only for this kind of CS
308 * right before it was scheduled.
309 *
310 * In staged submission, only the last CS marked as 'staged_last'
311 * gets completion, hence its release function will be called from here.
312 * As for all the rest of the CSs in the staged submission which do not get
313 * completion, their CS reference will be decremented by the
314 * 'staged_last' CS during the CS release flow.
315 * All relevant PQ CI counters will be incremented during the CS release
316 * flow by calling 'hl_hw_queue_update_ci'.
317 */
318 if (cs_needs_completion(cs) &&
319 (job->queue_type == QUEUE_TYPE_EXT ||
320 job->queue_type == QUEUE_TYPE_HW))
321 cs_put(cs);
322
323 cs_job_put(job);
324 }
325
326 /*
327 * hl_staged_cs_find_first - locate the first CS in this staged submission
328 *
329 * @hdev: pointer to device structure
330 * @cs_seq: staged submission sequence number
331 *
332 * @note: This function must be called under 'hdev->cs_mirror_lock'
333 *
334 * Find and return a CS pointer with the given sequence
335 */
336 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
337 {
338 struct hl_cs *cs;
339
340 list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
341 if (cs->staged_cs && cs->staged_first &&
342 cs->sequence == cs_seq)
343 return cs;
344
345 return NULL;
346 }
347
348 /*
349 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
350 *
351 * @hdev: pointer to device structure
352 * @cs: staged submission member
353 *
354 */
355 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
356 {
357 struct hl_cs *last_entry;
358
359 last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
360 staged_cs_node);
361
362 if (last_entry->staged_last)
363 return true;
364
365 return false;
366 }
367
368 /*
369 * staged_cs_get - get CS reference if this CS is a part of a staged CS
370 *
371 * @hdev: pointer to device structure
372 * @cs: current CS
373 * @cs_seq: staged submission sequence number
374 *
375 * Increment CS reference for every CS in this staged submission except for
376 * the CS which gets completion.
377 */
378 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
379 {
380 /* Only the last CS in this staged submission will get a completion.
381 * We must increment the reference for all other CS's in this
382 * staged submission.
383 * Once we get a completion we will release the whole staged submission.
384 */
385 if (!cs->staged_last)
386 cs_get(cs);
387 }
388
389 /*
390 * staged_cs_put - put a CS in case it is part of staged submission
391 *
392 * @hdev: pointer to device structure
393 * @cs: CS to put
394 *
395 * This function decrements a CS reference (for a non completion CS)
396 */
397 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
398 {
399 /* We release all CS's in a staged submission except the last
400 * CS, whose reference we never incremented.
401 */
402 if (!cs_needs_completion(cs))
403 cs_put(cs);
404 }
405
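/*
 * cs_handle_tdr() - cancel the TDR work of a completed CS (unless it timed
 * out) and arm the TDR of the next CS in the mirror list that needs one.
 */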
406 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
407 {
408 bool next_entry_found = false;
409 struct hl_cs *next, *first_cs;
410
411 if (!cs_needs_timeout(cs))
412 return;
413
414 spin_lock(&hdev->cs_mirror_lock);
415
416 /* We need to handle tdr only once for the complete staged submission.
417 * Hence, we choose the CS that reaches this function first which is
418 * the CS marked as 'staged_last'.
419 * In case single staged cs was submitted which has both first and last
420 * indications, then "hl_staged_cs_find_first" below will return NULL, since
421 * we removed the cs node from the list before getting here,
422 * in such cases just continue with the cs to cancel its TDR work.
423 */
424 if (cs->staged_cs && cs->staged_last) {
425 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
426 if (first_cs)
427 cs = first_cs;
428 }
429
430 spin_unlock(&hdev->cs_mirror_lock);
431
432 /* Don't cancel TDR in case this CS timed out because we might be
433 * running from the TDR context
434 */
435 if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
436 return;
437
438 if (cs->tdr_active)
439 cancel_delayed_work_sync(&cs->work_tdr);
440
441 spin_lock(&hdev->cs_mirror_lock);
442
443 /* queue TDR for next CS */
444 list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
445 if (cs_needs_timeout(next)) {
446 next_entry_found = true;
447 break;
448 }
449
450 if (next_entry_found && !next->tdr_active) {
451 next->tdr_active = true;
452 schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
453 }
454
455 spin_unlock(&hdev->cs_mirror_lock);
456 }
457
458 /*
459 * force_complete_multi_cs - complete all contexts that wait on multi-CS
460 *
461 * @hdev: pointer to habanalabs device structure
462 */
463 static void force_complete_multi_cs(struct hl_device *hdev)
464 {
465 int i;
466
467 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
468 struct multi_cs_completion *mcs_compl;
469
470 mcs_compl = &hdev->multi_cs_completion[i];
471
472 spin_lock(&mcs_compl->lock);
473
474 if (!mcs_compl->used) {
475 spin_unlock(&mcs_compl->lock);
476 continue;
477 }
478
479 /* When calling force complete, no context should be waiting on
480 * multi-CS.
481 * We call the function as a protection for such a case, to
482 * free any pending context and print an error message
483 */
484 dev_err(hdev->dev,
485 "multi-CS completion context %d still waiting when calling force completion\n",
486 i);
487 complete_all(&mcs_compl->completion);
488 spin_unlock(&mcs_compl->lock);
489 }
490 }
491
492 /*
493 * complete_multi_cs - complete all waiting entities on multi-CS
494 *
495 * @hdev: pointer to habanalabs device structure
496 * @cs: CS structure
497 * The function signals a waiting entity that has overlapping stream masters
498 * with the completed CS.
499 * For example:
500 * - a completed CS worked on stream master QID 4, multi CS completion
501 * is actively waiting on stream master QIDs 3, 5. don't send signal as no
502 * common stream master QID
503 * - a completed CS worked on stream master QID 4, multi CS completion
504 * is actively waiting on stream master QIDs 3, 4. send signal as stream
505 * master QID 4 is common
506 */
507 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
508 {
509 struct hl_fence *fence = cs->fence;
510 int i;
511
512 /* in case of multi CS check for completion only for the first CS */
513 if (cs->staged_cs && !cs->staged_first)
514 return;
515
516 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
517 struct multi_cs_completion *mcs_compl;
518
519 mcs_compl = &hdev->multi_cs_completion[i];
520 if (!mcs_compl->used)
521 continue;
522
523 spin_lock(&mcs_compl->lock);
524
525 /*
526 * complete if:
527 * 1. still waiting for completion
528 * 2. the completed CS has at least one overlapping stream
529 * master with the stream masters in the completion
530 */
531 if (mcs_compl->used &&
532 (fence->stream_master_qid_map &
533 mcs_compl->stream_master_qid_map)) {
534 /* extract the timestamp only of first completed CS */
535 if (!mcs_compl->timestamp)
536 mcs_compl->timestamp =
537 ktime_to_ns(fence->timestamp);
538 complete_all(&mcs_compl->completion);
539
540 /*
541 * Setting mcs_handling_done inside the lock ensures
542 * at least one fence has mcs_handling_done set to
543 * true before the wait for mcs finishes. This ensures at
544 * least one CS will be set as completed when polling
545 * mcs fences.
546 */
547 fence->mcs_handling_done = true;
548 }
549
550 spin_unlock(&mcs_compl->lock);
551 }
552 /* In case CS completed without mcs completion initialized */
553 fence->mcs_handling_done = true;
554 }
555
556 static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
557 struct hl_cs *cs,
558 struct hl_cs_compl *hl_cs_cmpl)
559 {
560 /* Skip this handler if the cs wasn't submitted, to avoid putting
561 * the hw_sob twice, since this case was already handled at this point.
562 * Also skip if the hw_sob pointer wasn't set.
563 */
564 if (!hl_cs_cmpl->hw_sob || !cs->submitted)
565 return;
566
567 spin_lock(&hl_cs_cmpl->lock);
568
569 /*
570 * we get refcount upon reservation of signals or signal/wait cs for the
571 * hw_sob object, and need to put it when the first staged cs
572 * (which contains the encaps signals) or cs signal/wait is completed.
573 */
574 if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
575 (hl_cs_cmpl->type == CS_TYPE_WAIT) ||
576 (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
577 (!!hl_cs_cmpl->encaps_signals)) {
578 dev_dbg(hdev->dev,
579 "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
580 hl_cs_cmpl->cs_seq,
581 hl_cs_cmpl->type,
582 hl_cs_cmpl->hw_sob->sob_id,
583 hl_cs_cmpl->sob_val);
584
585 hw_sob_put(hl_cs_cmpl->hw_sob);
586
587 if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
588 hdev->asic_funcs->reset_sob_group(hdev,
589 hl_cs_cmpl->sob_group);
590 }
591
592 spin_unlock(&hl_cs_cmpl->lock);
593 }
594
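/*
 * cs_do_release() - final CS release, called when the CS refcount drops to
 * zero. Completes any remaining jobs, updates queue CIs, removes the CS from
 * the mirror list, handles TDR and staged-submission teardown, signals the
 * fence and frees the CS object.
 */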
595 static void cs_do_release(struct kref *ref)
596 {
597 struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
598 struct hl_device *hdev = cs->ctx->hdev;
599 struct hl_cs_job *job, *tmp;
600 struct hl_cs_compl *hl_cs_cmpl =
601 container_of(cs->fence, struct hl_cs_compl, base_fence);
602
603 cs->completed = true;
604
605 /*
606 * Although reaching here means that all external jobs have
607 * finished, because each one of them took a refcnt on the CS, we still
608 * need to go over the internal jobs and complete them. Otherwise, we
609 * will have leaked memory and what's worse, the CS object (and
610 * potentially the CTX object) could be released, while the JOB
611 * still holds a pointer to them (but no reference).
612 */
613 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
614 complete_job(hdev, job);
615
616 if (!cs->submitted) {
617 /*
618 * In case the wait for signal CS was submitted, the fence put
619 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
620 * right before hanging on the PQ.
621 */
622 if (cs->type == CS_TYPE_WAIT ||
623 cs->type == CS_TYPE_COLLECTIVE_WAIT)
624 hl_fence_put(cs->signal_fence);
625
626 goto out;
627 }
628
629 /* Need to update CI for all queue jobs that do not get completion */
630 hl_hw_queue_update_ci(cs);
631
632 /* remove CS from CS mirror list */
633 spin_lock(&hdev->cs_mirror_lock);
634 list_del_init(&cs->mirror_node);
635 spin_unlock(&hdev->cs_mirror_lock);
636
637 cs_handle_tdr(hdev, cs);
638
639 if (cs->staged_cs) {
640 /* the completion CS decrements reference for the entire
641 * staged submission
642 */
643 if (cs->staged_last) {
644 struct hl_cs *staged_cs, *tmp;
645
646 list_for_each_entry_safe(staged_cs, tmp,
647 &cs->staged_cs_node, staged_cs_node)
648 staged_cs_put(hdev, staged_cs);
649 }
650
651 /* A staged CS will be a member in the list only after it
652 * was submitted. We used 'cs_mirror_lock' when inserting
653 * it into the list so we will use it again when removing it
654 */
655 if (cs->submitted) {
656 spin_lock(&hdev->cs_mirror_lock);
657 list_del(&cs->staged_cs_node);
658 spin_unlock(&hdev->cs_mirror_lock);
659 }
660
661 /* decrement refcount to handle when first staged cs
662 * with encaps signals is completed.
663 */
664 if (hl_cs_cmpl->encaps_signals)
665 kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
666 hl_encaps_handle_do_release);
667 }
668
669 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
670 && cs->encaps_signals)
671 kref_put(&cs->encaps_sig_hdl->refcount,
672 hl_encaps_handle_do_release);
673
674 out:
675 /* Must be called before hl_ctx_put because inside we use ctx to get
676 * the device
677 */
678 hl_debugfs_remove_cs(cs);
679
680 hl_ctx_put(cs->ctx);
681
682 /* We need to mark an error for not submitted because in that case
683 * the hl fence release flow is different. Mainly, we don't need
684 * to handle hw_sob for signal/wait
685 */
686 if (cs->timedout)
687 cs->fence->error = -ETIMEDOUT;
688 else if (cs->aborted)
689 cs->fence->error = -EIO;
690 else if (!cs->submitted)
691 cs->fence->error = -EBUSY;
692
693 if (unlikely(cs->skip_reset_on_timeout)) {
694 dev_err(hdev->dev,
695 "Command submission %llu completed after %llu (s)\n",
696 cs->sequence,
697 div_u64(jiffies - cs->submission_time_jiffies, HZ));
698 }
699
700 if (cs->timestamp)
701 cs->fence->timestamp = ktime_get();
702 complete_all(&cs->fence->completion);
703 complete_multi_cs(hdev, cs);
704
705 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);
706
707 hl_fence_put(cs->fence);
708
709 kfree(cs->jobs_in_queue_cnt);
710 kfree(cs);
711 }
712
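/* TDR work function - called when a CS has not completed within its timeout */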
713 static void cs_timedout(struct work_struct *work)
714 {
715 struct hl_device *hdev;
716 int rc;
717 struct hl_cs *cs = container_of(work, struct hl_cs,
718 work_tdr.work);
719 bool skip_reset_on_timeout = cs->skip_reset_on_timeout;
720
721 rc = cs_get_unless_zero(cs);
722 if (!rc)
723 return;
724
725 if ((!cs->submitted) || (cs->completed)) {
726 cs_put(cs);
727 return;
728 }
729
730 /* Mark the CS as timed out so we won't try to cancel its TDR */
731 if (likely(!skip_reset_on_timeout))
732 cs->timedout = true;
733
734 hdev = cs->ctx->hdev;
735
736 switch (cs->type) {
737 case CS_TYPE_SIGNAL:
738 dev_err(hdev->dev,
739 "Signal command submission %llu has not finished in time!\n",
740 cs->sequence);
741 break;
742
743 case CS_TYPE_WAIT:
744 dev_err(hdev->dev,
745 "Wait command submission %llu has not finished in time!\n",
746 cs->sequence);
747 break;
748
749 case CS_TYPE_COLLECTIVE_WAIT:
750 dev_err(hdev->dev,
751 "Collective Wait command submission %llu has not finished in time!\n",
752 cs->sequence);
753 break;
754
755 default:
756 dev_err(hdev->dev,
757 "Command submission %llu has not finished in time!\n",
758 cs->sequence);
759 break;
760 }
761
762 rc = hl_state_dump(hdev);
763 if (rc)
764 dev_err(hdev->dev, "Error during system state dump %d\n", rc);
765
766 cs_put(cs);
767
768 if (likely(!skip_reset_on_timeout)) {
769 if (hdev->reset_on_lockup)
770 hl_device_reset(hdev, HL_RESET_TDR);
771 else
772 hdev->needs_reset = true;
773 }
774 }
775
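/*
 * allocate_cs() - allocate and initialize a new CS object and its completion
 * fence, and reserve a slot for the fence in the context's cs_pending array.
 */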
776 static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
777 enum hl_cs_type cs_type, u64 user_sequence,
778 struct hl_cs **cs_new, u32 flags, u32 timeout)
779 {
780 struct hl_cs_counters_atomic *cntr;
781 struct hl_fence *other = NULL;
782 struct hl_cs_compl *cs_cmpl;
783 struct hl_cs *cs;
784 int rc;
785
786 cntr = &hdev->aggregated_cs_counters;
787
788 cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
789 if (!cs)
790 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
791
792 if (!cs) {
793 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
794 atomic64_inc(&cntr->out_of_mem_drop_cnt);
795 return -ENOMEM;
796 }
797
798 /* increment refcnt for context */
799 hl_ctx_get(hdev, ctx);
800
801 cs->ctx = ctx;
802 cs->submitted = false;
803 cs->completed = false;
804 cs->type = cs_type;
805 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
806 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
807 cs->timeout_jiffies = timeout;
808 cs->skip_reset_on_timeout =
809 hdev->skip_reset_on_timeout ||
810 !!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
811 cs->submission_time_jiffies = jiffies;
812 INIT_LIST_HEAD(&cs->job_list);
813 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
814 kref_init(&cs->refcount);
815 spin_lock_init(&cs->job_lock);
816
817 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
818 if (!cs_cmpl)
819 cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);
820
821 if (!cs_cmpl) {
822 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
823 atomic64_inc(&cntr->out_of_mem_drop_cnt);
824 rc = -ENOMEM;
825 goto free_cs;
826 }
827
828 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
829 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
830 if (!cs->jobs_in_queue_cnt)
831 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
832 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);
833
834 if (!cs->jobs_in_queue_cnt) {
835 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
836 atomic64_inc(&cntr->out_of_mem_drop_cnt);
837 rc = -ENOMEM;
838 goto free_cs_cmpl;
839 }
840
841 cs_cmpl->hdev = hdev;
842 cs_cmpl->type = cs->type;
843 spin_lock_init(&cs_cmpl->lock);
844 cs->fence = &cs_cmpl->base_fence;
845
846 spin_lock(&ctx->cs_lock);
847
848 cs_cmpl->cs_seq = ctx->cs_sequence;
849 other = ctx->cs_pending[cs_cmpl->cs_seq &
850 (hdev->asic_prop.max_pending_cs - 1)];
851
852 if (other && !completion_done(&other->completion)) {
853 /* If the following statement is true, it means we have reached
854 * a point in which only part of the staged submission was
855 * submitted and we don't have enough room in the 'cs_pending'
856 * array for the rest of the submission.
857 * This causes a deadlock because this CS will never be
858 * completed as it depends on future CS's for completion.
859 */
860 if (other->cs_sequence == user_sequence)
861 dev_crit_ratelimited(hdev->dev,
862 "Staged CS %llu deadlock due to lack of resources",
863 user_sequence);
864
865 dev_dbg_ratelimited(hdev->dev,
866 "Rejecting CS because of too many in-flights CS\n");
867 atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
868 atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
869 rc = -EAGAIN;
870 goto free_fence;
871 }
872
873 /* init hl_fence */
874 hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);
875
876 cs->sequence = cs_cmpl->cs_seq;
877
878 ctx->cs_pending[cs_cmpl->cs_seq &
879 (hdev->asic_prop.max_pending_cs - 1)] =
880 &cs_cmpl->base_fence;
881 ctx->cs_sequence++;
882
883 hl_fence_get(&cs_cmpl->base_fence);
884
885 hl_fence_put(other);
886
887 spin_unlock(&ctx->cs_lock);
888
889 *cs_new = cs;
890
891 return 0;
892
893 free_fence:
894 spin_unlock(&ctx->cs_lock);
895 kfree(cs->jobs_in_queue_cnt);
896 free_cs_cmpl:
897 kfree(cs_cmpl);
898 free_cs:
899 kfree(cs);
900 hl_ctx_put(ctx);
901 return rc;
902 }
903
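/*
 * cs_rollback() - drop the CS staged reference (if any) and complete all of
 * its jobs.
 */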
904 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
905 {
906 struct hl_cs_job *job, *tmp;
907
908 staged_cs_put(hdev, cs);
909
910 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
911 complete_job(hdev, job);
912 }
913
914 void hl_cs_rollback_all(struct hl_device *hdev)
915 {
916 int i;
917 struct hl_cs *cs, *tmp;
918
919 flush_workqueue(hdev->sob_reset_wq);
920
921 /* flush all completions before iterating over the CS mirror list in
922 * order to avoid a race with the release functions
923 */
924 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
925 flush_workqueue(hdev->cq_wq[i]);
926
927 /* Make sure we don't have leftovers in the CS mirror list */
928 list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
929 cs_get(cs);
930 cs->aborted = true;
931 dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
932 cs->ctx->asid, cs->sequence);
933 cs_rollback(hdev, cs);
934 cs_put(cs);
935 }
936
937 force_complete_multi_cs(hdev);
938 }
939
940 static void
941 wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
942 {
943 struct hl_user_pending_interrupt *pend;
944 unsigned long flags;
945
946 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
947 list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
948 pend->fence.error = -EIO;
949 complete_all(&pend->fence.completion);
950 }
951 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
952 }
953
954 void hl_release_pending_user_interrupts(struct hl_device *hdev)
955 {
956 struct asic_fixed_properties *prop = &hdev->asic_prop;
957 struct hl_user_interrupt *interrupt;
958 int i;
959
960 if (!prop->user_interrupt_count)
961 return;
962
963 /* We iterate through the user interrupt requests and wake up all
964 * user threads waiting for interrupt completion. We iterate the
965 * list under a lock, this is why all user threads, once awake,
966 * will wait on the same lock and will release the waiting object upon
967 * unlock.
968 */
969
970 for (i = 0 ; i < prop->user_interrupt_count ; i++) {
971 interrupt = &hdev->user_interrupt[i];
972 wake_pending_user_interrupt_threads(interrupt);
973 }
974
975 interrupt = &hdev->common_user_interrupt;
976 wake_pending_user_interrupt_threads(interrupt);
977 }
978
979 static void job_wq_completion(struct work_struct *work)
980 {
981 struct hl_cs_job *job = container_of(work, struct hl_cs_job,
982 finish_work);
983 struct hl_cs *cs = job->cs;
984 struct hl_device *hdev = cs->ctx->hdev;
985
986 /* job is no longer needed */
987 complete_job(hdev, job);
988 }
989
990 static int validate_queue_index(struct hl_device *hdev,
991 struct hl_cs_chunk *chunk,
992 enum hl_queue_type *queue_type,
993 bool *is_kernel_allocated_cb)
994 {
995 struct asic_fixed_properties *asic = &hdev->asic_prop;
996 struct hw_queue_properties *hw_queue_prop;
997
998 /* This must be checked here to prevent out-of-bounds access to
999 * hw_queues_props array
1000 */
1001 if (chunk->queue_index >= asic->max_queues) {
1002 dev_err(hdev->dev, "Queue index %d is invalid\n",
1003 chunk->queue_index);
1004 return -EINVAL;
1005 }
1006
1007 hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
1008
1009 if (hw_queue_prop->type == QUEUE_TYPE_NA) {
1010 dev_err(hdev->dev, "Queue index %d is invalid\n",
1011 chunk->queue_index);
1012 return -EINVAL;
1013 }
1014
1015 if (hw_queue_prop->driver_only) {
1016 dev_err(hdev->dev,
1017 "Queue index %d is restricted for the kernel driver\n",
1018 chunk->queue_index);
1019 return -EINVAL;
1020 }
1021
1022 /* When hw queue type isn't QUEUE_TYPE_HW,
1023 * USER_ALLOC_CB flag shall be treated as "don't care".
1024 */
1025 if (hw_queue_prop->type == QUEUE_TYPE_HW) {
1026 if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
1027 if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
1028 dev_err(hdev->dev,
1029 "Queue index %d doesn't support user CB\n",
1030 chunk->queue_index);
1031 return -EINVAL;
1032 }
1033
1034 *is_kernel_allocated_cb = false;
1035 } else {
1036 if (!(hw_queue_prop->cb_alloc_flags &
1037 CB_ALLOC_KERNEL)) {
1038 dev_err(hdev->dev,
1039 "Queue index %d doesn't support kernel CB\n",
1040 chunk->queue_index);
1041 return -EINVAL;
1042 }
1043
1044 *is_kernel_allocated_cb = true;
1045 }
1046 } else {
1047 *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
1048 & CB_ALLOC_KERNEL);
1049 }
1050
1051 *queue_type = hw_queue_prop->type;
1052 return 0;
1053 }
1054
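/*
 * get_cb_from_cs_chunk() - look up the CB referenced by a CS chunk, validate
 * the requested size against it and take a CS reference on it.
 */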
1055 static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
1056 struct hl_cb_mgr *cb_mgr,
1057 struct hl_cs_chunk *chunk)
1058 {
1059 struct hl_cb *cb;
1060 u32 cb_handle;
1061
1062 cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
1063
1064 cb = hl_cb_get(hdev, cb_mgr, cb_handle);
1065 if (!cb) {
1066 dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
1067 return NULL;
1068 }
1069
1070 if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
1071 dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
1072 goto release_cb;
1073 }
1074
1075 atomic_inc(&cb->cs_cnt);
1076
1077 return cb;
1078
1079 release_cb:
1080 hl_cb_put(cb);
1081 return NULL;
1082 }
1083
1084 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
1085 enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
1086 {
1087 struct hl_cs_job *job;
1088
1089 job = kzalloc(sizeof(*job), GFP_ATOMIC);
1090 if (!job)
1091 job = kzalloc(sizeof(*job), GFP_KERNEL);
1092
1093 if (!job)
1094 return NULL;
1095
1096 kref_init(&job->refcount);
1097 job->queue_type = queue_type;
1098 job->is_kernel_allocated_cb = is_kernel_allocated_cb;
1099
1100 if (is_cb_patched(hdev, job))
1101 INIT_LIST_HEAD(&job->userptr_list);
1102
1103 if (job->queue_type == QUEUE_TYPE_EXT)
1104 INIT_WORK(&job->finish_work, job_wq_completion);
1105
1106 return job;
1107 }
1108
1109 static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
1110 {
1111 if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
1112 return CS_TYPE_SIGNAL;
1113 else if (cs_type_flags & HL_CS_FLAGS_WAIT)
1114 return CS_TYPE_WAIT;
1115 else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
1116 return CS_TYPE_COLLECTIVE_WAIT;
1117 else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
1118 return CS_RESERVE_SIGNALS;
1119 else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
1120 return CS_UNRESERVE_SIGNALS;
1121 else
1122 return CS_TYPE_DEFAULT;
1123 }
1124
1125 static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
1126 {
1127 struct hl_device *hdev = hpriv->hdev;
1128 struct hl_ctx *ctx = hpriv->ctx;
1129 u32 cs_type_flags, num_chunks;
1130 enum hl_device_status status;
1131 enum hl_cs_type cs_type;
1132
1133 if (!hl_device_operational(hdev, &status)) {
1134 dev_warn_ratelimited(hdev->dev,
1135 "Device is %s. Can't submit new CS\n",
1136 hdev->status[status]);
1137 return -EBUSY;
1138 }
1139
1140 if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1141 !hdev->supports_staged_submission) {
1142 dev_err(hdev->dev, "staged submission not supported");
1143 return -EPERM;
1144 }
1145
1146 cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;
1147
1148 if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
1149 dev_err(hdev->dev,
1150 "CS type flags are mutually exclusive, context %d\n",
1151 ctx->asid);
1152 return -EINVAL;
1153 }
1154
1155 cs_type = hl_cs_get_cs_type(cs_type_flags);
1156 num_chunks = args->in.num_chunks_execute;
1157
1158 if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
1159 !hdev->supports_sync_stream)) {
1160 dev_err(hdev->dev, "Sync stream CS is not supported\n");
1161 return -EINVAL;
1162 }
1163
1164 if (cs_type == CS_TYPE_DEFAULT) {
1165 if (!num_chunks) {
1166 dev_err(hdev->dev,
1167 "Got execute CS with 0 chunks, context %d\n",
1168 ctx->asid);
1169 return -EINVAL;
1170 }
1171 } else if (num_chunks != 1) {
1172 dev_err(hdev->dev,
1173 "Sync stream CS mandates one chunk only, context %d\n",
1174 ctx->asid);
1175 return -EINVAL;
1176 }
1177
1178 return 0;
1179 }
1180
1181 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
1182 struct hl_cs_chunk **cs_chunk_array,
1183 void __user *chunks, u32 num_chunks,
1184 struct hl_ctx *ctx)
1185 {
1186 u32 size_to_copy;
1187
1188 if (num_chunks > HL_MAX_JOBS_PER_CS) {
1189 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1190 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1191 dev_err(hdev->dev,
1192 "Number of chunks can NOT be larger than %d\n",
1193 HL_MAX_JOBS_PER_CS);
1194 return -EINVAL;
1195 }
1196
1197 *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
1198 GFP_ATOMIC);
1199 if (!*cs_chunk_array)
1200 *cs_chunk_array = kmalloc_array(num_chunks,
1201 sizeof(**cs_chunk_array), GFP_KERNEL);
1202 if (!*cs_chunk_array) {
1203 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1204 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1205 return -ENOMEM;
1206 }
1207
1208 size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
1209 if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
1210 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1211 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1212 dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
1213 kfree(*cs_chunk_array);
1214 return -EFAULT;
1215 }
1216
1217 return 0;
1218 }
1219
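/*
 * cs_staged_submission() - mark a CS as part of a staged submission and set
 * its first/last indications and staged sequence number.
 */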
1220 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
1221 u64 sequence, u32 flags,
1222 u32 encaps_signal_handle)
1223 {
1224 if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
1225 return 0;
1226
1227 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
1228 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);
1229
1230 if (cs->staged_first) {
1231 /* Staged CS sequence is the first CS sequence */
1232 INIT_LIST_HEAD(&cs->staged_cs_node);
1233 cs->staged_sequence = cs->sequence;
1234
1235 if (cs->encaps_signals)
1236 cs->encaps_sig_hdl_id = encaps_signal_handle;
1237 } else {
1238 /* User sequence will be validated in 'hl_hw_queue_schedule_cs'
1239 * under the cs_mirror_lock
1240 */
1241 cs->staged_sequence = sequence;
1242 }
1243
1244 /* Increment CS reference if needed */
1245 staged_cs_get(hdev, cs);
1246
1247 cs->staged_cs = true;
1248
1249 return 0;
1250 }
1251
1252 static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
1253 {
1254 int i;
1255
1256 for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
1257 if (qid == hdev->stream_master_qid_arr[i])
1258 return BIT(i);
1259
1260 return 0;
1261 }
1262
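/*
 * cs_ioctl_default() - handle a default (execute) CS: copy the chunk array
 * from user-space, build and parse a job per chunk and schedule the CS on
 * the H/W queues.
 */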
1263 static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
1264 u32 num_chunks, u64 *cs_seq, u32 flags,
1265 u32 encaps_signals_handle, u32 timeout)
1266 {
1267 bool staged_mid, int_queues_only = true;
1268 struct hl_device *hdev = hpriv->hdev;
1269 struct hl_cs_chunk *cs_chunk_array;
1270 struct hl_cs_counters_atomic *cntr;
1271 struct hl_ctx *ctx = hpriv->ctx;
1272 struct hl_cs_job *job;
1273 struct hl_cs *cs;
1274 struct hl_cb *cb;
1275 u64 user_sequence;
1276 u8 stream_master_qid_map = 0;
1277 int rc, i;
1278
1279 cntr = &hdev->aggregated_cs_counters;
1280 user_sequence = *cs_seq;
1281 *cs_seq = ULLONG_MAX;
1282
1283 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1284 hpriv->ctx);
1285 if (rc)
1286 goto out;
1287
1288 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
1289 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
1290 staged_mid = true;
1291 else
1292 staged_mid = false;
1293
1294 rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
1295 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
1296 timeout);
1297 if (rc)
1298 goto free_cs_chunk_array;
1299
1300 *cs_seq = cs->sequence;
1301
1302 hl_debugfs_add_cs(cs);
1303
1304 rc = cs_staged_submission(hdev, cs, user_sequence, flags,
1305 encaps_signals_handle);
1306 if (rc)
1307 goto free_cs_object;
1308
1309 /* If this is a staged submission we must return the staged sequence
1310 * rather than the internal CS sequence
1311 */
1312 if (cs->staged_cs)
1313 *cs_seq = cs->staged_sequence;
1314
1315 /* Validate ALL the CS chunks before submitting the CS */
1316 for (i = 0 ; i < num_chunks ; i++) {
1317 struct hl_cs_chunk *chunk = &cs_chunk_array[i];
1318 enum hl_queue_type queue_type;
1319 bool is_kernel_allocated_cb;
1320
1321 rc = validate_queue_index(hdev, chunk, &queue_type,
1322 &is_kernel_allocated_cb);
1323 if (rc) {
1324 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1325 atomic64_inc(&cntr->validation_drop_cnt);
1326 goto free_cs_object;
1327 }
1328
1329 if (is_kernel_allocated_cb) {
1330 cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
1331 if (!cb) {
1332 atomic64_inc(
1333 &ctx->cs_counters.validation_drop_cnt);
1334 atomic64_inc(&cntr->validation_drop_cnt);
1335 rc = -EINVAL;
1336 goto free_cs_object;
1337 }
1338 } else {
1339 cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
1340 }
1341
1342 if (queue_type == QUEUE_TYPE_EXT ||
1343 queue_type == QUEUE_TYPE_HW) {
1344 int_queues_only = false;
1345
1346 /*
1347 * store which streams are being used for external/HW
1348 * queues of this CS
1349 */
1350 if (hdev->supports_wait_for_multi_cs)
1351 stream_master_qid_map |=
1352 get_stream_master_qid_mask(hdev,
1353 chunk->queue_index);
1354 }
1355
1356 job = hl_cs_allocate_job(hdev, queue_type,
1357 is_kernel_allocated_cb);
1358 if (!job) {
1359 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1360 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1361 dev_err(hdev->dev, "Failed to allocate a new job\n");
1362 rc = -ENOMEM;
1363 if (is_kernel_allocated_cb)
1364 goto release_cb;
1365
1366 goto free_cs_object;
1367 }
1368
1369 job->id = i + 1;
1370 job->cs = cs;
1371 job->user_cb = cb;
1372 job->user_cb_size = chunk->cb_size;
1373 job->hw_queue_id = chunk->queue_index;
1374
1375 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1376
1377 list_add_tail(&job->cs_node, &cs->job_list);
1378
1379 /*
1380 * Increment CS reference. When CS reference is 0, CS is
1381 * done and can be signaled to user and free all its resources
1382 * Only increment for JOB on external or H/W queues, because
1383 * only for those JOBs we get completion
1384 */
1385 if (cs_needs_completion(cs) &&
1386 (job->queue_type == QUEUE_TYPE_EXT ||
1387 job->queue_type == QUEUE_TYPE_HW))
1388 cs_get(cs);
1389
1390 hl_debugfs_add_job(hdev, job);
1391
1392 rc = cs_parser(hpriv, job);
1393 if (rc) {
1394 atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
1395 atomic64_inc(&cntr->parsing_drop_cnt);
1396 dev_err(hdev->dev,
1397 "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
1398 cs->ctx->asid, cs->sequence, job->id, rc);
1399 goto free_cs_object;
1400 }
1401 }
1402
1403 /* We allow a CS with any queue type combination as long as it does
1404 * not get a completion
1405 */
1406 if (int_queues_only && cs_needs_completion(cs)) {
1407 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1408 atomic64_inc(&cntr->validation_drop_cnt);
1409 dev_err(hdev->dev,
1410 "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
1411 cs->ctx->asid, cs->sequence);
1412 rc = -EINVAL;
1413 goto free_cs_object;
1414 }
1415
1416 /*
1417 * store the (external/HW queues) streams used by the CS in the
1418 * fence object for multi-CS completion
1419 */
1420 if (hdev->supports_wait_for_multi_cs)
1421 cs->fence->stream_master_qid_map = stream_master_qid_map;
1422
1423 rc = hl_hw_queue_schedule_cs(cs);
1424 if (rc) {
1425 if (rc != -EAGAIN)
1426 dev_err(hdev->dev,
1427 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
1428 cs->ctx->asid, cs->sequence, rc);
1429 goto free_cs_object;
1430 }
1431
1432 rc = HL_CS_STATUS_SUCCESS;
1433 goto put_cs;
1434
1435 release_cb:
1436 atomic_dec(&cb->cs_cnt);
1437 hl_cb_put(cb);
1438 free_cs_object:
1439 cs_rollback(hdev, cs);
1440 *cs_seq = ULLONG_MAX;
1441 /* The path below is both for good and erroneous exits */
1442 put_cs:
1443 /* We finished with the CS in this function, so put the ref */
1444 cs_put(cs);
1445 free_cs_chunk_array:
1446 kfree(cs_chunk_array);
1447 out:
1448 return rc;
1449 }
1450
1451 static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
1452 u64 *cs_seq)
1453 {
1454 struct hl_device *hdev = hpriv->hdev;
1455 struct hl_ctx *ctx = hpriv->ctx;
1456 bool need_soft_reset = false;
1457 int rc = 0, do_ctx_switch;
1458 void __user *chunks;
1459 u32 num_chunks, tmp;
1460 int ret;
1461
1462 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
1463
1464 if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
1465 mutex_lock(&hpriv->restore_phase_mutex);
1466
1467 if (do_ctx_switch) {
1468 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
1469 if (rc) {
1470 dev_err_ratelimited(hdev->dev,
1471 "Failed to switch to context %d, rejecting CS! %d\n",
1472 ctx->asid, rc);
1473 /*
1474 * If we timed out, or if the device is not IDLE
1475 * while we want to do context-switch (-EBUSY),
1476 * we need to soft-reset because QMAN is
1477 * probably stuck. However, we can't call
1478 * reset here directly because of a deadlock, so we
1479 * need to do it at the very end of this
1480 * function
1481 */
1482 if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
1483 need_soft_reset = true;
1484 mutex_unlock(&hpriv->restore_phase_mutex);
1485 goto out;
1486 }
1487 }
1488
1489 hdev->asic_funcs->restore_phase_topology(hdev);
1490
1491 chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
1492 num_chunks = args->in.num_chunks_restore;
1493
1494 if (!num_chunks) {
1495 dev_dbg(hdev->dev,
1496 "Need to run restore phase but restore CS is empty\n");
1497 rc = 0;
1498 } else {
1499 rc = cs_ioctl_default(hpriv, chunks, num_chunks,
1500 cs_seq, 0, 0, hdev->timeout_jiffies);
1501 }
1502
1503 mutex_unlock(&hpriv->restore_phase_mutex);
1504
1505 if (rc) {
1506 dev_err(hdev->dev,
1507 "Failed to submit restore CS for context %d (%d)\n",
1508 ctx->asid, rc);
1509 goto out;
1510 }
1511
1512 /* Need to wait for restore completion before execution phase */
1513 if (num_chunks) {
1514 enum hl_cs_wait_status status;
1515 wait_again:
1516 ret = _hl_cs_wait_ioctl(hdev, ctx,
1517 jiffies_to_usecs(hdev->timeout_jiffies),
1518 *cs_seq, &status, NULL);
1519 if (ret) {
1520 if (ret == -ERESTARTSYS) {
1521 usleep_range(100, 200);
1522 goto wait_again;
1523 }
1524
1525 dev_err(hdev->dev,
1526 "Restore CS for context %d failed to complete %d\n",
1527 ctx->asid, ret);
1528 rc = -ENOEXEC;
1529 goto out;
1530 }
1531 }
1532
1533 ctx->thread_ctx_switch_wait_token = 1;
1534
1535 } else if (!ctx->thread_ctx_switch_wait_token) {
1536 rc = hl_poll_timeout_memory(hdev,
1537 &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
1538 100, jiffies_to_usecs(hdev->timeout_jiffies), false);
1539
1540 if (rc == -ETIMEDOUT) {
1541 dev_err(hdev->dev,
1542 "context switch phase timeout (%d)\n", tmp);
1543 goto out;
1544 }
1545 }
1546
1547 out:
1548 if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
1549 hl_device_reset(hdev, 0);
1550
1551 return rc;
1552 }
1553
1554 /*
1555 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
1556 * if the SOB value reaches the max value, move to the other SOB reserved
1557 * for the queue.
1558 * @hdev: pointer to device structure
1559 * @q_idx: stream queue index
1560 * @hw_sob: the H/W SOB used in this signal CS.
1561 * @count: signals count
1562 * @encaps_sig: tells whether it's reservation for encaps signals or not.
1563 *
1564 * Note that this function must be called while hw_queues_lock is taken.
1565 */
1566 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
1567 struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
1568
1569 {
1570 struct hl_sync_stream_properties *prop;
1571 struct hl_hw_sob *sob = *hw_sob, *other_sob;
1572 u8 other_sob_offset;
1573
1574 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1575
1576 hw_sob_get(sob);
1577
1578 /* check for wraparound */
1579 if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
1580 /*
1581 * Decrement as we reached the max value.
1582 * The release function won't be called here as we've
1583 * just incremented the refcount right before calling this
1584 * function.
1585 */
1586 hw_sob_put_err(sob);
1587
1588 /*
1589 * check the other sob value; if it is still in use then fail,
1590 * otherwise make the switch
1591 */
1592 other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
1593 other_sob = &prop->hw_sob[other_sob_offset];
1594
1595 if (kref_read(&other_sob->kref) != 1) {
1596 dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
1597 q_idx);
1598 return -EINVAL;
1599 }
1600
1601 /*
1602 * next_sob_val always points to the next available signal
1603 * in the sob, so in encaps signals it will be the next one
1604 * after reserving the required amount.
1605 */
1606 if (encaps_sig)
1607 prop->next_sob_val = count + 1;
1608 else
1609 prop->next_sob_val = count;
1610
1611 /* only two SOBs are currently in use */
1612 prop->curr_sob_offset = other_sob_offset;
1613 *hw_sob = other_sob;
1614
1615 /*
1616 * check if other_sob needs reset, then do it before using it
1617 * for the reservation or the next signal cs.
1618 * we do it here, and for both encaps and regular signal cs
1619 * cases in order to avoid possible races of two kref_put
1620 * of the sob which can occur at the same time if we move the
1621 * sob reset (kref_put) to the cs_do_release function.
1622 * In addition, if we have a combination of cs signal and
1623 * encaps, and at the point we need to reset the sob there were
1624 * no more reservations and only signal cs keep coming,
1625 * in such case we need signal_cs to put the refcount and
1626 * reset the sob.
1627 */
1628 if (other_sob->need_reset)
1629 hw_sob_put(other_sob);
1630
1631 if (encaps_sig) {
1632 /* set reset indication for the sob */
1633 sob->need_reset = true;
1634 hw_sob_get(other_sob);
1635 }
1636
1637 dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
1638 prop->curr_sob_offset, q_idx);
1639 } else {
1640 prop->next_sob_val += count;
1641 }
1642
1643 return 0;
1644 }
1645
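/*
 * cs_ioctl_extract_signal_seq() - extract the signal CS sequence a wait CS
 * refers to, either from the encaps signal handle or from the user's signal
 * sequence array.
 */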
1646 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
1647 struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
1648 bool encaps_signals)
1649 {
1650 u64 *signal_seq_arr = NULL;
1651 u32 size_to_copy, signal_seq_arr_len;
1652 int rc = 0;
1653
1654 if (encaps_signals) {
1655 *signal_seq = chunk->encaps_signal_seq;
1656 return 0;
1657 }
1658
1659 signal_seq_arr_len = chunk->num_signal_seq_arr;
1660
1661 /* currently only one signal seq is supported */
1662 if (signal_seq_arr_len != 1) {
1663 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1664 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1665 dev_err(hdev->dev,
1666 "Wait for signal CS supports only one signal CS seq\n");
1667 return -EINVAL;
1668 }
1669
1670 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1671 sizeof(*signal_seq_arr),
1672 GFP_ATOMIC);
1673 if (!signal_seq_arr)
1674 signal_seq_arr = kmalloc_array(signal_seq_arr_len,
1675 sizeof(*signal_seq_arr),
1676 GFP_KERNEL);
1677 if (!signal_seq_arr) {
1678 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1679 atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
1680 return -ENOMEM;
1681 }
1682
1683 size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
1684 if (copy_from_user(signal_seq_arr,
1685 u64_to_user_ptr(chunk->signal_seq_arr),
1686 size_to_copy)) {
1687 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1688 atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
1689 dev_err(hdev->dev,
1690 "Failed to copy signal seq array from user\n");
1691 rc = -EFAULT;
1692 goto out;
1693 }
1694
1695 /* currently it is guaranteed to have only one signal seq */
1696 *signal_seq = signal_seq_arr[0];
1697
1698 out:
1699 kfree(signal_seq_arr);
1700
1701 return rc;
1702 }
1703
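/*
 * cs_ioctl_signal_wait_create_jobs() - create the single kernel-CB job used
 * by a signal/wait CS and add it to the CS job list.
 */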
1704 static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
1705 struct hl_ctx *ctx, struct hl_cs *cs,
1706 enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
1707 {
1708 struct hl_cs_counters_atomic *cntr;
1709 struct hl_cs_job *job;
1710 struct hl_cb *cb;
1711 u32 cb_size;
1712
1713 cntr = &hdev->aggregated_cs_counters;
1714
1715 job = hl_cs_allocate_job(hdev, q_type, true);
1716 if (!job) {
1717 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1718 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1719 dev_err(hdev->dev, "Failed to allocate a new job\n");
1720 return -ENOMEM;
1721 }
1722
1723 if (cs->type == CS_TYPE_WAIT)
1724 cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
1725 else
1726 cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
1727
1728 cb = hl_cb_kernel_create(hdev, cb_size,
1729 q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
1730 if (!cb) {
1731 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
1732 atomic64_inc(&cntr->out_of_mem_drop_cnt);
1733 kfree(job);
1734 return -EFAULT;
1735 }
1736
1737 job->id = 0;
1738 job->cs = cs;
1739 job->user_cb = cb;
1740 atomic_inc(&job->user_cb->cs_cnt);
1741 job->user_cb_size = cb_size;
1742 job->hw_queue_id = q_idx;
1743
1744 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
1745 && cs->encaps_signals)
1746 job->encaps_sig_wait_offset = encaps_signal_offset;
1747 /*
1748 * No need for parsing, the user CB is the patched CB.
1749 * We call hl_cb_destroy() for two reasons - we don't need the CB in
1750 * the CB idr anymore and to decrement its refcount as it was
1751 * incremented inside hl_cb_kernel_create().
1752 */
1753 job->patched_cb = job->user_cb;
1754 job->job_cb_size = job->user_cb_size;
1755 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
1756
1757 /* increment refcount as for external queues we get completion */
1758 cs_get(cs);
1759
1760 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
1761
1762 list_add_tail(&job->cs_node, &cs->job_list);
1763
1764 hl_debugfs_add_job(hdev, job);
1765
1766 return 0;
1767 }
1768
1769 static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1770 u32 q_idx, u32 count,
1771 u32 *handle_id, u32 *sob_addr,
1772 u32 *signals_count)
1773 {
1774 struct hw_queue_properties *hw_queue_prop;
1775 struct hl_sync_stream_properties *prop;
1776 struct hl_device *hdev = hpriv->hdev;
1777 struct hl_cs_encaps_sig_handle *handle;
1778 struct hl_encaps_signals_mgr *mgr;
1779 struct hl_hw_sob *hw_sob;
1780 int hdl_id;
1781 int rc = 0;
1782
1783 if (count >= HL_MAX_SOB_VAL) {
1784 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
1785 count);
1786 rc = -EINVAL;
1787 goto out;
1788 }
1789
1790 if (q_idx >= hdev->asic_prop.max_queues) {
1791 dev_err(hdev->dev, "Queue index %d is invalid\n",
1792 q_idx);
1793 rc = -EINVAL;
1794 goto out;
1795 }
1796
1797 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1798
1799 if (!hw_queue_prop->supports_sync_stream) {
1800 dev_err(hdev->dev,
1801 "Queue index %d does not support sync stream operations\n",
1802 q_idx);
1803 rc = -EINVAL;
1804 goto out;
1805 }
1806
1807 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1808
1809 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1810 if (!handle) {
1811 rc = -ENOMEM;
1812 goto out;
1813 }
1814
1815 handle->count = count;
1816 mgr = &hpriv->ctx->sig_mgr;
1817
1818 spin_lock(&mgr->lock);
1819 hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
1820 spin_unlock(&mgr->lock);
1821
1822 if (hdl_id < 0) {
1823 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
1824 rc = -EINVAL;
1825 goto out;
1826 }
1827
1828 handle->id = hdl_id;
1829 handle->q_idx = q_idx;
1830 handle->hdev = hdev;
1831 kref_init(&handle->refcount);
1832
1833 hdev->asic_funcs->hw_queues_lock(hdev);
1834
1835 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1836
1837 /*
1838 * Increment the SOB value by the user-requested count in order to
1839 * reserve those signals. Check whether the amount of signals to reserve
1840 * exceeds the max SOB value; if so, switch to a new SOB.
1841 * (A userspace sketch of the reserve flow follows this function.)
1842 */
1843 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
1844 true);
1845 if (rc) {
1846 dev_err(hdev->dev, "Failed to switch SOB\n");
1847 hdev->asic_funcs->hw_queues_unlock(hdev);
1848 rc = -EINVAL;
1849 goto remove_idr;
1850 }
1851 /* set the hw_sob to the handle after calling the sob wraparound handler
1852 * since sob could have changed.
1853 */
1854 handle->hw_sob = hw_sob;
1855
1856 /* store the current sob value for unreserve validity check, and
1857 * signal offset support
1858 */
1859 handle->pre_sob_val = prop->next_sob_val - handle->count;
1860
1861 *signals_count = prop->next_sob_val;
1862 hdev->asic_funcs->hw_queues_unlock(hdev);
1863
1864 *sob_addr = handle->hw_sob->sob_addr;
1865 *handle_id = hdl_id;
1866
1867 dev_dbg(hdev->dev,
1868 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
1869 hw_sob->sob_id, handle->hw_sob->sob_addr,
1870 prop->next_sob_val - 1, q_idx, hdl_id);
1871 goto out;
1872
1873 remove_idr:
1874 spin_lock(&mgr->lock);
1875 idr_remove(&mgr->handles, hdl_id);
1876 spin_unlock(&mgr->lock);
1877
1878 kfree(handle);
1879 out:
1880 return rc;
1881 }
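
/*
 * A minimal userspace sketch of this reserve/unreserve flow. It assumes the
 * uapi flag names HL_CS_FLAGS_RESERVE_SIGNALS_ONLY and
 * HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY are the cs_flags bits that map to
 * CS_RESERVE_SIGNALS/CS_UNRESERVE_SIGNALS, and that HL_IOCTL_CS is the
 * entry point; fd, q_idx and the signal count are placeholders:
 *
 *	union hl_cs_args args = {0};
 *	__u32 handle_id;
 *
 *	args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY;
 *	args.in.encaps_signals_q_idx = q_idx;
 *	args.in.encaps_signals_count = 16;
 *	ioctl(fd, HL_IOCTL_CS, &args);
 *	handle_id = args.out.handle_id;
 *	// args.out.sob_base_addr_offset and args.out.count describe the
 *	// reserved SOB range
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.cs_flags = HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY;
 *	args.in.encaps_sig_handle_id = handle_id;
 *	ioctl(fd, HL_IOCTL_CS, &args);
 */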
1882
1883 static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
1884 {
1885 struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
1886 struct hl_sync_stream_properties *prop;
1887 struct hl_device *hdev = hpriv->hdev;
1888 struct hl_encaps_signals_mgr *mgr;
1889 struct hl_hw_sob *hw_sob;
1890 u32 q_idx, sob_addr;
1891 int rc = 0;
1892
1893 mgr = &hpriv->ctx->sig_mgr;
1894
1895 spin_lock(&mgr->lock);
1896 encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
1897 if (encaps_sig_hdl) {
1898 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
1899 handle_id, encaps_sig_hdl->hw_sob->sob_addr,
1900 encaps_sig_hdl->count);
1901
1902 hdev->asic_funcs->hw_queues_lock(hdev);
1903
1904 q_idx = encaps_sig_hdl->q_idx;
1905 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1906 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1907 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
1908
1909 /* Check whether sob_val got out of sync due to other signal
1910 * submission requests that were handled between the reserve and
1911 * unreserve calls, or due to a SOB switch upon reaching the SOB
1912 * max value (see the worked example after this function).
1913 */
1914 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
1915 != prop->next_sob_val ||
1916 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
1917 dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
1918 encaps_sig_hdl->pre_sob_val,
1919 (prop->next_sob_val - encaps_sig_hdl->count));
1920
1921 hdev->asic_funcs->hw_queues_unlock(hdev);
1922 rc = -EINVAL;
1923 goto out;
1924 }
1925
1926 /*
1927 * Decrement the SOB value by the user-requested count in order
1928 * to unreserve those signals
1929 */
1930 prop->next_sob_val -= encaps_sig_hdl->count;
1931
1932 hdev->asic_funcs->hw_queues_unlock(hdev);
1933
1934 hw_sob_put(hw_sob);
1935
1936 /* Release the id and free allocated memory of the handle */
1937 idr_remove(&mgr->handles, handle_id);
1938 kfree(encaps_sig_hdl);
1939 } else {
1940 rc = -EINVAL;
1941 dev_err(hdev->dev, "failed to unreserve signals, cannot find handler\n");
1942 }
1943 out:
1944 spin_unlock(&mgr->lock);
1945
1946 return rc;
1947 }
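
/*
 * Worked example of the validity check above, assuming nothing else touched
 * the stream between the reserve and unreserve calls: if next_sob_val was 4
 * before reserving 16 signals, the reserve bumps next_sob_val to 20 and
 * stores pre_sob_val = 20 - 16 = 4. At unreserve time the check
 * pre_sob_val + count == next_sob_val (4 + 16 == 20) holds and the current
 * SOB address still matches the one saved in the handle, so the 16 signals
 * can be rolled back; any mismatch means other signals were submitted in
 * between (or the SOB switched), and the reservation is refused.
 */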
1948
1949 static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
1950 void __user *chunks, u32 num_chunks,
1951 u64 *cs_seq, u32 flags, u32 timeout)
1952 {
1953 struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
1954 bool handle_found = false, is_wait_cs = false,
1955 wait_cs_submitted = false,
1956 cs_encaps_signals = false;
1957 struct hl_cs_chunk *cs_chunk_array, *chunk;
1958 bool staged_cs_with_encaps_signals = false;
1959 struct hw_queue_properties *hw_queue_prop;
1960 struct hl_device *hdev = hpriv->hdev;
1961 struct hl_cs_compl *sig_waitcs_cmpl;
1962 u32 q_idx, collective_engine_id = 0;
1963 struct hl_cs_counters_atomic *cntr;
1964 struct hl_fence *sig_fence = NULL;
1965 struct hl_ctx *ctx = hpriv->ctx;
1966 enum hl_queue_type q_type;
1967 struct hl_cs *cs;
1968 u64 signal_seq;
1969 int rc;
1970
1971 cntr = &hdev->aggregated_cs_counters;
1972 *cs_seq = ULLONG_MAX;
1973
1974 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1975 ctx);
1976 if (rc)
1977 goto out;
1978
1979 /* currently it is guaranteed to have only one chunk */
1980 chunk = &cs_chunk_array[0];
1981
1982 if (chunk->queue_index >= hdev->asic_prop.max_queues) {
1983 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1984 atomic64_inc(&cntr->validation_drop_cnt);
1985 dev_err(hdev->dev, "Queue index %d is invalid\n",
1986 chunk->queue_index);
1987 rc = -EINVAL;
1988 goto free_cs_chunk_array;
1989 }
1990
1991 q_idx = chunk->queue_index;
1992 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1993 q_type = hw_queue_prop->type;
1994
1995 if (!hw_queue_prop->supports_sync_stream) {
1996 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1997 atomic64_inc(&cntr->validation_drop_cnt);
1998 dev_err(hdev->dev,
1999 "Queue index %d does not support sync stream operations\n",
2000 q_idx);
2001 rc = -EINVAL;
2002 goto free_cs_chunk_array;
2003 }
2004
2005 if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
2006 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
2007 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2008 atomic64_inc(&cntr->validation_drop_cnt);
2009 dev_err(hdev->dev,
2010 "Queue index %d is invalid\n", q_idx);
2011 rc = -EINVAL;
2012 goto free_cs_chunk_array;
2013 }
2014
2015 if (!hdev->nic_ports_mask) {
2016 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2017 atomic64_inc(&cntr->validation_drop_cnt);
2018 dev_err(hdev->dev,
2019 "Collective operations not supported when NIC ports are disabled");
2020 rc = -EINVAL;
2021 goto free_cs_chunk_array;
2022 }
2023
2024 collective_engine_id = chunk->collective_engine_id;
2025 }
2026
2027 is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2028 cs_type == CS_TYPE_COLLECTIVE_WAIT);
2029
2030 cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2031
2032 if (is_wait_cs) {
2033 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2034 ctx, cs_encaps_signals);
2035 if (rc)
2036 goto free_cs_chunk_array;
2037
2038 if (cs_encaps_signals) {
2039 /* check if the cs sequence has an encapsulated
2040 * signals handle
2041 */
2042 struct idr *idp;
2043 u32 id;
2044
2045 spin_lock(&ctx->sig_mgr.lock);
2046 idp = &ctx->sig_mgr.handles;
2047 idr_for_each_entry(idp, encaps_sig_hdl, id) {
2048 if (encaps_sig_hdl->cs_seq == signal_seq) {
2049 handle_found = true;
2050 /* take a refcount to protect this handle
2051 * from being removed from the idr; needed when
2052 * multiple wait CSs are used with an offset
2053 * to wait on reserved encaps signals.
2054 */
2055 kref_get(&encaps_sig_hdl->refcount);
2056 break;
2057 }
2058 }
2059 spin_unlock(&ctx->sig_mgr.lock);
2060
2061 if (!handle_found) {
2062 /* treat as signal CS already finished */
2063 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
2064 signal_seq);
2065 rc = 0;
2066 goto free_cs_chunk_array;
2067 }
2068
2069 /* validate also the signal offset value */
2070 if (chunk->encaps_signal_offset >
2071 encaps_sig_hdl->count) {
2072 dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
2073 chunk->encaps_signal_offset,
2074 encaps_sig_hdl->count);
2075 rc = -EINVAL;
2076 goto free_cs_chunk_array;
2077 }
2078 }
2079
2080 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2081 if (IS_ERR(sig_fence)) {
2082 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2083 atomic64_inc(&cntr->validation_drop_cnt);
2084 dev_err(hdev->dev,
2085 "Failed to get signal CS with seq 0x%llx\n",
2086 signal_seq);
2087 rc = PTR_ERR(sig_fence);
2088 goto free_cs_chunk_array;
2089 }
2090
2091 if (!sig_fence) {
2092 /* signal CS already finished */
2093 rc = 0;
2094 goto free_cs_chunk_array;
2095 }
2096
2097 sig_waitcs_cmpl =
2098 container_of(sig_fence, struct hl_cs_compl, base_fence);
2099
2100 staged_cs_with_encaps_signals = !!
2101 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2102 (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2103
2104 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2105 !staged_cs_with_encaps_signals) {
2106 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2107 atomic64_inc(&cntr->validation_drop_cnt);
2108 dev_err(hdev->dev,
2109 "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
2110 signal_seq);
2111 hl_fence_put(sig_fence);
2112 rc = -EINVAL;
2113 goto free_cs_chunk_array;
2114 }
2115
2116 if (completion_done(&sig_fence->completion)) {
2117 /* signal CS already finished */
2118 hl_fence_put(sig_fence);
2119 rc = 0;
2120 goto free_cs_chunk_array;
2121 }
2122 }
2123
2124 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
2125 if (rc) {
2126 if (is_wait_cs)
2127 hl_fence_put(sig_fence);
2128
2129 goto free_cs_chunk_array;
2130 }
2131
2132 /*
2133 * Save the signal CS fence for later initialization right before
2134 * hanging the wait CS on the queue.
2135 * For the encaps signals case, we save the CS sequence and handle
2136 * pointer for later initialization.
2137 */
2138 if (is_wait_cs) {
2139 cs->signal_fence = sig_fence;
2140 /* store the handle pointer, so we don't have to
2141 * look for it again later in the flow,
2142 * when we need to set the SOB info in the hw_queue.
2143 */
2144 if (cs->encaps_signals)
2145 cs->encaps_sig_hdl = encaps_sig_hdl;
2146 }
2147
2148 hl_debugfs_add_cs(cs);
2149
2150 *cs_seq = cs->sequence;
2151
2152 if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2153 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
2154 q_idx, chunk->encaps_signal_offset);
2155 else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
2156 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
2157 cs, q_idx, collective_engine_id,
2158 chunk->encaps_signal_offset);
2159 else {
2160 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2161 atomic64_inc(&cntr->validation_drop_cnt);
2162 rc = -EINVAL;
2163 }
2164
2165 if (rc)
2166 goto free_cs_object;
2167
2168 rc = hl_hw_queue_schedule_cs(cs);
2169 if (rc) {
2170 /* In case the wait CS failed here, it means the signal CS has
2171 * already completed. We want to free all its related objects,
2172 * but we don't want to fail the ioctl.
2173 */
2174 if (is_wait_cs)
2175 rc = 0;
2176 else if (rc != -EAGAIN)
2177 dev_err(hdev->dev,
2178 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
2179 ctx->asid, cs->sequence, rc);
2180 goto free_cs_object;
2181 }
2182
2183 rc = HL_CS_STATUS_SUCCESS;
2184 if (is_wait_cs)
2185 wait_cs_submitted = true;
2186 goto put_cs;
2187
2188 free_cs_object:
2189 cs_rollback(hdev, cs);
2190 *cs_seq = ULLONG_MAX;
2191 /* The path below is both for good and erroneous exits */
2192 put_cs:
2193 /* We finished with the CS in this function, so put the ref */
2194 cs_put(cs);
2195 free_cs_chunk_array:
2196 if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
2197 is_wait_cs)
2198 kref_put(&encaps_sig_hdl->refcount,
2199 hl_encaps_handle_do_release);
2200 kfree(cs_chunk_array);
2201 out:
2202 return rc;
2203 }
2204
2205 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2206 {
2207 union hl_cs_args *args = data;
2208 enum hl_cs_type cs_type = 0;
2209 u64 cs_seq = ULONG_MAX;
2210 void __user *chunks;
2211 u32 num_chunks, flags, timeout,
2212 signals_count = 0, sob_addr = 0, handle_id = 0;
2213 int rc;
2214
2215 rc = hl_cs_sanity_checks(hpriv, args);
2216 if (rc)
2217 goto out;
2218
2219 rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2220 if (rc)
2221 goto out;
2222
2223 cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2224 ~HL_CS_FLAGS_FORCE_RESTORE);
2225 chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2226 num_chunks = args->in.num_chunks_execute;
2227 flags = args->in.cs_flags;
2228
2229 /* In case this is a staged CS, user should supply the CS sequence */
2230 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2231 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2232 cs_seq = args->in.seq;
2233
2234 timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2235 ? msecs_to_jiffies(args->in.timeout * 1000)
2236 : hpriv->hdev->timeout_jiffies;
2237
2238 switch (cs_type) {
2239 case CS_TYPE_SIGNAL:
2240 case CS_TYPE_WAIT:
2241 case CS_TYPE_COLLECTIVE_WAIT:
2242 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
2243 &cs_seq, args->in.cs_flags, timeout);
2244 break;
2245 case CS_RESERVE_SIGNALS:
2246 rc = cs_ioctl_reserve_signals(hpriv,
2247 args->in.encaps_signals_q_idx,
2248 args->in.encaps_signals_count,
2249 &handle_id, &sob_addr, &signals_count);
2250 break;
2251 case CS_UNRESERVE_SIGNALS:
2252 rc = cs_ioctl_unreserve_signals(hpriv,
2253 args->in.encaps_sig_handle_id);
2254 break;
2255 default:
2256 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
2257 args->in.cs_flags,
2258 args->in.encaps_sig_handle_id,
2259 timeout);
2260 break;
2261 }
2262 out:
2263 if (rc != -EAGAIN) {
2264 memset(args, 0, sizeof(*args));
2265
2266 if (cs_type == CS_RESERVE_SIGNALS) {
2267 args->out.handle_id = handle_id;
2268 args->out.sob_base_addr_offset = sob_addr;
2269 args->out.count = signals_count;
2270 } else {
2271 args->out.seq = cs_seq;
2272 }
2273 args->out.status = rc;
2274 }
2275
2276 return rc;
2277 }
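
/*
 * A small sketch of the custom-timeout conversion done in hl_cs_ioctl():
 * with HL_CS_FLAGS_CUSTOM_TIMEOUT set, args->in.timeout is interpreted in
 * seconds and converted via msecs_to_jiffies(timeout * 1000). Assuming
 * HL_IOCTL_CS is the entry point and the other submission fields are
 * already filled in:
 *
 *	args.in.cs_flags |= HL_CS_FLAGS_CUSTOM_TIMEOUT;
 *	args.in.timeout = 5;	// this CS times out after 5 seconds
 *				// instead of the default timeout
 *	ioctl(fd, HL_IOCTL_CS, &args);
 */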
2278
2279 static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2280 enum hl_cs_wait_status *status, u64 timeout_us,
2281 s64 *timestamp)
2282 {
2283 struct hl_device *hdev = ctx->hdev;
2284 long completion_rc;
2285 int rc = 0;
2286
2287 if (IS_ERR(fence)) {
2288 rc = PTR_ERR(fence);
2289 if (rc == -EINVAL)
2290 dev_notice_ratelimited(hdev->dev,
2291 "Can't wait on CS %llu because current CS is at seq %llu\n",
2292 seq, ctx->cs_sequence);
2293 return rc;
2294 }
2295
2296 if (!fence) {
2297 dev_dbg(hdev->dev,
2298 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
2299 seq, ctx->cs_sequence);
2300
2301 *status = CS_WAIT_STATUS_GONE;
2302 return 0;
2303 }
2304
2305 if (!timeout_us) {
2306 completion_rc = completion_done(&fence->completion);
2307 } else {
2308 unsigned long timeout;
2309
2310 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2311 timeout_us : usecs_to_jiffies(timeout_us);
2312 completion_rc =
2313 wait_for_completion_interruptible_timeout(
2314 &fence->completion, timeout);
2315 }
2316
2317 if (completion_rc > 0) {
2318 *status = CS_WAIT_STATUS_COMPLETED;
2319 if (timestamp)
2320 *timestamp = ktime_to_ns(fence->timestamp);
2321 } else {
2322 *status = CS_WAIT_STATUS_BUSY;
2323 }
2324
2325 if (fence->error == -ETIMEDOUT)
2326 rc = -ETIMEDOUT;
2327 else if (fence->error == -EIO)
2328 rc = -EIO;
2329
2330 return rc;
2331 }
2332
2333 /*
2334 * hl_cs_poll_fences - iterate CS fences to check for CS completion
2335 *
2336 * @mcs_data: multi-CS internal data
2337 *
2338 * @return 0 on success, otherwise non 0 error code
2339 *
2340 * The function iterates over all CS sequences in the list and sets a bit
2341 * in completion_bitmap for each completed CS (see the example after the
2342 * function). While iterating, the function also extracts the stream
2343 * master QID map to be used later by the waiting function.
2344 * This function shall be called after taking the context ref.
2345 */
2346 static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
2347 {
2348 struct hl_fence **fence_ptr = mcs_data->fence_arr;
2349 struct hl_device *hdev = mcs_data->ctx->hdev;
2350 int i, rc, arr_len = mcs_data->arr_len;
2351 u64 *seq_arr = mcs_data->seq_arr;
2352 ktime_t max_ktime, first_cs_time;
2353 enum hl_cs_wait_status status;
2354
2355 memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
2356
2357 /* get all fences under the same lock */
2358 rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2359 if (rc)
2360 return rc;
2361
2362 /*
2363 * Set to the maximum time in order to verify the timestamp is valid:
2364 * if this value is still maintained at the end, no timestamp was updated.
2365 */
2366 max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2367 first_cs_time = max_ktime;
2368
2369 for (i = 0; i < arr_len; i++, fence_ptr++) {
2370 struct hl_fence *fence = *fence_ptr;
2371
2372 /*
2373 * function won't sleep as it is called with timeout 0 (i.e.
2374 * poll the fence)
2375 */
2376 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
2377 &status, 0, NULL);
2378 if (rc) {
2379 dev_err(hdev->dev,
2380 "wait_for_fence error :%d for CS seq %llu\n",
2381 rc, seq_arr[i]);
2382 break;
2383 }
2384
2385 switch (status) {
2386 case CS_WAIT_STATUS_BUSY:
2387 /* CS has not finished yet, keep waiting on its QID */
2388 mcs_data->stream_master_qid_map |=
2389 fence->stream_master_qid_map;
2390 break;
2391 case CS_WAIT_STATUS_COMPLETED:
2392 /*
2393 * Use mcs_handling_done to avoid the possibility of returning
2394 * to the user and indicating the CS completed before it finished
2395 * all of its multi-CS handling, so as to avoid a race the next
2396 * time the user waits for multi-CS.
2397 */
2398 if (!fence->mcs_handling_done)
2399 break;
2400
2401 mcs_data->completion_bitmap |= BIT(i);
2402 /*
2403 * For all completed CSs we take the earliest timestamp.
2404 * For this we have to validate that the timestamp is the
2405 * earliest of all timestamps seen so far.
2406 */
2407 if (mcs_data->update_ts &&
2408 (ktime_compare(fence->timestamp, first_cs_time) < 0))
2409 first_cs_time = fence->timestamp;
2410 break;
2411 case CS_WAIT_STATUS_GONE:
2412 mcs_data->update_ts = false;
2413 mcs_data->gone_cs = true;
2414 /*
2415 * It is possible to get old sequence numbers from the user
2416 * which relate to already-completed CSs whose fences are
2417 * already gone. In this case the CS is set as completed, but
2418 * there is no need to consider its QID for mcs completion.
2419 */
2420 mcs_data->completion_bitmap |= BIT(i);
2421 break;
2422 default:
2423 dev_err(hdev->dev, "Invalid fence status\n");
2424 return -EINVAL;
2425 }
2426
2427 }
2428
2429 hl_fences_put(mcs_data->fence_arr, arr_len);
2430
2431 if (mcs_data->update_ts &&
2432 (ktime_compare(first_cs_time, max_ktime) != 0))
2433 mcs_data->timestamp = ktime_to_ns(first_cs_time);
2434
2435 return rc;
2436 }
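
/*
 * Example of how completion_bitmap maps back to the caller's sequence
 * array: for seq_arr = { 10, 11, 12 }, if CS 10 has completed and CS 12 is
 * already gone while CS 11 is still running, the resulting bitmap is
 * BIT(0) | BIT(2) = 0x5; bit 1 stays clear and the stream master QID map
 * of CS 11 is accumulated for the wait that follows.
 */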
2437
2438 static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
2439 u64 timeout_us, u64 seq,
2440 enum hl_cs_wait_status *status, s64 *timestamp)
2441 {
2442 struct hl_fence *fence;
2443 int rc = 0;
2444
2445 if (timestamp)
2446 *timestamp = 0;
2447
2448 hl_ctx_get(hdev, ctx);
2449
2450 fence = hl_ctx_get_fence(ctx, seq);
2451
2452 rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2453 hl_fence_put(fence);
2454 hl_ctx_put(ctx);
2455
2456 return rc;
2457 }
2458
2459 /*
2460 * hl_wait_multi_cs_completion_init - init completion structure
2461 *
2462 * @hdev: pointer to habanalabs device structure
2463 * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
2464 * master QID to wait on
2465 *
2466 * @return valid completion struct pointer on success, otherwise error pointer
2467 *
2468 * Up to MULTI_CS_MAX_USER_CTX calls can be made concurrently to the driver.
2469 * The function gets the first available completion (by marking it "used")
2470 * and initializes its values.
2471 */
2472 static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
2473 struct hl_device *hdev,
2474 u8 stream_master_bitmap)
2475 {
2476 struct multi_cs_completion *mcs_compl;
2477 int i;
2478
2479 /* find free multi_cs completion structure */
2480 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2481 mcs_compl = &hdev->multi_cs_completion[i];
2482 spin_lock(&mcs_compl->lock);
2483 if (!mcs_compl->used) {
2484 mcs_compl->used = 1;
2485 mcs_compl->timestamp = 0;
2486 mcs_compl->stream_master_qid_map = stream_master_bitmap;
2487 reinit_completion(&mcs_compl->completion);
2488 spin_unlock(&mcs_compl->lock);
2489 break;
2490 }
2491 spin_unlock(&mcs_compl->lock);
2492 }
2493
2494 if (i == MULTI_CS_MAX_USER_CTX) {
2495 dev_err(hdev->dev,
2496 "no available multi-CS completion structure\n");
2497 return ERR_PTR(-ENOMEM);
2498 }
2499 return mcs_compl;
2500 }
2501
2502 /*
2503 * hl_wait_multi_cs_completion_fini - return completion structure and set as
2504 * unused
2505 *
2506 * @mcs_compl: pointer to the completion structure
2507 */
2508 static void hl_wait_multi_cs_completion_fini(
2509 struct multi_cs_completion *mcs_compl)
2510 {
2511 /*
2512 * free completion structure, do it under lock to be in-sync with the
2513 * thread that signals completion
2514 */
2515 spin_lock(&mcs_compl->lock);
2516 mcs_compl->used = 0;
2517 spin_unlock(&mcs_compl->lock);
2518 }
2519
2520 /*
2521 * hl_wait_multi_cs_completion - wait for first CS to complete
2522 *
2523 * @mcs_data: multi-CS internal data
2524 *
2525 * @return 0 on success, otherwise non 0 error code
2526 */
2527 static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
2528 {
2529 struct hl_device *hdev = mcs_data->ctx->hdev;
2530 struct multi_cs_completion *mcs_compl;
2531 long completion_rc;
2532
2533 mcs_compl = hl_wait_multi_cs_completion_init(hdev,
2534 mcs_data->stream_master_qid_map);
2535 if (IS_ERR(mcs_compl))
2536 return PTR_ERR(mcs_compl);
2537
2538 completion_rc = wait_for_completion_interruptible_timeout(
2539 &mcs_compl->completion,
2540 usecs_to_jiffies(mcs_data->timeout_us));
2541
2542 /* update timestamp */
2543 if (completion_rc > 0)
2544 mcs_data->timestamp = mcs_compl->timestamp;
2545
2546 hl_wait_multi_cs_completion_fini(mcs_compl);
2547
2548 mcs_data->wait_status = completion_rc;
2549
2550 return 0;
2551 }
2552
2553 /*
2554 * hl_multi_cs_completion_init - init array of multi-CS completion structures
2555 *
2556 * @hdev: pointer to habanalabs device structure
2557 */
2558 void hl_multi_cs_completion_init(struct hl_device *hdev)
2559 {
2560 struct multi_cs_completion *mcs_cmpl;
2561 int i;
2562
2563 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2564 mcs_cmpl = &hdev->multi_cs_completion[i];
2565 mcs_cmpl->used = 0;
2566 spin_lock_init(&mcs_cmpl->lock);
2567 init_completion(&mcs_cmpl->completion);
2568 }
2569 }
2570
2571 /*
2572 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2573 *
2574 * @hpriv: pointer to the private data of the fd
2575 * @data: pointer to multi-CS wait ioctl in/out args
2576 *
2577 */
2578 static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2579 {
2580 struct hl_device *hdev = hpriv->hdev;
2581 struct multi_cs_data mcs_data = {0};
2582 union hl_wait_cs_args *args = data;
2583 struct hl_ctx *ctx = hpriv->ctx;
2584 struct hl_fence **fence_arr;
2585 void __user *seq_arr;
2586 u32 size_to_copy;
2587 u64 *cs_seq_arr;
2588 u8 seq_arr_len;
2589 int rc;
2590
2591 if (!hdev->supports_wait_for_multi_cs) {
2592 dev_err(hdev->dev, "Wait for multi CS is not supported\n");
2593 return -EPERM;
2594 }
2595
2596 seq_arr_len = args->in.seq_arr_len;
2597
2598 if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
2599 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
2600 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
2601 return -EINVAL;
2602 }
2603
2604 /* allocate memory for sequence array */
2605 cs_seq_arr =
2606 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
2607 if (!cs_seq_arr)
2608 return -ENOMEM;
2609
2610 /* copy CS sequence array from user */
2611 seq_arr = (void __user *) (uintptr_t) args->in.seq;
2612 size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
2613 if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
2614 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
2615 rc = -EFAULT;
2616 goto free_seq_arr;
2617 }
2618
2619 /* allocate array for the fences */
2620 fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
2621 if (!fence_arr) {
2622 rc = -ENOMEM;
2623 goto free_seq_arr;
2624 }
2625
2626 /* initialize the multi-CS internal data */
2627 mcs_data.ctx = ctx;
2628 mcs_data.seq_arr = cs_seq_arr;
2629 mcs_data.fence_arr = fence_arr;
2630 mcs_data.arr_len = seq_arr_len;
2631
2632 hl_ctx_get(hdev, ctx);
2633
2634 /* poll all CS fences, extract timestamp */
2635 mcs_data.update_ts = true;
2636 rc = hl_cs_poll_fences(&mcs_data);
2637 /*
2638 * skip wait for CS completion when one of the below is true:
2639 * - an error on the poll function
2640 * - one or more CS in the list completed
2641 * - the user called ioctl with timeout 0
2642 */
2643 if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
2644 goto put_ctx;
2645
2646 /* wait (with timeout) for the first CS to be completed */
2647 mcs_data.timeout_us = args->in.timeout_us;
2648 rc = hl_wait_multi_cs_completion(&mcs_data);
2649 if (rc)
2650 goto put_ctx;
2651
2652 if (mcs_data.wait_status > 0) {
2653 /*
2654 * Poll the fences once again to update the CS map;
2655 * no timestamp should be updated this time.
2656 */
2657 mcs_data.update_ts = false;
2658 rc = hl_cs_poll_fences(&mcs_data);
2659
2660 /*
2661 * if hl_wait_multi_cs_completion returned before timeout (i.e.
2662 * it got a completion) we expect to see at least one CS
2663 * completed after the poll function.
2664 */
2665 if (!mcs_data.completion_bitmap) {
2666 dev_warn_ratelimited(hdev->dev,
2667 "Multi-CS got completion on wait but no CS completed\n");
2668 rc = -EFAULT;
2669 }
2670 }
2671
2672 put_ctx:
2673 hl_ctx_put(ctx);
2674 kfree(fence_arr);
2675
2676 free_seq_arr:
2677 kfree(cs_seq_arr);
2678
2679 if (rc)
2680 return rc;
2681
2682 if (mcs_data.wait_status == -ERESTARTSYS) {
2683 dev_err_ratelimited(hdev->dev,
2684 "user process got signal while waiting for Multi-CS\n");
2685 return -EINTR;
2686 }
2687
2688 /* update output args */
2689 memset(args, 0, sizeof(*args));
2690
2691 if (mcs_data.completion_bitmap) {
2692 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2693 args->out.cs_completion_map = mcs_data.completion_bitmap;
2694
2695 /* if timestamp not 0- it's valid */
2696 if (mcs_data.timestamp) {
2697 args->out.timestamp_nsec = mcs_data.timestamp;
2698 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2699 }
2700
2701 /* update if some CS was gone */
2702 if (mcs_data.gone_cs)
2703 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2704 } else {
2705 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2706 }
2707
2708 return 0;
2709 }
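
/*
 * A minimal userspace sketch of the multi-CS wait, assuming HL_IOCTL_WAIT_CS
 * is the entry point; fd, seq_a and seq_b are placeholders:
 *
 *	__u64 seqs[2] = { seq_a, seq_b };
 *	union hl_wait_cs_args args = {0};
 *
 *	args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;
 *	args.in.seq = (__u64) (uintptr_t) seqs;
 *	args.in.seq_arr_len = 2;
 *	args.in.timeout_us = 100000;
 *	ioctl(fd, HL_IOCTL_WAIT_CS, &args);
 *
 * On return, args.out.status is HL_WAIT_CS_STATUS_COMPLETED when at least
 * one CS completed, and args.out.cs_completion_map tells which entries of
 * seqs completed (bit i corresponds to seqs[i]).
 */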
2710
2711 static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2712 {
2713 struct hl_device *hdev = hpriv->hdev;
2714 union hl_wait_cs_args *args = data;
2715 enum hl_cs_wait_status status;
2716 u64 seq = args->in.seq;
2717 s64 timestamp;
2718 int rc;
2719
2720 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
2721 &status, &timestamp);
2722
2723 if (rc == -ERESTARTSYS) {
2724 dev_err_ratelimited(hdev->dev,
2725 "user process got signal while waiting for CS handle %llu\n",
2726 seq);
2727 return -EINTR;
2728 }
2729
2730 memset(args, 0, sizeof(*args));
2731
2732 if (rc) {
2733 if (rc == -ETIMEDOUT) {
2734 dev_err_ratelimited(hdev->dev,
2735 "CS %llu has timed-out while user process is waiting for it\n",
2736 seq);
2737 args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
2738 } else if (rc == -EIO) {
2739 dev_err_ratelimited(hdev->dev,
2740 "CS %llu has been aborted while user process is waiting for it\n",
2741 seq);
2742 args->out.status = HL_WAIT_CS_STATUS_ABORTED;
2743 }
2744 return rc;
2745 }
2746
2747 if (timestamp) {
2748 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2749 args->out.timestamp_nsec = timestamp;
2750 }
2751
2752 switch (status) {
2753 case CS_WAIT_STATUS_GONE:
2754 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2755 fallthrough;
2756 case CS_WAIT_STATUS_COMPLETED:
2757 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2758 break;
2759 case CS_WAIT_STATUS_BUSY:
2760 default:
2761 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2762 break;
2763 }
2764
2765 return 0;
2766 }
2767
2768 static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
2769 u32 timeout_us, u64 user_address,
2770 u64 target_value, u16 interrupt_offset,
2771 enum hl_cs_wait_status *status,
2772 u64 *timestamp)
2773 {
2774 struct hl_user_pending_interrupt *pend;
2775 struct hl_user_interrupt *interrupt;
2776 unsigned long timeout, flags;
2777 u64 completion_value;
2778 long completion_rc;
2779 int rc = 0;
2780
2781 if (timeout_us == U32_MAX)
2782 timeout = timeout_us;
2783 else
2784 timeout = usecs_to_jiffies(timeout_us);
2785
2786 hl_ctx_get(hdev, ctx);
2787
2788 pend = kmalloc(sizeof(*pend), GFP_KERNEL);
2789 if (!pend) {
2790 hl_ctx_put(ctx);
2791 return -ENOMEM;
2792 }
2793
2794 hl_fence_init(&pend->fence, ULONG_MAX);
2795
2796 if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
2797 interrupt = &hdev->common_user_interrupt;
2798 else
2799 interrupt = &hdev->user_interrupt[interrupt_offset];
2800
2801 /* Add pending user interrupt to relevant list for the interrupt
2802 * handler to monitor
2803 */
2804 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
2805 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
2806 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
2807
2808 /* We check the completion value here since the interrupt could have
2809 * been received before we added the node to the wait list
2810 */
2811 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
2812 dev_err(hdev->dev, "Failed to copy completion value from user\n");
2813 rc = -EFAULT;
2814 goto remove_pending_user_interrupt;
2815 }
2816
2817 if (completion_value >= target_value) {
2818 *status = CS_WAIT_STATUS_COMPLETED;
2819 /* There was no interrupt, we assume the completion is now. */
2820 pend->fence.timestamp = ktime_get();
2821 } else
2822 *status = CS_WAIT_STATUS_BUSY;
2823
2824 if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
2825 goto remove_pending_user_interrupt;
2826
2827 wait_again:
2828 /* Wait for interrupt handler to signal completion */
2829 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
2830 timeout);
2831
2832 /* If the timeout did not expire, we need to perform the comparison.
2833 * If the comparison fails, keep waiting until the timeout expires.
2834 */
2835 if (completion_rc > 0) {
2836 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
2837 /* reinit_completion must be called before we check for user
2838 * completion value, otherwise, if interrupt is received after
2839 * the comparison and before the next wait_for_completion,
2840 * we will reach timeout and fail
2841 */
2842 reinit_completion(&pend->fence.completion);
2843 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
2844
2845 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
2846 dev_err(hdev->dev, "Failed to copy completion value from user\n");
2847 rc = -EFAULT;
2848
2849 goto remove_pending_user_interrupt;
2850 }
2851
2852 if (completion_value >= target_value) {
2853 *status = CS_WAIT_STATUS_COMPLETED;
2854 } else {
2855 timeout = completion_rc;
2856 goto wait_again;
2857 }
2858 } else if (completion_rc == -ERESTARTSYS) {
2859 dev_err_ratelimited(hdev->dev,
2860 "user process got signal while waiting for interrupt ID %d\n",
2861 interrupt->interrupt_id);
2862 rc = -EINTR;
2863 } else {
2864 *status = CS_WAIT_STATUS_BUSY;
2865 }
2866
2867 remove_pending_user_interrupt:
2868 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
2869 list_del(&pend->wait_list_node);
2870 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
2871
2872 *timestamp = ktime_to_ns(pend->fence.timestamp);
2873
2874 kfree(pend);
2875 hl_ctx_put(ctx);
2876
2877 return rc;
2878 }
2879
2880 static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2881 {
2882 u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
2883 struct hl_device *hdev = hpriv->hdev;
2884 struct asic_fixed_properties *prop;
2885 union hl_wait_cs_args *args = data;
2886 enum hl_cs_wait_status status;
2887 u64 timestamp;
2888 int rc;
2889
2890 prop = &hdev->asic_prop;
2891
2892 if (!prop->user_interrupt_count) {
2893 dev_err(hdev->dev, "no user interrupts allowed");
2894 return -EPERM;
2895 }
2896
2897 interrupt_id =
2898 FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
2899
2900 first_interrupt = prop->first_available_user_msix_interrupt;
2901 last_interrupt = prop->first_available_user_msix_interrupt +
2902 prop->user_interrupt_count - 1;
2903
2904 if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
2905 interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
2906 dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
2907 return -EINVAL;
2908 }
2909
2910 if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
2911 interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
2912 else
2913 interrupt_offset = interrupt_id - first_interrupt;
2914
2915 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
2916 args->in.interrupt_timeout_us, args->in.addr,
2917 args->in.target, interrupt_offset, &status,
2918 &timestamp);
2919
2920 if (rc) {
2921 if (rc != -EINTR)
2922 dev_err_ratelimited(hdev->dev,
2923 "interrupt_wait_ioctl failed (%d)\n", rc);
2924
2925 return rc;
2926 }
2927
2928 memset(args, 0, sizeof(*args));
2929
2930 if (timestamp) {
2931 args->out.timestamp_nsec = timestamp;
2932 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2933 }
2934
2935 switch (status) {
2936 case CS_WAIT_STATUS_COMPLETED:
2937 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2938 break;
2939 case CS_WAIT_STATUS_BUSY:
2940 default:
2941 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2942 break;
2943 }
2944
2945 return 0;
2946 }
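
/*
 * A minimal userspace sketch of the user-interrupt wait, assuming
 * HL_IOCTL_WAIT_CS is the entry point. Here encoded_interrupt_id stands for
 * the interrupt id shifted into the HL_WAIT_CS_FLAGS_INTERRUPT_MASK bits of
 * the flags word, and cq_counter is a 64-bit value in host memory that the
 * device advances when it raises the interrupt:
 *
 *	union hl_wait_cs_args args = {0};
 *
 *	args.in.flags = HL_WAIT_CS_FLAGS_INTERRUPT | encoded_interrupt_id;
 *	args.in.addr = (__u64) (uintptr_t) &cq_counter;
 *	args.in.target = target_value;
 *	args.in.interrupt_timeout_us = 100000;
 *	ioctl(fd, HL_IOCTL_WAIT_CS, &args);
 *
 * On return, args.out.status is HL_WAIT_CS_STATUS_COMPLETED once the value
 * at addr is greater than or equal to target, and HL_WAIT_CS_STATUS_BUSY if
 * the timeout expired first.
 */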
2947
2948 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2949 {
2950 union hl_wait_cs_args *args = data;
2951 u32 flags = args->in.flags;
2952 int rc;
2953
2954 /* If the device is not operational, no point in waiting for any command submission or
2955 * user interrupt
2956 */
2957 if (!hl_device_operational(hpriv->hdev, NULL))
2958 return -EPERM;
2959
2960 if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
2961 rc = hl_interrupt_wait_ioctl(hpriv, data);
2962 else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
2963 rc = hl_multi_cs_wait_ioctl(hpriv, data);
2964 else
2965 rc = hl_cs_wait_ioctl(hpriv, data);
2966
2967 return rc;
2968 }
2969